Raiff1982 committed (verified) · commit d54e23d · parent 854cbd0

Update AegisCore.py

Files changed (1): AegisCore.py (+15 −10)
AegisCore.py CHANGED
@@ -23,12 +23,13 @@ class AIConfig:
         "model_name": "mistralai/Mistral-7B-Instruct-v0.2",
         "perspectives": ["newton", "davinci", "quantum", "emotional"],
         "safety_thresholds": {
-            "memory": 80,
-            "cpu": 85,
+            "memory": 85,  # Changed from 80
+            "cpu": 90,  # Changed from 85
             "response_time": 2.0
         },
         "max_retries": 3,
-        "max_input_length": 2048
+        "max_input_length": 4096,  # Changed from 2048
+        "max_response_length": 1024  # Added to control output size
     }

     def __init__(self, config_path: str = "config.json"):
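Note: the new defaults raise the memory and CPU thresholds and introduce max_response_length. For reference, a config.json matching the post-commit defaults could be generated like this (keys mirror the DEFAULT dict in the hunk above; this file itself is not part of the commit):

import json

# Sketch of a config.json matching the post-commit defaults in this diff.
default_config = {
    "model_name": "mistralai/Mistral-7B-Instruct-v0.2",
    "perspectives": ["newton", "davinci", "quantum", "emotional"],
    "safety_thresholds": {"memory": 85, "cpu": 90, "response_time": 2.0},
    "max_retries": 3,
    "max_input_length": 4096,
    "max_response_length": 1024,
}

with open("config.json", "w") as f:
    json.dump(default_config, f, indent=2)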
@@ -38,6 +39,7 @@ class AIConfig:
         self.safety_thresholds: Dict[str, float] = self.config["safety_thresholds"]
         self.max_retries = self.config["max_retries"]
         self.max_input_length = self.config["max_input_length"]
+        self.max_response_length = self.config["max_response_length"]

         # Encryption key management
         key_path = os.path.expanduser("~/.ai_system.key")
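Note: self.config["max_response_length"] raises KeyError for any config.json written before this commit. A tolerant variant (a sketch, not what the commit does) would fall back to the new default:

def load_max_response_length(config: dict, default: int = 1024) -> int:
    # Older config.json files predate the new key; dict.get avoids the
    # KeyError that config["max_response_length"] raises for them.
    return config.get("max_response_length", default)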
@@ -96,7 +98,7 @@ class Element:
     def execute_defense_function(self, system: Any):
         if self.defense_ability in self.DEFENSE_FUNCTIONS:
             logging.info(f"{self.name} {self.defense_ability} activated")
-            self.DEFENSE_FUNCTIONS[self.defense_ability](system)
+            self.DEFENSE_FUNCTIONS[self.defense_ability](system)
         else:
             logging.warning(f"No defense mechanism for {self.defense_ability}")
@@ -109,7 +111,7 @@ class CognitiveEngine:
     }

     def get_insight(self, perspective: str, query: str) -> str:
-        return self.PERSPECTIVES[perspective](self, query)
+        return self.PERSPECTIVES[perspective](self, query)

     def ethical_guidelines(self) -> str:
         return "Ethical framework: Prioritize human safety, transparency, and accountability"
@@ -154,9 +156,12 @@ class SelfHealingSystem:
         latest = np.array([[self.metric_history[-1]['memory'],
                             self.metric_history[-1]['cpu'],
                             self.metric_history[-1]['response_time']]])
-        if self.anomaly_detector.predict(latest)[0] == -1:
+        if self.anomaly_detector.predict(latest) == -1:
             await self._mitigate_issue()

+        logging.info(f"Memory usage: {metrics['memory']}% (Threshold: {self.config.safety_thresholds['memory']}%)")
+        logging.info(f"CPU load: {metrics['cpu']}% (Threshold: {self.config.safety_thresholds['cpu']}%)")
+
     async def _mitigate_issue(self):
         logging.warning("System anomaly detected! Initiating corrective measures...")
         self.failure_count += 1
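Note: the removed [0] matters if anomaly_detector follows the scikit-learn convention (e.g. IsolationForest), where predict returns an array of +1/-1 labels, one per row. A sketch under that assumption:

import numpy as np
from sklearn.ensemble import IsolationForest

# Assumes a scikit-learn-style detector; the training data is random filler.
detector = IsolationForest(random_state=0).fit(np.random.rand(100, 3))

latest = np.array([[72.0, 55.0, 1.2]])  # memory %, cpu %, response time (s)
pred = detector.predict(latest)         # ndarray, e.g. array([1]) or array([-1])

if pred[0] == -1:     # [0] yields a scalar; `pred == -1` is a length-1 boolean
    print("anomaly")  # array, which `if` tolerates only because it has one element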
@@ -186,8 +191,8 @@ class SafetySystem:

     def analyze(self, text: str) -> dict:
         return {
-            "toxicity": self.toxicity(text[:512])[0]['score'],
-            "bias": self.bias(text[:512])[0]['score'],
+            "toxicity": self.toxicity(text[:512])['score'],
+            "bias": self.bias(text[:512])['score'],
             "pii": self._detect_pii(text)
         }
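For context, transformers text-classification pipelines return a list of dicts for a single input, e.g. [{'label': ..., 'score': ...}], which is why the removed [0] sat between the call and ['score']. A sketch, assuming self.toxicity and self.bias are such pipelines (the model name is an example, not necessarily what SafetySystem loads):

from transformers import pipeline

toxicity = pipeline("text-classification", model="unitary/toxic-bert")

result = toxicity("some user text"[:512])  # list of dicts, one per input
score = result[0]["score"]                 # index the list before the key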
@@ -286,7 +291,7 @@ class AICore:
                 top_p=0.95,
                 do_sample=True
             )
-            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            response = self.tokenizer.decode(outputs, skip_special_tokens=True)
             return self._apply_defenses(response)
         except torch.cuda.OutOfMemoryError:
             logging.warning("GPU memory overflow! Reducing load...")
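For context, model.generate returns a (batch, sequence) tensor of token ids, while tokenizer.decode expects a single sequence; indexing the batch or using batch_decode are the two standard patterns. A sketch with the model named in the config above (generation arguments illustrative):

from transformers import AutoModelForCausalLM, AutoTokenizer

name = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name)

inputs = tokenizer("Hello", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32, top_p=0.95, do_sample=True)

text = tokenizer.decode(outputs[0], skip_special_tokens=True)       # one sequence
texts = tokenizer.batch_decode(outputs, skip_special_tokens=True)   # whole batch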
@@ -305,7 +310,7 @@ class AICore:
         for filter_func in self.response_filters:
             response = filter_func(response)

-        return response[:2000]  # Ensure final response length limit
+        return response[:self.config.max_response_length]  # Ensure final response length limit

     async def shutdown(self):
         if hasattr(self, 'model'):
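Note: the cap now follows the config, but slicing counts characters, not tokens, so a response can still be cut mid-word. One possible mitigation (a sketch, not part of the commit) trims back to the last whitespace:

def cap_response(response: str, max_response_length: int) -> str:
    # Character-based cap, as in the diff; additionally drop any trailing
    # partial word so the cut lands on a whitespace boundary.
    if len(response) <= max_response_length:
        return response
    cut = response[:max_response_length]
    return cut.rsplit(None, 1)[0] if " " in cut else cut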
 