Raiff1982 committed · verified
Commit d7a5a23 · 1 Parent(s): b7d51cc

Update UniversalReasoning.py

Files changed (1)
  1. UniversalReasoning.py +198 -255
UniversalReasoning.py CHANGED
@@ -1,255 +1,198 @@
- import asyncio
- import json
- import os
- import logging
- from typing import List
-
- # Ensure vaderSentiment is installed
- try:
-     from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
- except ModuleNotFoundError:
-     import subprocess
-     import sys
-     subprocess.check_call([sys.executable, "-m", "pip", "install", "vaderSentiment"])
-     from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
-
- # Ensure nltk is installed and download required data
- try:
-     import nltk
-     from nltk.tokenize import word_tokenize
-     nltk.download('punkt', quiet=True)
- except ImportError:
-     import subprocess
-     import sys
-     subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
-     import nltk
-     from nltk.tokenize import word_tokenize
-     nltk.download('punkt', quiet=True)
-
- # Import perspectives
- from perspectives import (
-     NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
-     NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
-     MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective
- )
-
- # Load environment variables
- from dotenv import load_dotenv
- load_dotenv()
- azure_openai_api_key = os.getenv('AZURE_OPENAI_API_KEY')
- azure_openai_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
-
- # Setup Logging
- def setup_logging(config):
-     if config.get('logging_enabled', True):
-         log_level = config.get('log_level', 'DEBUG').upper()
-         numeric_level = getattr(logging, log_level, logging.DEBUG)
-         logging.basicConfig(
-             filename='universal_reasoning.log',
-             level=numeric_level,
-             format='%(asctime)s - %(levelname)s - %(message)s'
-         )
-     else:
-         logging.disable(logging.CRITICAL)
-
- # Load JSON configuration
- def load_json_config(file_path):
-     if not os.path.exists(file_path):
-         logging.error(f"Configuration file '{file_path}' not found.")
-         return {}
-     try:
-         with open(file_path, 'r') as file:
-             config = json.load(file)
-         logging.info(f"Configuration loaded from '{file_path}'.")
-         return config
-     except json.JSONDecodeError as e:
-         logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
-         return {}
-
- # Initialize NLP (basic tokenization)
- def analyze_question(question):
-     tokens = word_tokenize(question)
-     logging.debug(f"Question tokens: {tokens}")
-     return tokens
-
- # Define the Element class
- class Element:
-     def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
-         self.name = name
-         self.symbol = symbol
-         self.representation = representation
-         self.properties = properties
-         self.interactions = interactions
-         self.defense_ability = defense_ability
-
-     def execute_defense_function(self):
-         message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
-         logging.info(message)
-         return message
-
- # Define the CustomRecognizer class
- class CustomRecognizer:
-     def recognize(self, question):
-         # Simple keyword-based recognizer for demonstration purposes
-         if any(element_name.lower() in question.lower() for element_name in ["hydrogen", "diamond"]):
-             return RecognizerResult(question)
-         return RecognizerResult(None)
-
-     def get_top_intent(self, recognizer_result):
-         if recognizer_result.text:
-             return "ElementDefense"
-         else:
-             return "None"
-
- class RecognizerResult:
-     def __init__(self, text):
-         self.text = text
-
- # Universal Reasoning Aggregator
- class UniversalReasoning:
-     def __init__(self, config):
-         self.config = config
-         self.perspectives = self.initialize_perspectives()
-         self.elements = self.initialize_elements()
-         self.recognizer = CustomRecognizer()
-         # Initialize the sentiment analyzer
-         self.sentiment_analyzer = SentimentIntensityAnalyzer()
-
-     def initialize_perspectives(self):
-         perspective_names = self.config.get('enabled_perspectives', [
-             "newton",
-             "davinci",
-             "human_intuition",
-             "neural_network",
-             "quantum_computing",
-             "resilient_kindness",
-             "mathematical",
-             "philosophical",
-             "copilot",
-             "bias_mitigation"
-         ])
-         perspective_classes = {
-             "newton": NewtonPerspective,
-             "davinci": DaVinciPerspective,
-             "human_intuition": HumanIntuitionPerspective,
-             "neural_network": NeuralNetworkPerspective,
-             "quantum_computing": QuantumComputingPerspective,
-             "resilient_kindness": ResilientKindnessPerspective,
-             "mathematical": MathematicalPerspective,
-             "philosophical": PhilosophicalPerspective,
-             "copilot": CopilotPerspective,
-             "bias_mitigation": BiasMitigationPerspective
-         }
-         perspectives = []
-         for name in perspective_names:
-             cls = perspective_classes.get(name.lower())
-             if cls:
-                 perspectives.append(cls(self.config))
-                 logging.debug(f"Perspective '{name}' initialized.")
-             else:
-                 logging.warning(f"Perspective '{name}' is not recognized and will be skipped.")
-         return perspectives
-
-     def initialize_elements(self):
-         elements = [
-             Element(
-                 name="Hydrogen",
-                 symbol="H",
-                 representation="Lua",
-                 properties=["Simple", "Lightweight", "Versatile"],
-                 interactions=["Easily integrates with other languages and systems"],
-                 defense_ability="Evasion"
-             ),
-             # You can add more elements as needed
-             Element(
-                 name="Diamond",
-                 symbol="D",
-                 representation="Kotlin",
-                 properties=["Modern", "Concise", "Safe"],
-                 interactions=["Used for Android development"],
-                 defense_ability="Adaptability"
-             )
-         ]
-         return elements
-
-     async def generate_response(self, question):
-         responses = []
-         tasks = []
-
-         # Generate responses from perspectives concurrently
-         for perspective in self.perspectives:
-             if asyncio.iscoroutinefunction(perspective.generate_response):
-                 tasks.append(perspective.generate_response(question))
-             else:
-                 # Wrap synchronous functions in coroutine
-                 async def sync_wrapper(perspective, question):
-                     return perspective.generate_response(question)
-                 tasks.append(sync_wrapper(perspective, question))
-
-         perspective_results = await asyncio.gather(*tasks, return_exceptions=True)
-
-         for perspective, result in zip(self.perspectives, perspective_results):
-             if isinstance(result, Exception):
-                 logging.error(f"Error generating response from {perspective.__class__.__name__}: {result}")
-             else:
-                 responses.append(result)
-                 logging.debug(f"Response from {perspective.__class__.__name__}: {result}")
-
-         # Handle element defense logic
-         recognizer_result = self.recognizer.recognize(question)
-         top_intent = self.recognizer.get_top_intent(recognizer_result)
-         if top_intent == "ElementDefense":
-             element_name = recognizer_result.text.strip()
-             element = next(
-                 (el for el in self.elements if el.name.lower() in element_name.lower()),
-                 None
-             )
-             if element:
-                 defense_message = element.execute_defense_function()
-                 responses.append(defense_message)
-             else:
-                 logging.info(f"No matching element found for '{element_name}'")
-
-         ethical_considerations = self.config.get(
-             'ethical_considerations',
-             "Always act with transparency, fairness, and respect for privacy."
-         )
-         responses.append(f"**Ethical Considerations:**\n{ethical_considerations}")
-
-         formatted_response = "\n\n".join(responses)
-         return formatted_response
-
-     def save_response(self, response):
-         if self.config.get('enable_response_saving', False):
-             save_path = self.config.get('response_save_path', 'responses.txt')
-             try:
-                 with open(save_path, 'a', encoding='utf-8') as file:
-                     file.write(response + '\n')
-                 logging.info(f"Response saved to '{save_path}'.")
-             except Exception as e:
-                 logging.error(f"Error saving response to '{save_path}': {e}")
-
-     def backup_response(self, response):
-         if self.config.get('backup_responses', {}).get('enabled', False):
-             backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
-             try:
-                 with open(backup_path, 'a', encoding='utf-8') as file:
-                     file.write(response + '\n')
-                 logging.info(f"Response backed up to '{backup_path}'.")
-             except Exception as e:
-                 logging.error(f"Error backing up response to '{backup_path}': {e}")
-
- # Example usage
- if __name__ == "__main__":
-     config = load_json_config('config.json')
-     # Add Azure OpenAI configurations to the config
-     config['azure_openai_api_key'] = azure_openai_api_key
-     config['azure_openai_endpoint'] = azure_openai_endpoint
-     setup_logging(config)
-     universal_reasoning = UniversalReasoning(config)
-     question = "Tell me about Hydrogen and its defense mechanisms."
-     response = asyncio.run(universal_reasoning.generate_response(question))
-     print(response)
-     if response:
-         universal_reasoning.save_response(response)
-         universal_reasoning.backup_response(response)
 
+ import asyncio
+ import json
+ import os
+ import logging
+ from typing import List
+
+ # Ensure vaderSentiment is installed
+ try:
+     from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
+ except ModuleNotFoundError:
+     import subprocess
+     import sys
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "vaderSentiment"])
+     from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
+
+ # Ensure nltk is installed and download required data
+ try:
+     import nltk
+     from nltk.tokenize import word_tokenize
+     nltk.download('punkt', quiet=True)
+ except ImportError:
+     import subprocess
+     import sys
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
+     import nltk
+     from nltk.tokenize import word_tokenize
+     nltk.download('punkt', quiet=True)
+
+ # Import perspectives
+ from perspectives import (
+     NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
+     NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
+     MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective
+ )
+
+ # Setup Logging
+ def setup_logging(config):
+     if config.get('logging_enabled', True):
+         log_level = config.get('log_level', 'DEBUG').upper()
+         numeric_level = getattr(logging, log_level, logging.DEBUG)
+         logging.basicConfig(
+             filename='universal_reasoning.log',
+             level=numeric_level,
+             format='%(asctime)s - %(levelname)s - %(message)s'
+         )
+     else:
+         logging.disable(logging.CRITICAL)
+
+ # Load JSON configuration
+ def load_json_config(file_path):
+     if not os.path.exists(file_path):
+         logging.error(f"Configuration file '{file_path}' not found.")
+         return {}
+     try:
+         with open(file_path, 'r') as file:
+             config = json.load(file)
+         logging.info(f"Configuration loaded from '{file_path}'.")
+         config['allow_network_calls'] = False  # Lockdown
+         return config
+     except json.JSONDecodeError as e:
+         logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
+         return {}
+
+ # NLP Analyzer
+ def analyze_question(question):
+     tokens = word_tokenize(question)
+     logging.debug(f"Question tokens: {tokens}")
+     return tokens
+
+ # Element Class
+ class Element:
+     def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
+         self.name = name
+         self.symbol = symbol
+         self.representation = representation
+         self.properties = properties
+         self.interactions = interactions
+         self.defense_ability = defense_ability
+
+     def execute_defense_function(self):
+         message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
+         logging.info(message)
+         return message
+
+ # Recognizer Classes
+ class CustomRecognizer:
+     def recognize(self, question):
+         if any(element_name.lower() in question.lower() for element_name in ["hydrogen", "diamond"]):
+             return RecognizerResult(question)
+         return RecognizerResult(None)
+
+     def get_top_intent(self, recognizer_result):
+         if recognizer_result.text:
+             return "ElementDefense"
+         else:
+             return "None"
+
+ class RecognizerResult:
+     def __init__(self, text):
+         self.text = text
+
+ # Reasoning Engine
+ class UniversalReasoning:
+     def __init__(self, config):
+         self.config = config
+         self.perspectives = self.initialize_perspectives()
+         self.elements = self.initialize_elements()
+         self.recognizer = CustomRecognizer()
+         self.sentiment_analyzer = SentimentIntensityAnalyzer()
+
+     def initialize_perspectives(self):
+         perspective_names = self.config.get('enabled_perspectives', [
+             "newton", "davinci", "human_intuition", "neural_network", "quantum_computing",
+             "resilient_kindness", "mathematical", "philosophical", "copilot", "bias_mitigation"
+         ])
+         perspective_classes = {
+             "newton": NewtonPerspective,
+             "davinci": DaVinciPerspective,
+             "human_intuition": HumanIntuitionPerspective,
+             "neural_network": NeuralNetworkPerspective,
+             "quantum_computing": QuantumComputingPerspective,
+             "resilient_kindness": ResilientKindnessPerspective,
+             "mathematical": MathematicalPerspective,
+             "philosophical": PhilosophicalPerspective,
+             "copilot": CopilotPerspective,
+             "bias_mitigation": BiasMitigationPerspective
+         }
+         perspectives = []
+         for name in perspective_names:
+             cls = perspective_classes.get(name.lower())
+             if cls:
+                 perspectives.append(cls(self.config))
+                 logging.debug(f"Perspective '{name}' initialized.")
+         return perspectives
+
+     def initialize_elements(self):
+         return [
+             Element("Hydrogen", "H", "Lua", ["Simple", "Lightweight", "Versatile"],
+                     ["Integrates with other languages"], "Evasion"),
+             Element("Diamond", "D", "Kotlin", ["Modern", "Concise", "Safe"],
+                     ["Used for Android development"], "Adaptability")
+         ]
+
+     async def generate_response(self, question):
+         responses = []
+         tasks = []
+
+         for perspective in self.perspectives:
+             if asyncio.iscoroutinefunction(perspective.generate_response):
+                 tasks.append(perspective.generate_response(question))
+             else:
+                 async def sync_wrapper(perspective, question):
+                     return perspective.generate_response(question)
+                 tasks.append(sync_wrapper(perspective, question))
+
+         perspective_results = await asyncio.gather(*tasks, return_exceptions=True)
+
+         for perspective, result in zip(self.perspectives, perspective_results):
+             if isinstance(result, Exception):
+                 logging.error(f"Error from {perspective.__class__.__name__}: {result}")
+             else:
+                 responses.append(result)
+
+         recognizer_result = self.recognizer.recognize(question)
+         top_intent = self.recognizer.get_top_intent(recognizer_result)
+         if top_intent == "ElementDefense":
+             element_name = recognizer_result.text.strip()
+             element = next((el for el in self.elements if el.name.lower() in element_name.lower()), None)
+             if element:
+                 responses.append(element.execute_defense_function())
+
+         ethical = self.config.get("ethical_considerations", "Act transparently and respectfully.")
+         responses.append(f"**Ethical Considerations:**\n{ethical}")
+
+         return "\n\n".join(responses)
+
+     def save_response(self, response):
+         if self.config.get('enable_response_saving', False):
+             path = self.config.get('response_save_path', 'responses.txt')
+             with open(path, 'a', encoding='utf-8') as file:
+                 file.write(response + '\n')
+
+     def backup_response(self, response):
+         if self.config.get('backup_responses', {}).get('enabled', False):
+             backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
+             with open(backup_path, 'a', encoding='utf-8') as file:
+                 file.write(response + '\n')
+
+ # Execution
+ if __name__ == "__main__":
+     config = load_json_config('config.json')
+     setup_logging(config)
+     ur = UniversalReasoning(config)
+     q = "Tell me about Hydrogen and its defense mechanisms."
+     result = asyncio.run(ur.generate_response(q))
+     print(result)
+     ur.save_response(result)
+     ur.backup_response(result)
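
To try the updated module outside the full repository, the following is a minimal smoke-test sketch; it is not part of the commit. It assumes only the interface the class actually uses (each perspective is constructed with the config dict and exposes generate_response(question)), stubs the perspectives module accordingly, and supplies an in-memory config built from the keys the code reads. The stub class names mirror the imports above; the harness name, the stub behavior, and the config values are illustrative assumptions.

# smoke_test.py -- hypothetical harness, not part of this commit.
# Assumes UniversalReasoning.py sits in the working directory and that a
# perspective only needs a config-taking constructor plus generate_response().
import asyncio
import sys
import types

# Stub the 'perspectives' module so the import in UniversalReasoning.py resolves.
_stub_module = types.ModuleType("perspectives")

class _StubPerspective:
    def __init__(self, config):
        self.config = config

    def generate_response(self, question):
        # Placeholder answer; a real perspective would do actual reasoning here.
        return f"{self.__class__.__name__} view on: {question}"

for _name in [
    "NewtonPerspective", "DaVinciPerspective", "HumanIntuitionPerspective",
    "NeuralNetworkPerspective", "QuantumComputingPerspective", "ResilientKindnessPerspective",
    "MathematicalPerspective", "PhilosophicalPerspective", "CopilotPerspective",
    "BiasMitigationPerspective",
]:
    setattr(_stub_module, _name, type(_name, (_StubPerspective,), {}))
sys.modules["perspectives"] = _stub_module

from UniversalReasoning import UniversalReasoning, setup_logging

# Illustrative config covering the keys the module reads; values are assumptions.
config = {
    "logging_enabled": True,
    "log_level": "INFO",
    "enabled_perspectives": ["newton", "davinci", "bias_mitigation"],
    "ethical_considerations": "Always act with transparency, fairness, and respect for privacy.",
    "enable_response_saving": False,
    "backup_responses": {"enabled": False},
}

setup_logging(config)
reasoner = UniversalReasoning(config)
answer = asyncio.run(reasoner.generate_response("Tell me about Hydrogen and its defense mechanisms."))
print(answer)

Registering the stub in sys.modules keeps the experiment independent of the real perspectives package; once that module is available, dropping the stub block exercises the actual perspective classes instead.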