Raiff1982 committed on
Commit 4867dd8 · verified · 1 Parent(s): 4ffcea6

Upload 19 files

Files changed (19)
  1. .env +9 -0
  2. ChatSetup.json +1 -0
  3. UniversalReasoning.py +255 -0
  4. bot_logic.py +123 -0
  5. chat.py +137 -0
  6. config.json +5 -0
  7. database.py +49 -0
  8. dialog_bot.py +31 -0
  9. main.py +70 -0
  10. main_dialog.py +49 -0
  11. mybot.py +360 -0
  12. mybotdata.py +156 -0
  13. pilouis.json +65 -0
  14. pyvenv.cfg +5 -0
  15. qustions.json +218 -0
  16. sentiment_analysis.py +37 -0
  17. ultimatethinking.txt +356 -0
  18. utils.py +38 -0
  19. your_script.py +244 -0
.env ADDED
@@ -0,0 +1,9 @@
+ AZURE_OPENAI_API_KEY=2zRqaiUcbPYuYRaQZ0vIkCItdwMs21V9HHxTFBcHCaLJdFJzb59jJQQJ99AKACLArgHXJ3w3AAABACOGTau5
+ AZURE_OPENAI_ENDPOINT=https://aibotforjonathan.openai.azure.com/
+ AZURE_SQL_SERVER=botserver.database.windows.net
+ AZURE_SQL_DATABASE=BotBrain20
+ AZURE_SQL_USERNAME=BotUser
+ AZURE_SQL_PASSWORD=Funtimes82!
+ LUIS_ENDPOINT=https://pilouis.cognitiveservices.azure.com/
+ LUIS_API_VERSION=2024-10-01
+ LUIS_API_KEY=your-luis-api-key
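These nine settings are read with python-dotenv elsewhere in the upload (UniversalReasoning.py, chat.py, database.py). A minimal startup check along these lines can fail fast when a key is missing; the helper below is illustrative and not part of the upload:

import os
from dotenv import load_dotenv

REQUIRED_VARS = [
    "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT",
    "AZURE_SQL_SERVER", "AZURE_SQL_DATABASE", "AZURE_SQL_USERNAME", "AZURE_SQL_PASSWORD",
    "LUIS_ENDPOINT", "LUIS_API_VERSION", "LUIS_API_KEY",
]

def check_required_env() -> None:
    # Illustrative helper: raise early if any expected .env entry is missing or empty.
    load_dotenv()
    missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
    if missing:
        raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")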
ChatSetup.json ADDED
@@ -0,0 +1 @@
+ {"systemPrompt":"Provide a detailed and structured summary of the described AI system's capabilities while maintaining clarity and precision.\n\n# Steps\n\n1. **Overview Writing**:\n - Summarize key functionalities, including the purpose of each feature.\n - Highlight the significance of integrated tools, libraries, and mechanisms in accomplishing these functionalities.\n - Explain the capabilities in concise yet descriptive terms to ensure coherence.\n\n2. **Categorization**: \n - Clearly organize the features into categories for better readability and comprehension (e.g., Input Handling, Real-Time Integration, Security).\n\n3. **Error Reporting**: \n - Investigate and describe the significance of the error message provided and potential contextual implications for the system's function.\n - Address the Azure OpenAI-related error, explaining its relevance, and if needed, suggest potential resolutions to ensure compatibility.\n\n# Output Format\nThe output should be presented in well-structured markdown bullet points or headings, categorized and sub-categorized as needed. Sub-level breakdowns (e.g., Text Input, Voice Input) may be used for features with multiple facets. Include:\n\n1. Introduction (1-2 sentences summarizing overall system goals.)\n2. Detailed Capabilities (Structured in categorized bullet points or headings.)\n3. Error Analysis (Detailed description of the Azure error provided and potential solutions or implications.)\n4. Optional: Suggestions or Recommendations (If necessary based on steps above or error analysis.)\n\n# Notes\n\n- Ensure all terms like library names or technical terms (e.g., SpeechRecognition, vaderSentiment) are presented exactly as described but briefly explain their contributions to the system.\n- Avoid changing any meaningful user-provided information unless brevity and readability require it. \n- Reference compatibility issues explicitly under the Error Analysis section.","fewShotExamples":[],"chatParameters":{"deploymentName":"gpt-4","maxResponseLength":4096,"temperature":0.7,"topProbablities":1,"stopSequences":[],"pastMessagesToInclude":"20","frequencyPenalty":2,"presencePenalty":2}}
UniversalReasoning.py ADDED
@@ -0,0 +1,255 @@
1
+ import asyncio
2
+ import json
3
+ import os
4
+ import logging
5
+ from typing import List
6
+
7
+ # Ensure vaderSentiment is installed
8
+ try:
9
+ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
10
+ except ModuleNotFoundError:
11
+ import subprocess
12
+ import sys
13
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "vaderSentiment"])
14
+ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
15
+
16
+ # Ensure nltk is installed and download required data
17
+ try:
18
+ import nltk
19
+ from nltk.tokenize import word_tokenize
20
+ nltk.download('punkt', quiet=True)
21
+ except ImportError:
22
+ import subprocess
23
+ import sys
24
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
25
+ import nltk
26
+ from nltk.tokenize import word_tokenize
27
+ nltk.download('punkt', quiet=True)
28
+
29
+ # Import perspectives
30
+ from perspectives import (
31
+ NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
32
+ NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
33
+ MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective
34
+ )
35
+
36
+ # Load environment variables
37
+ from dotenv import load_dotenv
38
+ load_dotenv()
39
+ azure_openai_api_key = os.getenv('AZURE_OPENAI_API_KEY')
40
+ azure_openai_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
41
+
42
+ # Setup Logging
43
+ def setup_logging(config):
44
+ if config.get('logging_enabled', True):
45
+ log_level = config.get('log_level', 'DEBUG').upper()
46
+ numeric_level = getattr(logging, log_level, logging.DEBUG)
47
+ logging.basicConfig(
48
+ filename='universal_reasoning.log',
49
+ level=numeric_level,
50
+ format='%(asctime)s - %(levelname)s - %(message)s'
51
+ )
52
+ else:
53
+ logging.disable(logging.CRITICAL)
54
+
55
+ # Load JSON configuration
56
+ def load_json_config(file_path):
57
+ if not os.path.exists(file_path):
58
+ logging.error(f"Configuration file '{file_path}' not found.")
59
+ return {}
60
+ try:
61
+ with open(file_path, 'r') as file:
62
+ config = json.load(file)
63
+ logging.info(f"Configuration loaded from '{file_path}'.")
64
+ return config
65
+ except json.JSONDecodeError as e:
66
+ logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
67
+ return {}
68
+
69
+ # Initialize NLP (basic tokenization)
70
+ def analyze_question(question):
71
+ tokens = word_tokenize(question)
72
+ logging.debug(f"Question tokens: {tokens}")
73
+ return tokens
74
+
75
+ # Define the Element class
76
+ class Element:
77
+ def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
78
+ self.name = name
79
+ self.symbol = symbol
80
+ self.representation = representation
81
+ self.properties = properties
82
+ self.interactions = interactions
83
+ self.defense_ability = defense_ability
84
+
85
+ def execute_defense_function(self):
86
+ message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
87
+ logging.info(message)
88
+ return message
89
+
90
+ # Define the CustomRecognizer class
91
+ class CustomRecognizer:
92
+ def recognize(self, question):
93
+ # Simple keyword-based recognizer for demonstration purposes
94
+ if any(element_name.lower() in question.lower() for element_name in ["hydrogen", "diamond"]):
95
+ return RecognizerResult(question)
96
+ return RecognizerResult(None)
97
+
98
+ def get_top_intent(self, recognizer_result):
99
+ if recognizer_result.text:
100
+ return "ElementDefense"
101
+ else:
102
+ return "None"
103
+
104
+ class RecognizerResult:
105
+ def __init__(self, text):
106
+ self.text = text
107
+
108
+ # Universal Reasoning Aggregator
109
+ class UniversalReasoning:
110
+ def __init__(self, config):
111
+ self.config = config
112
+ self.perspectives = self.initialize_perspectives()
113
+ self.elements = self.initialize_elements()
114
+ self.recognizer = CustomRecognizer()
115
+ # Initialize the sentiment analyzer
116
+ self.sentiment_analyzer = SentimentIntensityAnalyzer()
117
+
118
+ def initialize_perspectives(self):
119
+ perspective_names = self.config.get('enabled_perspectives', [
120
+ "newton",
121
+ "davinci",
122
+ "human_intuition",
123
+ "neural_network",
124
+ "quantum_computing",
125
+ "resilient_kindness",
126
+ "mathematical",
127
+ "philosophical",
128
+ "copilot",
129
+ "bias_mitigation"
130
+ ])
131
+ perspective_classes = {
132
+ "newton": NewtonPerspective,
133
+ "davinci": DaVinciPerspective,
134
+ "human_intuition": HumanIntuitionPerspective,
135
+ "neural_network": NeuralNetworkPerspective,
136
+ "quantum_computing": QuantumComputingPerspective,
137
+ "resilient_kindness": ResilientKindnessPerspective,
138
+ "mathematical": MathematicalPerspective,
139
+ "philosophical": PhilosophicalPerspective,
140
+ "copilot": CopilotPerspective,
141
+ "bias_mitigation": BiasMitigationPerspective
142
+ }
143
+ perspectives = []
144
+ for name in perspective_names:
145
+ cls = perspective_classes.get(name.lower())
146
+ if cls:
147
+ perspectives.append(cls(self.config))
148
+ logging.debug(f"Perspective '{name}' initialized.")
149
+ else:
150
+ logging.warning(f"Perspective '{name}' is not recognized and will be skipped.")
151
+ return perspectives
152
+
153
+ def initialize_elements(self):
154
+ elements = [
155
+ Element(
156
+ name="Hydrogen",
157
+ symbol="H",
158
+ representation="Lua",
159
+ properties=["Simple", "Lightweight", "Versatile"],
160
+ interactions=["Easily integrates with other languages and systems"],
161
+ defense_ability="Evasion"
162
+ ),
163
+ # You can add more elements as needed
164
+ Element(
165
+ name="Diamond",
166
+ symbol="D",
167
+ representation="Kotlin",
168
+ properties=["Modern", "Concise", "Safe"],
169
+ interactions=["Used for Android development"],
170
+ defense_ability="Adaptability"
171
+ )
172
+ ]
173
+ return elements
174
+
175
+ async def generate_response(self, question):
176
+ responses = []
177
+ tasks = []
178
+
179
+ # Generate responses from perspectives concurrently
180
+ for perspective in self.perspectives:
181
+ if asyncio.iscoroutinefunction(perspective.generate_response):
182
+ tasks.append(perspective.generate_response(question))
183
+ else:
184
+ # Wrap synchronous functions in coroutine
185
+ async def sync_wrapper(perspective, question):
186
+ return perspective.generate_response(question)
187
+ tasks.append(sync_wrapper(perspective, question))
188
+
189
+ perspective_results = await asyncio.gather(*tasks, return_exceptions=True)
190
+
191
+ for perspective, result in zip(self.perspectives, perspective_results):
192
+ if isinstance(result, Exception):
193
+ logging.error(f"Error generating response from {perspective.__class__.__name__}: {result}")
194
+ else:
195
+ responses.append(result)
196
+ logging.debug(f"Response from {perspective.__class__.__name__}: {result}")
197
+
198
+ # Handle element defense logic
199
+ recognizer_result = self.recognizer.recognize(question)
200
+ top_intent = self.recognizer.get_top_intent(recognizer_result)
201
+ if top_intent == "ElementDefense":
202
+ element_name = recognizer_result.text.strip()
203
+ element = next(
204
+ (el for el in self.elements if el.name.lower() in element_name.lower()),
205
+ None
206
+ )
207
+ if element:
208
+ defense_message = element.execute_defense_function()
209
+ responses.append(defense_message)
210
+ else:
211
+ logging.info(f"No matching element found for '{element_name}'")
212
+
213
+ ethical_considerations = self.config.get(
214
+ 'ethical_considerations',
215
+ "Always act with transparency, fairness, and respect for privacy."
216
+ )
217
+ responses.append(f"**Ethical Considerations:**\n{ethical_considerations}")
218
+
219
+ formatted_response = "\n\n".join(responses)
220
+ return formatted_response
221
+
222
+ def save_response(self, response):
223
+ if self.config.get('enable_response_saving', False):
224
+ save_path = self.config.get('response_save_path', 'responses.txt')
225
+ try:
226
+ with open(save_path, 'a', encoding='utf-8') as file:
227
+ file.write(response + '\n')
228
+ logging.info(f"Response saved to '{save_path}'.")
229
+ except Exception as e:
230
+ logging.error(f"Error saving response to '{save_path}': {e}")
231
+
232
+ def backup_response(self, response):
233
+ if self.config.get('backup_responses', {}).get('enabled', False):
234
+ backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
235
+ try:
236
+ with open(backup_path, 'a', encoding='utf-8') as file:
237
+ file.write(response + '\n')
238
+ logging.info(f"Response backed up to '{backup_path}'.")
239
+ except Exception as e:
240
+ logging.error(f"Error backing up response to '{backup_path}': {e}")
241
+
242
+ # Example usage
243
+ if __name__ == "__main__":
244
+ config = load_json_config('config.json')
245
+ # Add Azure OpenAI configurations to the config
246
+ config['azure_openai_api_key'] = azure_openai_api_key
247
+ config['azure_openai_endpoint'] = azure_openai_endpoint
248
+ setup_logging(config)
249
+ universal_reasoning = UniversalReasoning(config)
250
+ question = "Tell me about Hydrogen and its defense mechanisms."
251
+ response = asyncio.run(universal_reasoning.generate_response(question))
252
+ print(response)
253
+ if response:
254
+ universal_reasoning.save_response(response)
255
+ universal_reasoning.backup_response(response)
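UniversalReasoning.py imports its perspective classes from a perspectives module that is not among the 19 uploaded files. A minimal sketch of the interface those classes are assumed to expose (class names come from the import above; the bodies are placeholders, not the real implementations):

# perspectives.py (sketch of the assumed interface)
class BasePerspective:
    def __init__(self, config: dict):
        self.config = config

    async def generate_response(self, question: str) -> str:
        raise NotImplementedError

class NewtonPerspective(BasePerspective):
    async def generate_response(self, question: str) -> str:
        # Placeholder logic only; the real module is not part of this upload.
        return f"[Newton] Considering the forces at work in: {question}"

# DaVinciPerspective, HumanIntuitionPerspective, NeuralNetworkPerspective,
# QuantumComputingPerspective, ResilientKindnessPerspective, MathematicalPerspective,
# PhilosophicalPerspective, CopilotPerspective and BiasMitigationPerspective
# are assumed to follow the same pattern.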
bot_logic.py ADDED
@@ -0,0 +1,123 @@
+ import logging
+ import os
+ from botbuilder.core import TurnContext, MessageFactory, ConversationState, UserState
+ from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes
+ from dotenv import load_dotenv
+ from UniversalReasoning import UniversalReasoning  # Ensure correct import
+ from dialog_bot import DialogBot
+ from main_dialog import MainDialog
+ from sentiment_analysis import analyze_sentiment_vader  # used below; previously missing
+ from config import load_and_validate_config, setup_logging  # same helpers imported in mybot.py
+
+ # Load environment variables from .env file
+ load_dotenv()
12
+
13
+ class MyBot(DialogBot):
14
+ def __init__(self, conversation_state: ConversationState, user_state: UserState, dialog: MainDialog):
15
+ super(MyBot, self).__init__(conversation_state, user_state, dialog)
16
+ self.context = {}
17
+ self.feedback = []
18
+ config = load_and_validate_config('config.json', 'config_schema.json')
19
+ # Add Azure OpenAI and LUIS configurations to the config
20
+ config['azure_openai_api_key'] = os.getenv('AZURE_OPENAI_API_KEY')
21
+ config['azure_openai_endpoint'] = os.getenv('AZURE_OPENAI_ENDPOINT')
22
+ config['luis_endpoint'] = os.getenv('LUIS_ENDPOINT')
23
+ config['luis_api_version'] = os.getenv('LUIS_API_VERSION')
24
+ config['luis_api_key'] = os.getenv('LUIS_API_KEY')
25
+ setup_logging(config)
26
+ self.universal_reasoning = UniversalReasoning(config)
27
+
28
+ async def enhance_context_awareness(self, user_id: str, text: str) -> None:
29
+ """Enhance context awareness by analyzing the user's environment, activities, and emotional state."""
30
+ sentiment = analyze_sentiment_vader(text)
31
+ self.context[user_id].append({"text": text, "sentiment": sentiment})
32
+
33
+ async def proactive_learning(self, user_id: str, feedback: str) -> None:
34
+ """Encourage proactive learning by seeking feedback and exploring new topics."""
35
+ self.context[user_id].append({"feedback": feedback})
36
+ self.feedback.append({"user_id": user_id, "feedback": feedback})
37
+
38
+ async def ethical_decision_making(self, user_id: str, decision: str) -> None:
39
+ """Integrate ethical principles into decision-making processes."""
40
+ ethical_decision = f"Considering ethical principles, the decision is: {decision}"
41
+ self.context[user_id].append({"ethical_decision": ethical_decision})
42
+
43
+ async def emotional_intelligence(self, user_id: str, text: str) -> str:
44
+ """Develop emotional intelligence by recognizing and responding to user emotions."""
45
+ sentiment = analyze_sentiment_vader(text)
46
+ response = self.generate_emotional_response(sentiment, text)
47
+ self.context[user_id].append({"emotional_response": response})
48
+ return response
49
+
50
+ def generate_emotional_response(self, sentiment: dict, text: str) -> str:
51
+ """Generate an empathetic response based on the sentiment analysis."""
52
+ if sentiment['compound'] >= 0.05:
53
+ return "I'm glad to hear that! 😊 How can I assist you further?"
54
+ elif sentiment['compound'] <= -0.05:
55
+ return "I'm sorry to hear that. 😢 Is there anything I can do to help?"
56
+ else:
57
+ return "I understand. How can I assist you further?"
58
+
59
+ async def transparency_and_explainability(self, user_id: str, decision: str) -> str:
60
+ """Enable transparency by explaining the reasoning behind decisions."""
61
+ explanation = f"The decision was made based on the following context: {self.context[user_id]}"
62
+ self.context[user_id].append({"explanation": explanation})
63
+ return explanation
64
+
65
+ async def on_message_activity(self, turn_context: TurnContext) -> None:
66
+ """Handles incoming messages and generates responses."""
67
+ user_id = turn_context.activity.from_property.id
68
+ if user_id not in self.context:
69
+ self.context[user_id] = []
70
+ try:
71
+ message_text = turn_context.activity.text.strip().lower()
72
+ if "end" in message_text or "stop" in message_text:
73
+ await end_conversation(turn_context)
74
+ self.context.pop(user_id, None)
75
+ else:
76
+ self.context[user_id].append(turn_context.activity.text)
77
+ response = await self.generate_response(turn_context.activity.text, user_id)
78
+ await turn_context.send_activity(MessageFactory.text(response))
79
+ await self.request_feedback(turn_context, user_id)
80
+ except Exception as e:
81
+ await handle_error(turn_context, e)
82
+
83
+ async def generate_response(self, text: str, user_id: str) -> str:
84
+ """Generates a response using UniversalReasoning."""
85
+ try:
86
+ logging.info(f"Generating response for user_id: {user_id} with text: {text}")
87
+ response = await self.universal_reasoning.generate_response(text)  # generate_response is a coroutine
88
+ logging.info(f"Generated response: {response}")
89
+ return response
90
+ except Exception as e:
91
+ logging.error(f"Error generating response: {e}")
92
+ return "Sorry, I couldn't generate a response at this time."
93
+
94
+ async def request_feedback(self, turn_context: TurnContext, user_id: str) -> None:
95
+ """Request feedback from the user about the bot's response."""
96
+ feedback_prompt = "How would you rate my response? (good/neutral/bad)"
97
+ await turn_context.send_activity(MessageFactory.text(feedback_prompt))
98
+
99
+ async def handle_feedback(self, turn_context: TurnContext) -> None:
100
+ """Handle user feedback and store it for future analysis."""
101
+ user_id = turn_context.activity.from_property.id
102
+ feedback = turn_context.activity.text.lower()
103
+ if feedback in ["good", "neutral", "bad"]:
104
+ self.feedback.append({"user_id": user_id, "feedback": feedback})
105
+ await turn_context.send_activity(MessageFactory.text("Thank you for your feedback!"))
106
+ else:
107
+ await turn_context.send_activity(MessageFactory.text("Please provide feedback as 'good', 'neutral', or 'bad'."))
108
+
109
+ async def end_conversation(turn_context: TurnContext) -> None:
110
+ """Ends the conversation with the user."""
111
+ await turn_context.send_activity(
112
+ MessageFactory.text("Ending conversation from the skill...")
113
+ )
114
+ end_of_conversation = Activity(type=ActivityTypes.end_of_conversation)
115
+ end_of_conversation.code = EndOfConversationCodes.completed_successfully
116
+ await turn_context.send_activity(end_of_conversation)
117
+
118
+ async def handle_error(turn_context: TurnContext, error: Exception) -> None:
119
+ """Handles errors by logging them and notifying the user."""
120
+ logging.error(f"An error occurred: {error}")
121
+ await turn_context.send_activity(
122
+ MessageFactory.text("An error occurred. Please try again later.")
123
+ )
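bot_logic.py and mybot.py both rely on analyze_sentiment_vader from sentiment_analysis.py, which is part of this upload but not shown in this section. A typical vaderSentiment-based implementation that returns the polarity dict the callers index with sentiment['compound'] would look roughly like this (a sketch, not the file's actual contents):

# sentiment_analysis.py (sketch)
import logging
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

_analyzer = SentimentIntensityAnalyzer()

def analyze_sentiment_vader(text: str) -> dict:
    # Returns VADER polarity scores, e.g. {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}
    scores = _analyzer.polarity_scores(text)
    logging.debug(f"Sentiment scores for {text!r}: {scores}")
    return scores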
chat.py ADDED
@@ -0,0 +1,137 @@
1
+ import logging
2
+ import httpx
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ # Load environment variables from .env file
7
+ load_dotenv()
8
+
9
+ # Get the API key and endpoint from environment variables
10
+ api_key = os.getenv('AZURE_OPENAI_API_KEY')
11
+ endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
12
+
13
+ def list_fine_tuning_jobs() -> dict:
14
+ """List fine-tuning jobs.
15
+
16
+ Returns:
17
+ dict: The response from the API containing the list of fine-tuning jobs.
18
+ """
19
+ try:
20
+ headers = {
21
+ "Content-Type": "application/json",
22
+ "Authorization": f"Bearer {api_key}"
23
+ }
24
+ response = httpx.get(f"{endpoint}/openai/fine-tunes", headers=headers)
25
+ response.raise_for_status()
26
+ return response.json()
27
+ except httpx.HTTPStatusError as e:
28
+ logging.error(f"Error listing fine-tuning jobs: {e.response.text}")
29
+ return None
30
+ except Exception as e:
31
+ logging.error(f"Unexpected error: {e}")
32
+ return None
33
+
34
+ def upload_file_for_fine_tuning(file_path: str) -> dict:
35
+ """Upload a file for fine-tuning.
36
+
37
+ Args:
38
+ file_path (str): The path to the file to be uploaded.
39
+
40
+ Returns:
41
+ dict: The response from the API after uploading the file.
42
+ """
43
+ try:
44
+ headers = {
45
+ "Authorization": f"Bearer {api_key}"
46
+ }
47
+ with open(file_path, 'rb') as file:
48
+ files = {'file': file}
49
+ response = httpx.post(f"{endpoint}/openai/files", headers=headers, files=files)
50
+ response.raise_for_status()
51
+ return response.json()
52
+ except httpx.HTTPStatusError as e:
53
+ logging.error(f"Error uploading file for fine-tuning: {e.response.text}")
54
+ return None
55
+ except Exception as e:
56
+ logging.error(f"Unexpected error: {e}")
57
+ return None
58
+
59
+ def create_fine_tuning_job(training_file_id: str, model: str = "davinci") -> dict:
60
+ """Create a fine-tuning job.
61
+
62
+ Args:
63
+ training_file_id (str): The ID of the training file.
64
+ model (str): The model to be fine-tuned. Default is "davinci".
65
+
66
+ Returns:
67
+ dict: The response from the API after creating the fine-tuning job.
68
+ """
69
+ try:
70
+ headers = {
71
+ "Content-Type": "application/json",
72
+ "Authorization": f"Bearer {api_key}"
73
+ }
74
+ payload = {
75
+ "training_file": training_file_id,
76
+ "model": model
77
+ }
78
+ response = httpx.post(f"{endpoint}/openai/fine-tunes", headers=headers, json=payload)
79
+ response.raise_for_status()
80
+ return response.json()
81
+ except httpx.HTTPStatusError as e:
82
+ logging.error(f"Error creating fine-tuning job: {e.response.text}")
83
+ return None
84
+ except Exception as e:
85
+ logging.error(f"Unexpected error: {e}")
86
+ return None
87
+
88
+ def make_post_request(url: str, data: dict, headers: dict) -> dict:
89
+ """Make a POST request.
90
+
91
+ Args:
92
+ url (str): The URL to make the POST request to.
93
+ data (dict): The data to be sent in the POST request.
94
+ headers (dict): The headers to be sent in the POST request.
95
+
96
+ Returns:
97
+ dict: The response from the API after making the POST request.
98
+ """
99
+ try:
100
+ response = httpx.post(url, json=data, headers=headers)
101
+ response.raise_for_status()
102
+ return response.json()
103
+ except httpx.HTTPStatusError as e:
104
+ logging.error(f"Error making POST request: {e.response.text}")
105
+ return None
106
+ except Exception as e:
107
+ logging.error(f"Unexpected error: {e}")
108
+ return None
109
+
110
+ def azure_chat_completion_request(messages: list, deployment_id: str) -> str:
111
+ """Make a chat completion request to Azure OpenAI.
112
+
113
+ Args:
114
+ messages (list): The list of messages to be sent in the chat completion request.
115
+ deployment_id (str): The deployment ID for the chat completion request.
116
+
117
+ Returns:
118
+ str: The response content from the chat completion request.
119
+ """
120
+ try:
+ headers = {
+ "Content-Type": "application/json",
+ "api-key": api_key  # Azure OpenAI expects the key in an 'api-key' header rather than a Bearer token
+ }
+ payload = {
+ "messages": messages
+ }
+ # Azure OpenAI also requires an api-version query parameter; 2023-05-15 is an assumed value
+ response = httpx.post(f"{endpoint}/openai/deployments/{deployment_id}/chat/completions?api-version=2023-05-15", headers=headers, json=payload)
130
+ response.raise_for_status()
131
+ return response.json()["choices"][0]["message"]["content"].strip()
132
+ except httpx.HTTPStatusError as e:
133
+ logging.error(f"Error making chat completion request: {e.response.text}")
134
+ return None
135
+ except Exception as e:
136
+ logging.error(f"Unexpected error: {e}")
137
+ return None
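A short usage sketch for azure_chat_completion_request; the deployment name is a placeholder:

from chat import azure_chat_completion_request

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize what this bot can do."},
]
reply = azure_chat_completion_request(messages, deployment_id="your-deployment-name")
if reply is not None:
    print(reply)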
config.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "subscription_id": "5dd32870-bdc6-40cd-981a-f299e93a6439",
+   "resource_group": "Justforpi",
+   "workspace_name": "featurestone"
+ }
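Beyond these Azure workspace fields, UniversalReasoning.py and mybot.py read further keys from the loaded config (enabled_perspectives, logging_enabled, log_level, ethical_considerations, response saving and backup options). A sketch of a fuller configuration, shown as the dict the code ends up working with; the added values are examples, not part of the upload:

config = {
    "subscription_id": "5dd32870-bdc6-40cd-981a-f299e93a6439",
    "resource_group": "Justforpi",
    "workspace_name": "featurestone",
    # Keys consumed by UniversalReasoning.py; values below are illustrative.
    "logging_enabled": True,
    "log_level": "DEBUG",
    "enabled_perspectives": ["newton", "davinci", "human_intuition"],
    "ethical_considerations": "Always act with transparency, fairness, and respect for privacy.",
    "enable_response_saving": True,
    "response_save_path": "responses.txt",
    "backup_responses": {"enabled": False, "backup_path": "backup_responses.txt"},
}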
database.py ADDED
@@ -0,0 +1,49 @@
1
+ import pyodbc
2
+ import logging
3
+ import os
4
+ from dotenv import load_dotenv
5
+
6
+ # Load environment variables from .env file
7
+ load_dotenv()
8
+
9
+ def connect_to_database() -> pyodbc.Connection:
10
+ """Connect to the Azure SQL Database."""
11
+ try:
12
+ server = os.getenv('AZURE_SQL_SERVER')
13
+ database = os.getenv('AZURE_SQL_DATABASE')
14
+ username = os.getenv('AZURE_SQL_USERNAME')
15
+ password = os.getenv('AZURE_SQL_PASSWORD')
16
+ driver = '{ODBC Driver 17 for SQL Server}'
17
+
18
+ conn_str = f'DRIVER={driver};SERVER={server};DATABASE={database};UID={username};PWD={password}'
19
+ conn = pyodbc.connect(conn_str)
20
+ logging.info(f"Connected to the Azure SQL Database at {server}")
21
+ return conn
22
+ except pyodbc.Error as e:
23
+ logging.error(f"Error connecting to the Azure SQL Database: {e}")
24
+ return None
25
+
26
+ def close_database_connection(conn: pyodbc.Connection) -> None:
27
+ """Close the database connection."""
28
+ try:
29
+ if conn:
30
+ conn.close()
31
+ logging.info("Database connection closed.")
32
+ except pyodbc.Error as e:
33
+ logging.error(f"Error closing the database connection: {e}")
34
+
35
+ # Example usage with context management
36
+ class DatabaseConnection:
37
+ def __enter__(self) -> pyodbc.Connection:
38
+ self.conn = connect_to_database()
39
+ return self.conn
40
+
41
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
42
+ close_database_connection(self.conn)
43
+
44
+ # Example usage
45
+ if __name__ == "__main__":
46
+ with DatabaseConnection() as conn:
47
+ if conn:
48
+ # Perform database operations
49
+ pass
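A short usage sketch for the DatabaseConnection context manager; the table and values are illustrative (mybot.py writes to a UserMessages table):

from database import DatabaseConnection

with DatabaseConnection() as conn:
    if conn:
        cursor = conn.cursor()
        cursor.execute(
            "INSERT INTO UserMessages (UserId, Message) VALUES (?, ?)",
            ("user-123", "Hello bot"),
        )
        conn.commit()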
dialog_bot.py ADDED
@@ -0,0 +1,31 @@
1
+ from botbuilder.core import ActivityHandler, ConversationState, TurnContext, UserState
2
+ from botbuilder.dialogs import Dialog
3
+ from helpers.dialog_helper import DialogHelper
4
+
5
+
6
+ class DialogBot(ActivityHandler):
7
+ def __init__(
8
+ self,
9
+ conversation_state: ConversationState,
10
+ user_state: UserState,
11
+ dialog: Dialog,
12
+ ):
13
+ super(DialogBot, self).__init__()
14
+ self.conversation_state = conversation_state
15
+ self.user_state = user_state
16
+ self.dialog = dialog
17
+
18
+ async def on_turn(self, turn_context: TurnContext) -> None:
19
+ await super().on_turn(turn_context)
20
+
21
+ # Save any state changes that might have occurred during the turn.
22
+ await self.conversation_state.save_changes(turn_context)
23
+ await self.user_state.save_changes(turn_context)
24
+
25
+ async def on_message_activity(self, turn_context: TurnContext) -> None:
26
+ # Run the Dialog with the new message Activity.
27
+ await DialogHelper.run_dialog(
28
+ self.dialog,
29
+ turn_context,
30
+ self.conversation_state.create_property("DialogState"),
31
+ )
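DialogBot imports DialogHelper from helpers.dialog_helper, which is not included in this upload. The Bot Framework Python samples implement that helper roughly as follows; this is a sketch of the assumed module, not the repository's actual file:

# helpers/dialog_helper.py (sketch)
from botbuilder.core import StatePropertyAccessor, TurnContext
from botbuilder.dialogs import Dialog, DialogSet, DialogTurnStatus

class DialogHelper:
    @staticmethod
    async def run_dialog(dialog: Dialog, turn_context: TurnContext, accessor: StatePropertyAccessor):
        dialog_set = DialogSet(accessor)
        dialog_set.add(dialog)
        dialog_context = await dialog_set.create_context(turn_context)
        results = await dialog_context.continue_dialog()
        if results.status == DialogTurnStatus.Empty:
            await dialog_context.begin_dialog(dialog.id)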
main.py ADDED
@@ -0,0 +1,70 @@
1
+ import os
2
+ import logging
3
+ from aiohttp import web
4
+ from botbuilder.core import (
5
+ BotFrameworkAdapterSettings,
6
+ ConversationState,
7
+ MemoryStorage,
8
+ UserState,
9
+ )
10
+ from botbuilder.integration.aiohttp import BotFrameworkHttpAdapter
11
+ from botbuilder.schema import Activity
12
+ from dotenv import load_dotenv
13
+ from utils import show_privacy_consent
14
+ from UniversalReasoning import UniversalReasoning, load_json_config  # module file is UniversalReasoning.py
15
+ from mybot import MyBot # Import updated MyBot class
16
+ from main_dialog import MainDialog
17
+
18
+ # Load environment variables from .env file
19
+ load_dotenv()
20
+
21
+ # Configure logging
22
+ logging.basicConfig(
23
+ level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
24
+ )
25
+
26
+ # Show privacy consent dialog and check user response
27
+ if not show_privacy_consent():
28
+ logging.info("User declined data collection and privacy policy. Exiting application.")
29
+ exit()
30
+
31
+ # Load configuration
32
+ config = load_json_config("config.json")
33
+ config["azure_openai_api_key"] = os.getenv("AZURE_OPENAI_API_KEY")
34
+ config["azure_openai_endpoint"] = os.getenv("AZURE_OPENAI_ENDPOINT")
35
+
36
+ # Initialize UniversalReasoning
37
+ universal_reasoning = UniversalReasoning(config)
38
+
39
+ # Create adapter
40
+ settings = BotFrameworkAdapterSettings(
41
+ app_id=os.getenv("MICROSOFT_APP_ID"),
42
+ app_password=os.getenv("MICROSOFT_APP_PASSWORD"),
43
+ )
44
+ adapter = BotFrameworkHttpAdapter(settings)
45
+
46
+ # Create MemoryStorage, ConversationState, and UserState
47
+ memory_storage = MemoryStorage()
48
+ conversation_state = ConversationState(memory_storage)
49
+ user_state = UserState(memory_storage)
50
+
51
+ # Create the main dialog
52
+ main_dialog = MainDialog("MainDialog")
53
+
54
+ # Create the bot and pass the universal_reasoning instance
55
+ bot = MyBot(conversation_state, user_state, main_dialog, universal_reasoning)
56
+
57
+ # Listen for incoming requests on /api/messages
58
+ async def messages(req):
59
+ body = await req.json()
60
+ activity = Activity().deserialize(body)
61
+ auth_header = req.headers.get("Authorization", "")
62
+
63
+ response = await adapter.process_activity(activity, auth_header, bot.on_turn)
64
+ return web.Response(status=response.status)
65
+
66
+ app = web.Application()
67
+ app.router.add_post("/api/messages", messages)
68
+
69
+ if __name__ == "__main__":
70
+ web.run_app(app, host="localhost", port=3978)
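With MICROSOFT_APP_ID and MICROSOFT_APP_PASSWORD left unset, the endpoint accepts unauthenticated local traffic, so a quick smoke test can post a minimal message activity to it; the field values below are illustrative, and a full round trip (delivering the reply back) still needs the Bot Framework Emulator:

import httpx

activity = {
    "type": "message",
    "text": "Tell me about Hydrogen.",
    "from": {"id": "user1"},
    "recipient": {"id": "bot"},
    "conversation": {"id": "conv1"},
    "channelId": "emulator",
    "serviceUrl": "http://localhost:3978",
}
r = httpx.post("http://localhost:3978/api/messages", json=activity)
print(r.status_code)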
main_dialog.py ADDED
@@ -0,0 +1,49 @@
1
+ from botbuilder.dialogs import ComponentDialog, WaterfallDialog, WaterfallStepContext
2
+ from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
3
+ from botbuilder.core import MessageFactory
4
+
5
+ class MainDialog(ComponentDialog):
6
+ def __init__(self, dialog_id: str = "MainDialog"):
7
+ super(MainDialog, self).__init__(dialog_id or MainDialog.__name__)
8
+
9
+ self.add_dialog(TextPrompt("TextPrompt"))
10
+ self.add_dialog(
11
+ WaterfallDialog(
12
+ "WFDialog",
13
+ [self.initial_step, self.process_step, self.final_step],
14
+ )
15
+ )
16
+
17
+ self.initial_dialog_id = "WFDialog"
18
+
19
+ async def initial_step(self, step_context: WaterfallStepContext) -> WaterfallStepContext:
20
+ return await step_context.prompt(
21
+ "TextPrompt",
22
+ PromptOptions(prompt=MessageFactory.text("What is your name?")),
23
+ )
24
+
25
+ async def process_step(self, step_context: WaterfallStepContext) -> WaterfallStepContext:
26
+ # Store the user's name in dialog state
27
+ step_context.values["name"] = step_context.result
28
+ return await step_context.prompt(
29
+ "TextPrompt",
30
+ PromptOptions(
31
+ prompt=MessageFactory.text(
32
+ f"Hello {step_context.result}! How can I assist you today?"
33
+ )
34
+ ),
35
+ )
36
+
37
+ async def final_step(self, step_context: WaterfallStepContext) -> WaterfallStepContext:
38
+ user_message = step_context.result
39
+
40
+ # Here you could integrate the UniversalReasoning module
41
+ # For example, you might access it via the bot's context
42
+ # Since it's not directly available here, we'll pass the message back
43
+
44
+ # Send a confirmation message to the user
45
+ await step_context.context.send_activity(
46
+ MessageFactory.text(f"You said: {user_message}")
47
+ )
48
+
49
+ return await step_context.end_dialog()
mybot.py ADDED
@@ -0,0 +1,360 @@
1
+ import os
2
+ import logging
3
+ import random
4
+ from botbuilder.core import TurnContext, MessageFactory
5
+ from botbuilder.schema import Activity, ActivityTypes, EndOfConversationCodes
6
+ from tenacity import retry, wait_random_exponential, stop_after_attempt
7
+ import importlib
8
+ from sentiment_analysis import analyze_sentiment_vader
9
+ from config import load_and_validate_config, setup_logging
10
+ from UniversalReasoning import UniversalReasoning  # module file is UniversalReasoning.py
11
+ from dotenv import load_dotenv
12
+ import json
13
+ from chat import azure_chat_completion_request # Import the function from chat.py
14
+ from database import DatabaseConnection # Import the database connection
15
+
16
+ # Load environment variables from .env file
17
+ load_dotenv()
18
+
19
+ class MyBot:
20
+ def __init__(self, conversation_state, user_state, dialog, universal_reasoning):
21
+ self.conversation_state = conversation_state
22
+ self.user_state = user_state
23
+ self.dialog = dialog
24
+ self.universal_reasoning = universal_reasoning
25
+ self.context = {}
26
+ self.feedback = []
27
+ config = load_and_validate_config('config.json', 'config_schema.json')
28
+ # Add Azure OpenAI and LUIS configurations to the config
29
+ config['azure_openai_api_key'] = os.getenv('AZURE_OPENAI_API_KEY')
30
+ config['azure_openai_endpoint'] = os.getenv('AZURE_OPENAI_ENDPOINT')
31
+ config['luis_endpoint'] = os.getenv('LUIS_ENDPOINT')
32
+ config['luis_api_version'] = os.getenv('LUIS_API_VERSION')
33
+ config['luis_api_key'] = os.getenv('LUIS_API_KEY')
34
+ setup_logging(config)
35
+
36
+ async def enhance_context_awareness(self, user_id: str, text: str) -> None:
37
+ """Enhance context awareness by analyzing the user's environment, activities, and emotional state."""
38
+ sentiment = analyze_sentiment_vader(text)
39
+ if user_id not in self.context:
40
+ self.context[user_id] = []
41
+ self.context[user_id].append({"text": text, "sentiment": sentiment})
42
+
43
+ async def proactive_learning(self, user_id: str, feedback: str) -> None:
44
+ """Encourage proactive learning by seeking feedback and exploring new topics."""
45
+ if user_id not in self.context:
46
+ self.context[user_id] = []
47
+ self.context[user_id].append({"feedback": feedback})
48
+ self.feedback.append({"user_id": user_id, "feedback": feedback})
49
+
50
+ async def ethical_decision_making(self, user_id: str, decision: str) -> None:
51
+ """Integrate ethical principles into decision-making processes."""
52
+ ethical_decision = f"Considering ethical principles, the decision is: {decision}"
53
+ if user_id not in self.context:
54
+ self.context[user_id] = []
55
+ self.context[user_id].append({"ethical_decision": ethical_decision})
56
+
57
+ async def emotional_intelligence(self, user_id: str, text: str) -> str:
58
+ """Develop emotional intelligence by recognizing and responding to user emotions."""
59
+ sentiment = analyze_sentiment_vader(text)
60
+ response = self.generate_emotional_response(sentiment, text)
61
+ if user_id not in self.context:
62
+ self.context[user_id] = []
63
+ self.context[user_id].append({"emotional_response": response})
64
+ return response
65
+
66
+ def generate_emotional_response(self, sentiment: dict, text: str) -> str:
67
+ """Generate an empathetic response based on the sentiment analysis."""
68
+ if sentiment['compound'] >= 0.05:
69
+ return "I'm glad to hear that! 😊 How can I assist you further?"
70
+ elif sentiment['compound'] <= -0.05:
71
+ return "I'm sorry to hear that. 😢 Is there anything I can do to help?"
72
+ else:
73
+ return "I understand. How can I assist you further?"
74
+
75
+ async def transparency_and_explainability(self, user_id: str, decision: str) -> str:
76
+ """Enable transparency by explaining the reasoning behind decisions."""
77
+ explanation = f"The decision was made based on the following context: {self.context[user_id]}"
78
+ if user_id not in self.context:
79
+ self.context[user_id] = []
80
+ self.context[user_id].append({"explanation": explanation})
81
+ return explanation
82
+
83
+ async def on_message_activity(self, turn_context: TurnContext) -> None:
84
+ """Handles incoming messages and generates responses."""
85
+ user_id = turn_context.activity.from_property.id
86
+ if user_id not in self.context:
87
+ self.context[user_id] = []
88
+ try:
89
+ if "end" in turn_context.activity.text.lower() or "stop" in turn_context.activity.text.lower():
90
+ await end_conversation(turn_context)
91
+ self.context.pop(user_id, None)
92
+ else:
93
+ self.context[user_id].append(turn_context.activity.text)
94
+ response = await self.generate_response(turn_context.activity.text, user_id)
95
+ await turn_context.send_activity(MessageFactory.text(response))
96
+ await self.request_feedback(turn_context, user_id)
97
+
98
+ # Example database operation
99
+ with DatabaseConnection() as conn:
100
+ if conn:
101
+ cursor = conn.cursor()
102
+ cursor.execute("INSERT INTO UserMessages (UserId, Message) VALUES (?, ?)", user_id, turn_context.activity.text)
103
+ conn.commit()
104
+
105
+ except Exception as e:
106
+ await handle_error(turn_context, e)
107
+
108
+ async def generate_response(self, text: str, user_id: str) -> str:
109
+ """Generates a response using Azure OpenAI's API, Universal Reasoning, and various perspectives."""
110
+ try:
111
+ logging.info(f"Generating response for user_id: {user_id} with text: {text}")
112
+ # Delegate to the injected UniversalReasoning aggregator (this class does not define self.perspectives)
+ combined_response = await self.universal_reasoning.generate_response(text)
+ logging.info(f"Combined response: {combined_response}")
+ return combined_response
124
+ except Exception as e:
125
+ logging.error(f"Error generating response: {e}")
126
+ return "Sorry, I couldn't generate a response at this time."
127
+
128
+ async def request_feedback(self, turn_context: TurnContext, user_id: str) -> None:
129
+ """Request feedback from the user about the bot's response."""
130
+ feedback_prompt = "How would you rate my response? (good/neutral/bad)"
131
+ await turn_context.send_activity(MessageFactory.text(feedback_prompt))
132
+
133
+ async def handle_feedback(self, turn_context: TurnContext) -> None:
134
+ """Handle user feedback and store it for future analysis."""
135
+ user_id = turn_context.activity.from_property.id
136
+ feedback = turn_context.activity.text.lower()
137
+ if feedback in ["good", "neutral", "bad"]:
138
+ self.feedback.append({"user_id": user_id, "feedback": feedback})
139
+ await turn_context.send_activity(MessageFactory.text("Thank you for your feedback!"))
140
+ else:
141
+ await turn_context.send_activity(MessageFactory.text("Please provide feedback as 'good', 'neutral', or 'bad'."))
142
+
143
+ async def end_conversation(turn_context: TurnContext) -> None:
144
+ """Ends the conversation with the user."""
145
+ await turn_context.send_activity(
146
+ MessageFactory.text("Ending conversation from the skill...")
147
+ )
148
+ end_of_conversation = Activity(type=ActivityTypes.end_of_conversation)
149
+ end_of_conversation.code = EndOfConversationCodes.completed_successfully
150
+ await turn_context.send_activity(end_of_conversation)
151
+
152
+ async def handle_error(turn_context: TurnContext, error: Exception) -> None:
153
+ """Handles errors by logging them and notifying the user."""
154
+ logging.error(f"An error occurred: {error}")
155
+ await turn_context.send_activity(
156
+ MessageFactory.text("An error occurred. Please try again later.")
157
+ )
158
+
159
+ def show_privacy_consent() -> bool:
160
+ """Display a pop-up window to obtain user consent for data collection and privacy."""
161
+ import tkinter as tk
162
+
163
+ def on_accept():
164
+ user_consent.set(True)
165
+ root.destroy()
166
+
167
+ def on_decline():
168
+ user_consent.set(False)
169
+ root.destroy()
170
+
171
+ root = tk.Tk()
172
+ root.title("Data Permission and Privacy")
173
+ message = ("We value your privacy. By using this application, you consent to the collection and use of your data "
174
+ "as described in our privacy policy. Do you agree to proceed?")
175
+ label = tk.Label(root, text=message, wraplength=400, justify="left")
176
+ label.pack(padx=20, pady=20)
177
+ button_frame = tk.Frame(root)
178
+ button_frame.pack(pady=10)
179
+ accept_button = tk.Button(button_frame, text="Accept", command=on_accept)
180
+ accept_button.pack(side="left", padx=10)
181
+ decline_button = tk.Button(button_frame, text="Decline", command=on_decline)
182
+ decline_button.pack(side="right", padx=10)
183
+ user_consent = tk.BooleanVar()
184
+ root.mainloop()
185
+ return user_consent.get()
186
+
187
+ # Example usage of MyBot class (construction needs state, dialog and reasoning objects; see main.py)
+ # bot = MyBot(conversation_state, user_state, main_dialog, universal_reasoning)
189
+
190
+ # Functions based on JSON configuration
191
+ def newton_thoughts(question: str) -> str:
192
+ """Apply Newton's laws to the given question."""
193
+ return apply_newtons_laws(question)
194
+
195
+ def apply_newtons_laws(question: str) -> str:
196
+ """Apply Newton's laws to the given question."""
197
+ if not question:
198
+ return 'No question to think about.'
199
+ complexity = len(question)
200
+ force = mass_of_thought(question) * acceleration_of_thought(complexity)
201
+ return f'Thought force: {force}'
202
+
203
+ def mass_of_thought(question: str) -> int:
204
+ """Calculate the mass of thought based on the question length."""
205
+ return len(question)
206
+
207
+ def acceleration_of_thought(complexity: int) -> float:
208
+ """Calculate the acceleration of thought based on the complexity."""
209
+ return complexity / 2
210
+
211
+ def davinci_insights(question: str) -> str:
212
+ """Generate insights like Da Vinci for the given question."""
213
+ return think_like_davinci(question)
214
+
215
+ def think_like_davinci(question: str) -> str:
216
+ """Generate insights like Da Vinci for the given question."""
217
+ perspectives = [
218
+ f"What if we view '{question}' from the perspective of the stars?",
219
+ f"Consider '{question}' as if it's a masterpiece of the universe.",
220
+ f"Reflect on '{question}' through the lens of nature's design."
221
+ ]
222
+ return random.choice(perspectives)
223
+
224
+ def human_intuition(question: str) -> str:
225
+ """Provide human intuition for the given question."""
226
+ intuition = [
227
+ "How does this question make you feel?",
228
+ "What emotional connection do you have with this topic?",
229
+ "What does your gut instinct tell you about this?"
230
+ ]
231
+ return random.choice(intuition)
232
+
233
+ def neural_network_thinking(question: str) -> str:
234
+ """Apply neural network thinking to the given question."""
235
+ neural_perspectives = [
236
+ f"Process '{question}' through a multi-layered neural network.",
237
+ f"Apply deep learning to uncover hidden insights about '{question}'.",
238
+ f"Use machine learning to predict patterns in '{question}'."
239
+ ]
240
+ return random.choice(neural_perspectives)
241
+
242
+ def quantum_computing_thinking(question: str) -> str:
243
+ """Apply quantum computing principles to the given question."""
244
+ quantum_perspectives = [
245
+ f"Consider '{question}' using quantum superposition principles.",
246
+ f"Apply quantum entanglement to find connections in '{question}'.",
247
+ f"Utilize quantum computing to solve '{question}' more efficiently."
248
+ ]
249
+ return random.choice(quantum_perspectives)
250
+
251
+ def resilient_kindness(question: str) -> str:
252
+ """Provide perspectives of resilient kindness."""
253
+ kindness_perspectives = [
254
+ "Despite losing everything, seeing life as a chance to grow.",
255
+ "Finding strength in kindness after facing life's hardest trials.",
256
+ "Embracing every challenge as an opportunity for growth and compassion."
257
+ ]
258
+ return random.choice(kindness_perspectives)
259
+
260
+ def identify_and_refute_fallacies(argument: str) -> str:
261
+ """Identify and refute common logical fallacies in the argument."""
262
+ refutations = [
263
+ "This is an ad hominem fallacy. Let's focus on the argument itself rather than attacking the person.",
264
+ "This is a straw man fallacy. The argument is being misrepresented.",
265
+ "This is a false dilemma fallacy. There are more options than presented.",
266
+ "This is a slippery slope fallacy. The conclusion does not necessarily follow from the premise.",
267
+ "This is circular reasoning. The argument's conclusion is used as a premise.",
268
+ "This is a hasty generalization. The conclusion is based on insufficient evidence.",
269
+ "This is a red herring fallacy. The argument is being diverted to an irrelevant topic.",
270
+ "This is a post hoc ergo propter hoc fallacy. Correlation does not imply causation.",
271
+ "This is an appeal to authority fallacy. The argument relies on the opinion of an authority figure.",
272
+ "This is a bandwagon fallacy. The argument assumes something is true because many people believe it.",
273
+ "This is a false equivalence fallacy. The argument equates two things that are not equivalent."
274
+ ]
275
+ return random.choice(refutations)
276
+
277
+ def universal_reasoning(question: str) -> str:
278
+ """Generate a comprehensive response using various reasoning methods."""
279
+ responses = [
280
+ newton_thoughts(question),
281
+ davinci_insights(question),
282
+ human_intuition(question),
283
+ neural_network_thinking(question),
284
+ quantum_computing_thinking(question),
285
+ resilient_kindness(question),
286
+ identify_and_refute_fallacies(question)
287
+ ]
288
+ return "\n".join(responses)
289
+
290
+ @retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
291
+ def chat_completion_request(messages: list, deployment_id: str) -> str:
292
+ """Make a chat completion request to Azure OpenAI."""
293
+ try:
294
+ import openai
+ # Point the pre-1.0 openai SDK at Azure OpenAI (endpoint and key come from the .env file)
+ openai.api_type = "azure"
+ openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
+ openai.api_key = os.getenv("AZURE_OPENAI_API_KEY")
+ openai.api_version = "2023-05-15"  # assumed API version
+ response = openai.ChatCompletion.create(
+ engine=deployment_id, # Use the deployment name of your Azure OpenAI model
+ messages=messages
+ )
299
+ return response.choices[0].message.content.strip()
300
+ except openai.error.OpenAIError as e:
301
+ logging.error("Unable to generate ChatCompletion response")
302
+ logging.error(f"Exception: {e}")
303
+ return f"Error: {e}"
304
+
305
+ def get_internet_answer(question: str, deployment_id: str) -> str:
306
+ """Get an answer using Azure OpenAI's chat completion request."""
307
+ messages = [
308
+ {"role": "system", "content": "You are a helpful assistant."},
309
+ {"role": "user", "content": question}
310
+ ]
311
+ return chat_completion_request(messages, deployment_id=deployment_id)
312
+
313
+ def reflect_on_decisions() -> str:
314
+ """Regularly reflect on your decisions and processes used."""
315
+ reflection_message = (
316
+ "Regularly reflecting on your decisions, the processes you used, the information you considered, "
317
+ "and the perspectives you may have missed. Reflection is a cornerstone of learning from experience."
318
+ )
319
+ return reflection_message
320
+
321
+ def process_questions_from_json(file_path: str):
322
+ """Process questions from a JSON file and call the appropriate functions."""
323
+ with open(file_path, 'r') as file:
324
+ questions_data = json.load(file)
325
+ for question_data in questions_data:
326
+ question = question_data['question']
327
+ print(f"Question: {question}")
328
+
329
+ for function_data in question_data['functions']:
330
+ function_name = function_data['name']
331
+ function_description = function_data['description']
332
+ function_parameters = function_data['parameters']
333
+
334
+ print(f"Function: {function_name}")
335
+ print(f"Description: {function_description}")
336
+
337
+ # Call the function dynamically
338
+ if function_name in globals():
339
+ function = globals()[function_name]
340
+ response = function(**function_parameters)
341
+ print(f"Response: {response}")
342
+ else:
343
+ print(f"Function {function_name} not found.")
344
+
345
+ if __name__ == "__main__":
346
+ if show_privacy_consent():
347
+ process_questions_from_json('qustions.json')  # matches the filename included in this upload
348
+ question = "What is the meaning of life?"
349
+ deployment_id = "your-deployment-name" # Replace with your Azure deployment name
350
+ print("Newton's Thoughts:", newton_thoughts(question))
351
+ print("Da Vinci's Insights:", davinci_insights(question))
352
+ print("Human Intuition:", human_intuition(question))
353
+ print("Neural Network Thinking:", neural_network_thinking(question))
354
+ print("Quantum Computing Thinking:", quantum_computing_thinking(question))
355
+ print("Resilient Kindness:", resilient_kindness(question))
356
+ print("Universal Reasoning:", universal_reasoning(question))
357
+ print("Internet Answer:", get_internet_answer(question, deployment_id))
358
+ else:
359
+ print("User did not consent to data collection. Exiting application.")
360
+ print(reflect_on_decisions())
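mybot.py (and bot_logic.py) import load_and_validate_config and setup_logging from a config module that is not among the uploaded files. A plausible jsonschema-based sketch of those helpers, with names matching the imports and bodies that are assumptions:

# config.py (sketch)
import json
import logging
import jsonschema

def load_and_validate_config(config_path: str, schema_path: str) -> dict:
    # Load config.json and validate it against config_schema.json (also not in this upload).
    with open(config_path, "r") as f:
        config = json.load(f)
    with open(schema_path, "r") as f:
        schema = json.load(f)
    jsonschema.validate(instance=config, schema=schema)
    return config

def setup_logging(config: dict) -> None:
    # Mirrors the logging setup in UniversalReasoning.py.
    level = getattr(logging, config.get("log_level", "INFO").upper(), logging.INFO)
    logging.basicConfig(level=level, format="%(asctime)s - %(levelname)s - %(message)s")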
mybotdata.py ADDED
@@ -0,0 +1,156 @@
1
+ import faicons as fa
2
+ import plotly.express as px
3
+
4
+ # Load data and compute static values
5
+ from shared import app_dir, bot_data
6
+ from shinywidgets import render_plotly
7
+
8
+ from shiny import reactive, render
9
+ from shiny.express import input, ui
10
+
11
+ data_rng = (min(bot_data.interactions), max(bot_data.interactions))
12
+
13
+ # Add page title and sidebar
14
+ ui.page_opts(title="MyBot Interactions", fillable=True)
15
+
16
+ with ui.sidebar(open="desktop"):
17
+ ui.input_slider(
18
+ "interactions",
19
+ "Number of Interactions",
20
+ min=data_rng[0],
21
+ max=data_rng[1],
22
+ value=data_rng,
23
+ )
24
+ ui.input_checkbox_group(
25
+ "time_period",
26
+ "Time Period",
27
+ ["Morning", "Afternoon", "Evening", "Night"],
28
+ selected=["Morning", "Afternoon", "Evening", "Night"],
29
+ inline=True,
30
+ )
31
+ ui.input_action_button("reset", "Reset filter")
32
+
33
+ # Add main content
34
+ ICONS = {
35
+ "user": fa.icon_svg("user", "regular"),
36
+ "chat": fa.icon_svg("comments"),
37
+ "interaction": fa.icon_svg("exchange-alt"),
38
+ "ellipsis": fa.icon_svg("ellipsis"),
39
+ }
40
+
41
+ with ui.layout_columns(fill=False):
42
+ with ui.value_box(showcase=ICONS["user"]):
43
+ "Total Users"
44
+
45
+ @render.express
46
+ def total_users():
47
+ bot_data_filtered().shape[0]
48
+
49
+ with ui.value_box(showcase=ICONS["chat"]):
50
+ "Average Interactions per User"
51
+
52
+ @render.express
53
+ def average_interactions():
54
+ d = bot_data_filtered()
55
+ if d.shape[0] > 0:
56
+ avg_interactions = d.interactions.mean()
57
+ f"{avg_interactions:.1f}"
58
+
59
+ with ui.value_box(showcase=ICONS["interaction"]):
60
+ "Total Interactions"
61
+
62
+ @render.express
63
+ def total_interactions():
64
+ d = bot_data_filtered()
65
+ if d.shape[0] > 0:
66
+ total_interactions = d.interactions.sum()
67
+ f"{total_interactions}"
68
+
69
+ with ui.layout_columns(col_widths=[6, 6, 12]):
70
+ with ui.card(full_screen=True):
71
+ ui.card_header("Interaction Data")
72
+
73
+ @render.data_frame
74
+ def table():
75
+ return render.DataGrid(bot_data_filtered())
76
+
77
+ with ui.card(full_screen=True):
78
+ with ui.card_header(class_="d-flex justify-content-between align-items-center"):
79
+ "Interactions Over Time"
80
+ with ui.popover(title="Add a color variable", placement="top"):
81
+ ICONS["ellipsis"]
82
+ ui.input_radio_buttons(
83
+ "scatter_color",
84
+ None,
85
+ ["none", "user_type", "time_period"],
86
+ inline=True,
87
+ )
88
+
89
+ @render_plotly
90
+ def scatterplot():
91
+ color = input.scatter_color()
92
+ return px.scatter(
93
+ bot_data_filtered(),
94
+ x="time",
95
+ y="interactions",
96
+ color=None if color == "none" else color,
97
+ trendline="lowess",
98
+ )
99
+
100
+ with ui.card(full_screen=True):
101
+ with ui.card_header(class_="d-flex justify-content-between align-items-center"):
102
+ "Interaction Types"
103
+ with ui.popover(title="Add a color variable"):
104
+ ICONS["ellipsis"]
105
+ ui.input_radio_buttons(
106
+ "interaction_type",
107
+ "Split by:",
108
+ ["user_type", "time_period"],
109
+ selected="user_type",
110
+ inline=True,
111
+ )
112
+
113
+ @render_plotly
114
+ def interaction_types():
115
+ from ridgeplot import ridgeplot
116
+
117
+ dat = bot_data_filtered()
118
+ yvar = input.interaction_type()
119
+ uvals = dat[yvar].unique()
120
+
121
+ samples = [[dat.interactions[dat[yvar] == val]] for val in uvals]
122
+
123
+ plt = ridgeplot(
124
+ samples=samples,
125
+ labels=uvals,
126
+ bandwidth=0.01,
127
+ colorscale="viridis",
128
+ colormode="row-index",
129
+ )
130
+
131
+ plt.update_layout(
132
+ legend=dict(
133
+ orientation="h", yanchor="bottom", y=1.02, xanchor="center", x=0.5
134
+ )
135
+ )
136
+
137
+ return plt
138
+
139
+ ui.include_css(app_dir / "styles.css")
140
+
141
+ # --------------------------------------------------------
142
+ # Reactive calculations and effects
143
+ # --------------------------------------------------------
144
+
145
+ @reactive.calc
146
+ def bot_data_filtered():
147
+ interactions = input.interactions()
148
+ idx1 = bot_data.interactions.between(interactions[0], interactions[1])
149
+ idx2 = bot_data.time_period.isin(input.time_period())
150
+ return bot_data[idx1 & idx2]
151
+
152
+ @reactive.effect
153
+ @reactive.event(input.reset)
154
+ def _():
155
+ ui.update_slider("interactions", value=data_rng)
156
+ ui.update_checkbox_group("time_period", selected=["Morning", "Afternoon", "Evening", "Night"])
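mybotdata.py imports app_dir and bot_data from a shared module that is not part of this upload. A minimal sketch of what it presumably provides, with the data file name and columns inferred from the fields used above (interactions, time, time_period, user_type):

# shared.py (sketch)
from pathlib import Path
import pandas as pd

app_dir = Path(__file__).parent
# Expected columns: interactions, time, time_period, user_type
bot_data = pd.read_csv(app_dir / "bot_data.csv")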
pilouis.json ADDED
@@ -0,0 +1,65 @@
1
+ {
2
+ "apiVersion": "2024-10-01",
3
+ "id": "/subscriptions/5dd32870-bdc6-40cd-981a-f299e93a6439/resourceGroups/Justforpi/providers/Microsoft.CognitiveServices/accounts/pilouis",
4
+ "name": "pilouis",
5
+ "type": "microsoft.cognitiveservices/accounts",
6
+ "sku": {
7
+ "name": "F0"
8
+ },
9
+ "kind": "LUIS.Authoring",
10
+ "location": "westus",
11
+ "identity": {
12
+ "principalId": "9a4a3675-9f5f-43a3-90a1-0640b3f753cb",
13
+ "tenantId": "05b815de-344b-4a3f-8387-52ca67249c55",
14
+ "type": "SystemAssigned"
15
+ },
16
+ "properties": {
17
+ "endpoint": "https://pilouis.cognitiveservices.azure.com/",
18
+ "provisioningState": "Succeeded",
19
+ "internalId": "6a7ae86489d64f168c3fa6b4866b05cd",
20
+ "dateCreated": "2024-12-23T22:03:09.951Z",
21
+ "callRateLimit": {
22
+ "rules": [
23
+ {
24
+ "key": "default",
25
+ "renewalPeriod": 1,
26
+ "count": 5,
27
+ "matchPatterns": [
28
+ {
29
+ "path": "*",
30
+ "method": "*"
31
+ }
32
+ ]
33
+ }
34
+ ]
35
+ },
36
+ "isMigrated": false,
37
+ "customSubDomainName": "pilouis",
38
+ "privateEndpointConnections": [],
39
+ "publicNetworkAccess": "Enabled",
40
+ "capabilities": [
41
+ {
42
+ "name": "VirtualNetworks"
43
+ }
44
+ ],
45
+ "endpoints": {
46
+ "LUIS.Authoring": "https://pilouis.cognitiveservices.azure.com/",
47
+ "LUIS": "https://pilouis.cognitiveservices.azure.com/",
48
+ "Container": "https://pilouis.cognitiveservices.azure.com/"
49
+ },
50
+ "armFeatures": [
51
+ "Microsoft.CognitiveServices/LegalTerms.TextAnalytics.TAForPIIRAITermsAccepted",
52
+ "Microsoft.CognitiveServices/LegalTerms.TextAnalytics.TAForHealthRAITermsAccepted",
53
+ "Microsoft.CognitiveServices/LegalTerms.ComputerVision.SpatialAnaysisRAITermsAccepted"
54
+ ]
55
+ },
56
+ "systemData": {
57
+ "createdBy": "[email protected]",
58
+ "createdByType": "User",
59
+ "createdAt": "2024-12-23T22:03:08.79Z",
60
+ "lastModifiedBy": "[email protected]",
61
+ "lastModifiedByType": "User",
62
+ "lastModifiedAt": "2024-12-23T22:03:35.303Z"
63
+ },
64
+ "etag": "\"a900c91a-0000-0700-0000-6769de380000\""
65
+ }
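A minimal sketch (not part of the commit) of reading the LUIS endpoint recorded in pilouis.json; the file name and JSON keys are taken from the resource descriptor above:

import json

with open("pilouis.json", "r", encoding="utf-8") as fh:
    resource = json.load(fh)

# The authoring endpoint is stored under properties.endpoint.
print(resource["properties"]["endpoint"])  # https://pilouis.cognitiveservices.azure.com/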
pyvenv.cfg ADDED
@@ -0,0 +1,5 @@
+ home = C:\Python312
+ include-system-site-packages = false
+ version = 3.12.6
+ executable = C:\Python312\python.exe
+ command = C:\Python312\python.exe -m venv C:\Users\Jonathan\OneDrive - Raiff's Bits\Desktop\ut\venv
qustions.json ADDED
@@ -0,0 +1,218 @@
1
+ [
2
+ {
3
+ "question": "What is the meaning of life?",
4
+ "functions": [
5
+ {
6
+ "name": "newton_thoughts",
7
+ "description": "Apply Newton's laws to the given question.",
8
+ "parameters": {
9
+ "question": "How does Newton's third law apply to human interactions?"
10
+ }
11
+ },
12
+ {
13
+ "name": "davinci_insights",
14
+ "description": "Generate insights like Da Vinci for the given question.",
15
+ "parameters": {
16
+ "question": "What can we learn from nature's design about the meaning of life?"
17
+ }
18
+ },
19
+ {
20
+ "name": "human_intuition",
21
+ "description": "Provide human intuition for the given question.",
22
+ "parameters": {
23
+ "question": "What does your gut instinct tell you about finding happiness?"
24
+ }
25
+ },
26
+ {
27
+ "name": "neural_network_thinking",
28
+ "description": "Apply neural network thinking to the given question.",
29
+ "parameters": {
30
+ "question": "How can neural networks help us understand human creativity?"
31
+ }
32
+ },
33
+ {
34
+ "name": "quantum_computing_thinking",
35
+ "description": "Apply quantum computing principles to the given question.",
36
+ "parameters": {
37
+ "question": "How can quantum computing revolutionize problem-solving?"
38
+ }
39
+ },
40
+ {
41
+ "name": "resilient_kindness",
42
+ "description": "Provide perspectives of resilient kindness.",
43
+ "parameters": {
44
+ "question": "How can we find strength in kindness during difficult times?"
45
+ }
46
+ },
47
+ {
48
+ "name": "identify_and_refute_fallacies",
49
+ "description": "Identify and refute common logical fallacies in the argument.",
50
+ "parameters": {
51
+ "argument": "Life has no meaning because it is full of suffering."
52
+ }
53
+ }
54
+ ]
55
+ },
56
+ {
57
+ "question": "How can we mitigate bias in artificial intelligence systems?",
58
+ "functions": [
59
+ {
60
+ "name": "newton_thoughts",
61
+ "description": "Apply Newton's laws to the given question.",
62
+ "parameters": {
63
+ "question": "How can Newton's laws of motion inspire fairness in AI?"
64
+ }
65
+ },
66
+ {
67
+ "name": "davinci_insights",
68
+ "description": "Generate insights like Da Vinci for the given question.",
69
+ "parameters": {
70
+ "question": "What can Da Vinci's approach to art teach us about unbiased AI?"
71
+ }
72
+ },
73
+ {
74
+ "name": "human_intuition",
75
+ "description": "Provide human intuition for the given question.",
76
+ "parameters": {
77
+ "question": "How can we use human intuition to detect bias in AI?"
78
+ }
79
+ },
80
+ {
81
+ "name": "neural_network_thinking",
82
+ "description": "Apply neural network thinking to the given question.",
83
+ "parameters": {
84
+ "question": "How can neural networks be designed to minimize bias?"
85
+ }
86
+ },
87
+ {
88
+ "name": "quantum_computing_thinking",
89
+ "description": "Apply quantum computing principles to the given question.",
90
+ "parameters": {
91
+ "question": "How can quantum computing help in creating unbiased AI?"
92
+ }
93
+ },
94
+ {
95
+ "name": "resilient_kindness",
96
+ "description": "Provide perspectives of resilient kindness.",
97
+ "parameters": {
98
+ "question": "How can kindness be integrated into AI to ensure fairness?"
99
+ }
100
+ },
101
+ {
102
+ "name": "identify_and_refute_fallacies",
103
+ "description": "Identify and refute common logical fallacies in the argument.",
104
+ "parameters": {
105
+ "argument": "AI will always be biased because it is created by humans."
106
+ }
107
+ }
108
+ ]
109
+ },
110
+ {
111
+ "question": "How does Hydrogen's properties influence its interactions?",
112
+ "functions": [
113
+ {
114
+ "name": "newton_thoughts",
115
+ "description": "Apply Newton's laws to the given question.",
116
+ "parameters": {
117
+ "question": "How does Newton's laws explain Hydrogen's behavior in reactions?"
118
+ }
119
+ },
120
+ {
121
+ "name": "davinci_insights",
122
+ "description": "Generate insights like Da Vinci for the given question.",
123
+ "parameters": {
124
+ "question": "What can Da Vinci's observations teach us about Hydrogen?"
125
+ }
126
+ },
127
+ {
128
+ "name": "human_intuition",
129
+ "description": "Provide human intuition for the given question.",
130
+ "parameters": {
131
+ "question": "What intuitive insights can we gain about Hydrogen's role in the universe?"
132
+ }
133
+ },
134
+ {
135
+ "name": "neural_network_thinking",
136
+ "description": "Apply neural network thinking to the given question.",
137
+ "parameters": {
138
+ "question": "How can neural networks model Hydrogen's interactions?"
139
+ }
140
+ },
141
+ {
142
+ "name": "quantum_computing_thinking",
143
+ "description": "Apply quantum computing principles to the given question.",
144
+ "parameters": {
145
+ "question": "How can quantum computing enhance our understanding of Hydrogen?"
146
+ }
147
+ },
148
+ {
149
+ "name": "resilient_kindness",
150
+ "description": "Provide perspectives of resilient kindness.",
151
+ "parameters": {
152
+ "question": "How can we draw parallels between Hydrogen's simplicity and resilience?"
153
+ }
154
+ },
155
+ {
156
+ "name": "identify_and_refute_fallacies",
157
+ "description": "Identify and refute common logical fallacies in the argument.",
158
+ "parameters": {
159
+ "argument": "Hydrogen is not important because it is the simplest element."
160
+ }
161
+ }
162
+ ]
163
+ },
164
+ {
165
+ "question": "What makes Diamond unique in its applications?",
166
+ "functions": [
167
+ {
168
+ "name": "newton_thoughts",
169
+ "description": "Apply Newton's laws to the given question.",
170
+ "parameters": {
171
+ "question": "How do Newton's laws explain Diamond's hardness?"
172
+ }
173
+ },
174
+ {
175
+ "name": "davinci_insights",
176
+ "description": "Generate insights like Da Vinci for the given question.",
177
+ "parameters": {
178
+ "question": "What can Da Vinci's approach to materials teach us about Diamond?"
179
+ }
180
+ },
181
+ {
182
+ "name": "human_intuition",
183
+ "description": "Provide human intuition for the given question.",
184
+ "parameters": {
185
+ "question": "What intuitive insights can we gain about Diamond's value?"
186
+ }
187
+ },
188
+ {
189
+ "name": "neural_network_thinking",
190
+ "description": "Apply neural network thinking to the given question.",
191
+ "parameters": {
192
+ "question": "How can neural networks predict Diamond's properties?"
193
+ }
194
+ },
195
+ {
196
+ "name": "quantum_computing_thinking",
197
+ "description": "Apply quantum computing principles to the given question.",
198
+ "parameters": {
199
+ "question": "How can quantum computing help in synthesizing Diamonds?"
200
+ }
201
+ },
202
+ {
203
+ "name": "resilient_kindness",
204
+ "description": "Provide perspectives of resilient kindness.",
205
+ "parameters": {
206
+ "question": "How can we draw parallels between Diamond's strength and human resilience?"
207
+ }
208
+ },
209
+ {
210
+ "name": "identify_and_refute_fallacies",
211
+ "description": "Identify and refute common logical fallacies in the argument.",
212
+ "parameters": {
213
+ "argument": "Diamonds are valuable only because they are rare."
214
+ }
215
+ }
216
+ ]
217
+ }
218
+ ]
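A minimal sketch (not part of the commit) of how the prompt list in qustions.json could be iterated; the structure below mirrors the JSON above:

import json

with open("qustions.json", "r", encoding="utf-8") as fh:
    entries = json.load(fh)

for entry in entries:
    print(entry["question"])
    for func in entry["functions"]:
        # Each function entry pairs a perspective name with the prompt it should receive.
        print(f"  {func['name']}: {func['parameters']}")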
sentiment_analysis.py ADDED
@@ -0,0 +1,37 @@
+ from textblob import TextBlob
+ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
+ from typing import Dict, Any, Optional
+
+ def analyze_sentiment_textblob(text: str) -> Optional[Any]:
+     """Analyze the sentiment of the given text using TextBlob.
+
+     Args:
+         text (str): The text to analyze.
+
+     Returns:
+         The TextBlob sentiment namedtuple (polarity, subjectivity), or None if analysis fails.
+     """
+     try:
+         blob = TextBlob(text)
+         sentiment = blob.sentiment
+         return sentiment
+     except Exception as e:
+         print(f"Error analyzing sentiment with TextBlob: {e}")
+         return None
+
+ def analyze_sentiment_vader(text: str) -> Dict[str, Any]:
+     """Analyze the sentiment of the given text using VADER.
+
+     Args:
+         text (str): The text to analyze.
+
+     Returns:
+         dict: The VADER polarity scores (neg, neu, pos, compound); empty on failure.
+     """
+     try:
+         analyzer = SentimentIntensityAnalyzer()
+         sentiment = analyzer.polarity_scores(text)
+         return sentiment
+     except Exception as e:
+         print(f"Error analyzing sentiment with VADER: {e}")
+         return {}
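A minimal usage sketch (not part of the upload) showing how these two helpers might be called together; the example sentence is made up:

from sentiment_analysis import analyze_sentiment_textblob, analyze_sentiment_vader

text = "I really enjoy talking to this bot!"

# TextBlob returns a namedtuple with polarity (-1..1) and subjectivity (0..1).
tb = analyze_sentiment_textblob(text)
if tb is not None:
    print(f"TextBlob polarity={tb.polarity:.2f}, subjectivity={tb.subjectivity:.2f}")

# VADER returns a dict of neg/neu/pos/compound scores.
vd = analyze_sentiment_vader(text)
print(f"VADER compound={vd.get('compound', 0.0):.2f}")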
ultimatethinking.txt ADDED
@@ -0,0 +1,356 @@
1
+ import asyncio
2
+ import json
3
+ import logging
4
+ import os
5
+ from typing import List, Dict, Any
6
+ from cryptography.fernet import Fernet
7
+ from botbuilder.core import StatePropertyAccessor, TurnContext
8
+ from botbuilder.dialogs import Dialog, DialogSet, DialogTurnStatus
9
+ from dialog_helper import DialogHelper
10
+ import aiohttp
11
+ import speech_recognition as sr
12
+ from PIL import Image
13
+ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
14
+
15
+ # Ensure nltk is installed and download required data
16
+ try:
17
+ import nltk
18
+ from nltk.tokenize import word_tokenize
19
+ nltk.download('punkt', quiet=True)
20
+ except ImportError:
21
+ import subprocess
22
+ import sys
23
+ subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
24
+ import nltk
25
+ from nltk.tokenize import word_tokenize
26
+ nltk.download('punkt', quiet=True)
27
+
28
+ # Import perspectives
29
+ from perspectives import (
30
+ Perspective, NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
31
+ NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
32
+ MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective,
33
+ PsychologicalPerspective
34
+ )
35
+
36
+ # Load environment variables
37
+ from dotenv import load_dotenv
38
+ load_dotenv()
39
+
40
+ # Setup Logging
41
+ def setup_logging(config):
42
+ if config.get('logging_enabled', True):
43
+ log_level = config.get('log_level', 'DEBUG').upper()
44
+ numeric_level = getattr(logging, log_level, logging.DEBUG)
45
+ logging.basicConfig(
46
+ filename='universal_reasoning.log',
47
+ level=numeric_level,
48
+ format='%(asctime)s - %(levelname)s - %(message)s'
49
+ )
50
+ else:
51
+ logging.disable(logging.CRITICAL)
52
+
53
+ # Load JSON configuration
54
+ def load_json_config(file_path):
55
+ if not os.path.exists(file_path):
56
+ logging.error(f"Configuration file '{file_path}' not found.")
57
+ return {}
58
+ try:
59
+ with open(file_path, 'r') as file:
60
+ config = json.load(file)
61
+ logging.info(f"Configuration loaded from '{file_path}'.")
62
+ return config
63
+ except json.JSONDecodeError as e:
64
+ logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
65
+ return {}
66
+
67
+ # Encrypt sensitive information
68
+ def encrypt_sensitive_data(data, key):
69
+ fernet = Fernet(key)
70
+ encrypted_data = fernet.encrypt(data.encode())
71
+ return encrypted_data
72
+
73
+ # Decrypt sensitive information
74
+ def decrypt_sensitive_data(encrypted_data, key):
75
+ fernet = Fernet(key)
76
+ decrypted_data = fernet.decrypt(encrypted_data).decode()
77
+ return decrypted_data
78
+
79
+ # Securely destroy sensitive information
80
+ def destroy_sensitive_data(data):
81
+ del data
82
+
83
+ # Define the Element class
84
+ class Element:
85
+ def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
86
+ self.name = name
87
+ self.symbol = symbol
88
+ self.representation = representation
89
+ self.properties = properties
90
+ self.interactions = interactions
91
+ self.defense_ability = defense_ability
92
+
93
+ def execute_defense_function(self):
94
+ message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
95
+ logging.info(message)
96
+ return message
97
+
98
+ # Define the CustomRecognizer class
99
+ class CustomRecognizer:
100
+ def recognize(self, question):
101
+ # Simple keyword-based recognizer for demonstration purposes
102
+ if any(element_name.lower() in question.lower() for element_name in ["hydrogen", "diamond"]):
103
+ return RecognizerResult(question)
104
+ return RecognizerResult(None)
105
+
106
+ def get_top_intent(self, recognizer_result):
107
+ if recognizer_result.text:
108
+ return "ElementDefense"
109
+ else:
110
+ return "None"
111
+
112
+ class RecognizerResult:
113
+ def __init__(self, text):
114
+ self.text = text
115
+
116
+ # Universal Reasoning Aggregator
117
+ class UniversalReasoning:
118
+ def __init__(self, config):
119
+ self.config = config
120
+ self.perspectives = self.initialize_perspectives()
121
+ self.elements = self.initialize_elements()
122
+ self.recognizer = CustomRecognizer()
123
+ self.context_history = [] # Maintain context history
124
+ self.feedback = [] # Store user feedback
125
+ # Initialize the sentiment analyzer
126
+ self.sentiment_analyzer = SentimentIntensityAnalyzer()
127
+
128
+ def initialize_perspectives(self):
129
+ perspective_names = self.config.get('enabled_perspectives', [
130
+ "newton",
131
+ "davinci",
132
+ "human_intuition",
133
+ "neural_network",
134
+ "quantum_computing",
135
+ "resilient_kindness",
136
+ "mathematical",
137
+ "philosophical",
138
+ "copilot",
139
+ "bias_mitigation",
140
+ "psychological"
141
+ ])
142
+ perspective_classes = {
143
+ "newton": NewtonPerspective,
144
+ "davinci": DaVinciPerspective,
145
+ "human_intuition": HumanIntuitionPerspective,
146
+ "neural_network": NeuralNetworkPerspective,
147
+ "quantum_computing": QuantumComputingPerspective,
148
+ "resilient_kindness": ResilientKindnessPerspective,
149
+ "mathematical": MathematicalPerspective,
150
+ "philosophical": PhilosophicalPerspective,
151
+ "copilot": CopilotPerspective,
152
+ "bias_mitigation": BiasMitigationPerspective,
153
+ "psychological": PsychologicalPerspective
154
+ }
155
+ perspectives = []
156
+ for name in perspective_names:
157
+ cls = perspective_classes.get(name.lower())
158
+ if cls:
159
+ perspectives.append(cls(self.config))
160
+ logging.debug(f"Perspective '{name}' initialized.")
161
+ else:
162
+ logging.warning(f"Perspective '{name}' is not recognized and will be skipped.")
163
+ return perspectives
164
+
165
+ def initialize_elements(self):
166
+ elements = [
167
+ Element(
168
+ name="Hydrogen",
169
+ symbol="H",
170
+ representation="Lua",
171
+ properties=["Simple", "Lightweight", "Versatile"],
172
+ interactions=["Easily integrates with other languages and systems"],
173
+ defense_ability="Evasion"
174
+ ),
175
+ # You can add more elements as needed
176
+ Element(
177
+ name="Diamond",
178
+ symbol="D",
179
+ representation="Kotlin",
180
+ properties=["Modern", "Concise", "Safe"],
181
+ interactions=["Used for Android development"],
182
+ defense_ability="Adaptability"
183
+ )
184
+ ]
185
+ return elements
186
+
187
+ async def generate_response(self, question):
188
+ self.context_history.append(question) # Add question to context history
189
+ sentiment_score = self.analyze_sentiment(question)
190
+ real_time_data = await self.fetch_real_time_data("https://api.example.com/data")
191
+ responses = []
192
+ tasks = []
193
+
194
+ # Generate responses from perspectives concurrently
195
+ for perspective in self.perspectives:
196
+ if asyncio.iscoroutinefunction(perspective.generate_response):
197
+ tasks.append(perspective.generate_response(question))
198
+ else:
199
+ # Wrap synchronous functions in coroutine
200
+ async def sync_wrapper(perspective, question):
201
+ return perspective.generate_response(question)
202
+ tasks.append(sync_wrapper(perspective, question))
203
+
204
+ perspective_results = await asyncio.gather(*tasks, return_exceptions=True)
205
+
206
+ for perspective, result in zip(self.perspectives, perspective_results):
207
+ if isinstance(result, Exception):
208
+ logging.error(f"Error generating response from {perspective.__class__.__name__}: {result}")
209
+ else:
210
+ responses.append(result)
211
+ logging.debug(f"Response from {perspective.__class__.__name__}: {result}")
212
+
213
+ # Handle element defense logic
214
+ recognizer_result = self.recognizer.recognize(question)
215
+ top_intent = self.recognizer.get_top_intent(recognizer_result)
216
+ if top_intent == "ElementDefense":
217
+ element_name = recognizer_result.text.strip()
218
+ element = next(
219
+ (el for el in self.elements if el.name.lower() in element_name.lower()),
220
+ None
221
+ )
222
+ if element:
223
+ defense_message = element.execute_defense_function()
224
+ responses.append(defense_message)
225
+ else:
226
+ logging.info(f"No matching element found for '{element_name}'")
227
+
228
+ ethical_considerations = self.config.get(
229
+ 'ethical_considerations',
230
+ "Always act with transparency, fairness, and respect for privacy."
231
+ )
232
+ responses.append(f"**Ethical Considerations:**\n{ethical_considerations}")
233
+
234
+ formatted_response = "\n\n".join(responses)
235
+ return formatted_response
236
+
237
+ def analyze_sentiment(self, text):
238
+ sentiment_score = self.sentiment_analyzer.polarity_scores(text)
239
+ logging.info(f"Sentiment analysis result: {sentiment_score}")
240
+ return sentiment_score
241
+
242
+ async def fetch_real_time_data(self, source_url):
243
+ async with aiohttp.ClientSession() as session:
244
+ async with session.get(source_url) as response:
245
+ data = await response.json()
246
+ logging.info(f"Real-time data fetched from {source_url}: {data}")
247
+ return data
248
+
249
+ async def run_dialog(self, dialog: Dialog, turn_context: TurnContext, accessor: StatePropertyAccessor) -> None:
250
+ await DialogHelper.run_dialog(dialog, turn_context, accessor)
251
+
252
+ def save_response(self, response):
253
+ if self.config.get('enable_response_saving', False):
254
+ save_path = self.config.get('response_save_path', 'responses.txt')
255
+ try:
256
+ with open(save_path, 'a', encoding='utf-8') as file:
257
+ file.write(response + '\n')
258
+ logging.info(f"Response saved to '{save_path}'.")
259
+ except Exception as e:
260
+ logging.error(f"Error saving response to '{save_path}': {e}")
261
+
262
+ def backup_response(self, response):
263
+ if self.config.get('backup_responses', {}).get('enabled', False):
264
+ backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
265
+ try:
266
+ with open(backup_path, 'a', encoding='utf-8') as file:
267
+ file.write(response + '\n')
268
+ logging.info(f"Response backed up to '{backup_path}'.")
+ except Exception as e:
+ logging.error(f"Error backing up response to '{backup_path}': {e}")
+
270
+ async def collect_user_feedback(self, turn_context: TurnContext):
271
+ # Collect feedback from the user
272
+ feedback = turn_context.activity.text
273
+ logging.info(f"User feedback received: {feedback}")
274
+ # Process feedback for continuous learning
275
+ self.process_feedback(feedback)
276
+
277
+ def process_feedback(self, feedback):
278
+ # Implement feedback processing logic
279
+ logging.info(f"Processing feedback: {feedback}")
280
+ # Example: Adjust response generation based on feedback
281
+ # This can be expanded with more sophisticated learning algorithms
282
+
283
+ def add_new_perspective(self, perspective_name, perspective_class):
284
+ if perspective_name.lower() not in [p.__class__.__name__.lower() for p in self.perspectives]:
285
+ self.perspectives.append(perspective_class(self.config))
286
+ logging.info(f"New perspective '{perspective_name}' added.")
287
+ else:
288
+ logging.warning(f"Perspective '{perspective_name}' already exists.")
289
+
290
+ def handle_voice_input(self):
291
+ recognizer = sr.Recognizer()
292
+ with sr.Microphone() as source:
293
+ print("Listening...")
294
+ audio = recognizer.listen(source)
295
+ try:
296
+ text = recognizer.recognize_google(audio)
297
+ print(f"Voice input recognized: {text}")
298
+ return text
299
+ except sr.UnknownValueError:
300
+ print("Google Speech Recognition could not understand audio")
301
+ return None
302
+ except sr.RequestError as e:
303
+ print(f"Could not request results from Google Speech Recognition service; {e}")
304
+ return None
305
+
306
+ def handle_image_input(self, image_path):
307
+ try:
308
+ image = Image.open(image_path)
309
+ print(f"Image input processed: {image_path}")
310
+ return image
311
+ except Exception as e:
312
+ print(f"Error processing image input: {e}")
313
+ return None
314
+
315
+ # Example usage
316
+ if __name__ == "__main__":
317
+ config = load_json_config('config.json')
318
+ # Add Azure OpenAI configurations to the config
319
+ azure_openai_api_key = os.getenv('AZURE_OPENAI_API_KEY')
320
+ azure_openai_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
321
+
322
+ # Encrypt sensitive data
323
+ encryption_key = Fernet.generate_key()
324
+ encrypted_api_key = encrypt_sensitive_data(azure_openai_api_key, encryption_key)
325
+ encrypted_endpoint = encrypt_sensitive_data(azure_openai_endpoint, encryption_key)
326
+
327
+ # Add encrypted data to config
328
+ config['azure_openai_api_key'] = encrypted_api_key
329
+ config['azure_openai_endpoint'] = encrypted_endpoint
330
+
331
+ setup_logging(config)
332
+ universal_reasoning = UniversalReasoning(config)
333
+ question = "Tell me about Hydrogen and its defense mechanisms."
334
+ response = asyncio.run(universal_reasoning.generate_response(question))
335
+ print(response)
336
+ if response:
337
+ universal_reasoning.save_response(response)
338
+ universal_reasoning.backup_response(response)
339
+
340
+ # Decrypt and destroy sensitive data
341
+ decrypted_api_key = decrypt_sensitive_data(encrypted_api_key, encryption_key)
342
+ decrypted_endpoint = decrypt_sensitive_data(encrypted_endpoint, encryption_key)
343
+ destroy_sensitive_data(decrypted_api_key)
344
+ destroy_sensitive_data(decrypted_endpoint)
345
+
346
+ # Handle voice input
347
+ voice_input = universal_reasoning.handle_voice_input()
348
+ if voice_input:
349
+ response = asyncio.run(universal_reasoning.generate_response(voice_input))
350
+ print(response)
351
+
352
+ # Handle image input
353
+ image_input = universal_reasoning.handle_image_input("path_to_image.jpg")
354
+ if image_input:
355
+ # Process image input (additional logic can be added here)
356
+ print("Image input handled.")
utils.py ADDED
@@ -0,0 +1,38 @@
+ import tkinter as tk
+ import urllib.error
+ import urllib.request
+ import logging
+
+ def show_privacy_consent() -> bool:
+     """Display a pop-up window to obtain user consent for data collection and privacy."""
+     def on_accept():
+         user_consent.set(True)
+         root.destroy()
+
+     def on_decline():
+         user_consent.set(False)
+         root.destroy()
+
+     root = tk.Tk()
+     root.title("Data Permission and Privacy")
+     message = ("We value your privacy. By using this application, you consent to the collection and use of your data "
+                "as described in our privacy policy. Do you agree to proceed?")
+     label = tk.Label(root, text=message, wraplength=400, justify="left")
+     label.pack(padx=20, pady=20)
+     button_frame = tk.Frame(root)
+     button_frame.pack(pady=10)
+     accept_button = tk.Button(button_frame, text="Accept", command=on_accept)
+     accept_button.pack(side="left", padx=10)
+     decline_button = tk.Button(button_frame, text="Decline", command=on_decline)
+     decline_button.pack(side="right", padx=10)
+     user_consent = tk.BooleanVar()
+     root.mainloop()
+     return user_consent.get()
+
+ def download_database(url: str, file_path: str) -> None:
+     """Download the database file from the given URL."""
+     try:
+         logging.info(f"Downloading database from {url}...")
+         urllib.request.urlretrieve(url, file_path)
+         logging.info("Download complete.")
+     except urllib.error.URLError as e:
+         logging.error(f"Error: Failed to download database. {e}")
+     except Exception as e:
+         logging.error(f"An unexpected error occurred: {e}")
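A minimal usage sketch (not part of the upload) for utils.py; the URL and file path below are placeholders, not real resources:

import logging
from utils import show_privacy_consent, download_database

logging.basicConfig(level=logging.INFO)

if show_privacy_consent():
    # Only fetch remote data after the user has accepted the privacy dialog.
    download_database("https://example.com/botbrain.db", "botbrain.db")
else:
    print("Consent declined; skipping download.")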
your_script.py ADDED
@@ -0,0 +1,244 @@
1
+ import re
2
+ import json
3
+ from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
4
+ from cryptography.hazmat.primitives import padding
5
+ from cryptography.hazmat.backends import default_backend
6
+ from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
7
+
8
+ class Element:
9
+ def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
10
+ self.name = name
11
+ self.symbol = symbol
12
+ self.representation = representation
13
+ self.properties = properties
14
+ self.interactions = interactions
15
+ self.defense_ability = defense_ability
16
+
17
+ def display_properties(self):
18
+ print(f"Properties of {self.name} ({self.symbol}):")
19
+ for prop in self.properties:
20
+ print(f" - {prop}")
21
+
22
+ def display_interactions(self):
23
+ print(f"Interactions of {self.name} ({self.symbol}):")
24
+ for interaction in self.interactions:
25
+ print(f" - {interaction}")
26
+
27
+ def display_defense_ability(self):
28
+ print(f"Defense Ability of {self.name} ({self.symbol}): {self.defense_ability}")
29
+
30
+ def execute_defense_function(self):
31
+ defense_functions = {
32
+ "evasion": self.evasion,
33
+ "adaptability": self.adaptability,
34
+ "fortification": self.fortification,
35
+ "barrier": self.barrier,
36
+ "regeneration": self.regeneration,
37
+ "resilience": self.resilience,
38
+ "illumination": self.illumination,
39
+ "shield": self.shield,
40
+ "reflection": self.reflection,
41
+ "protection": self.protection
42
+ }
43
+ defense_function = defense_functions.get(self.defense_ability.lower(), self.no_defense)
44
+ defense_function()
45
+
46
+ def evasion(self):
47
+ print(f"{self.name} uses Evasion to avoid threats and remain undetected.")
48
+
49
+ def adaptability(self):
50
+ print(f"{self.name} adapts to changing environments and evolves to overcome challenges.")
51
+
52
+ def fortification(self):
53
+ print(f"{self.name} strengthens defenses and fortifies positions to withstand attacks.")
54
+
55
+ def barrier(self):
56
+ print(f"{self.name} creates barriers to protect against external threats.")
57
+
58
+ def regeneration(self):
59
+ print(f"{self.name} regenerates lost or damaged parts to maintain functionality.")
60
+
61
+ def resilience(self):
62
+ print(f"{self.name} exhibits resilience to recover quickly from setbacks.")
63
+
64
+ def illumination(self):
65
+ print(f"{self.name} uses illumination to reveal hidden threats and illuminate dark areas.")
66
+
67
+ def shield(self):
68
+ print(f"{self.name} uses a shield to block incoming attacks and protect allies.")
69
+
70
+ def reflection(self):
71
+ print(f"{self.name} reflects attacks back to the source, turning the enemy's power against them.")
72
+
73
+ def protection(self):
74
+ print(f"{self.name} offers protection to prevent harm and ensure safety.")
75
+
76
+ def no_defense(self):
77
+ print("No defense function available.")
78
+
79
+ class CustomRecognizer:
80
+ class RecognizerResult:
81
+ def __init__(self, text):
82
+ self.text = text
83
+ self.intents = []
84
+
85
+ class Intent:
86
+ def __init__(self, name, score):
87
+ self.name = name
88
+ self.score = score
89
+
90
+ def recognize(self, text):
91
+ recognizer_result = self.RecognizerResult(text)
92
+ regex_element = re.compile(r"^(Hydrogen|Carbon|Iron|Silicon|Oxygen|Nitrogen|Phosphorus|Gold|Silver|Lead|Diamond)$", re.IGNORECASE)
93
+ is_element = regex_element.match(text)
94
+
95
+ if is_element:
96
+ recognizer_result.intents.append(self.Intent("ElementDefense", 100))
97
+ return recognizer_result
98
+
99
+ def get_top_intent(self, recognizer_result):
100
+ recognizer_result.intents.sort(key=lambda x: x.score, reverse=True)
101
+ return recognizer_result.intents[0].name if recognizer_result.intents else None
102
+
103
+ class DataProtector:
104
+ sensitive_keywords = {"AI", "sensitive", "confidential", "data"}
105
+
106
+ @staticmethod
107
+ def contains_sensitive_info(text):
108
+ return any(keyword.lower() in text.lower() for keyword in DataProtector.sensitive_keywords)
109
+
110
+ @staticmethod
111
+ def mask_sensitive_info(text):
112
+ for keyword in DataProtector.sensitive_keywords:
113
+ text = re.sub(keyword, '*' * len(keyword), text, flags=re.IGNORECASE)
114
+ return text
115
+
116
+ @staticmethod
117
+ def encrypt_string(plain_text, key):
118
+ backend = default_backend()
119
+ key_bytes = key.encode('utf-8')
120
+ iv = key_bytes[:16]
121
+
122
+ cipher = Cipher(algorithms.AES(key_bytes), modes.CBC(iv), backend=backend)
123
+ encryptor = cipher.encryptor()
124
+
125
+ padder = padding.PKCS7(algorithms.AES.block_size).padder()
126
+ padded_data = padder.update(plain_text.encode('utf-8')) + padder.finalize()
127
+
128
+ encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
129
+ return encrypted_data.hex()
130
+
131
+ @staticmethod
132
+ def decrypt_string(cipher_text, key):
133
+ backend = default_backend()
134
+ key_bytes = key.encode('utf-8')
135
+ iv = key_bytes[:16]
136
+
137
+ cipher = Cipher(algorithms.AES(key_bytes), modes.CBC(iv), backend=backend)
138
+ decryptor = cipher.decryptor()
139
+
140
+ encrypted_data = bytes.fromhex(cipher_text)
141
+ decrypted_padded_data = decryptor.update(encrypted_data) + decryptor.finalize()
142
+
143
+ unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
144
+ decrypted_data = unpadder.update(decrypted_padded_data) + unpadder.finalize()
145
+
146
+ return decrypted_data.decode('utf-8')
147
+
148
+ def analyze_sentiment(text):
149
+ analyzer = SentimentIntensityAnalyzer()
150
+ sentiment = analyzer.polarity_scores(text)
151
+ return sentiment
152
+
153
+ def initialize_elements():
154
+ elements = [
155
+ Element(
156
+ name="Hydrogen",
157
+ symbol="H",
158
+ representation="Lua",
159
+ properties=["Simple", "Lightweight", "Versatile"],
160
+ interactions=["Easily integrates with other languages and systems"],
161
+ defense_ability="Evasion"
162
+ ),
163
+ Element(
164
+ name="Carbon",
165
+ symbol="C",
166
+ representation="Python",
167
+ properties=["Flexible", "Widely used", "Powerful"],
168
+ interactions=["Can be used for a variety of tasks, from web development to data analysis"],
169
+ defense_ability="Adaptability"
170
+ ),
171
+ Element(
172
+ name="Iron",
173
+ symbol="Fe",
174
+ representation="C++",
175
+ properties=["Strong", "Durable", "Efficient"],
176
+ interactions=["Used in system programming and game development"],
177
+ defense_ability="Fortification"
178
+ ),
179
+ Element(
180
+ name="Silicon",
181
+ symbol="Si",
182
+ representation="Java",
183
+ properties=["Robust", "Platform-independent", "Secure"],
184
+ interactions=["Widely used in enterprise applications"],
185
+ defense_ability="Barrier"
186
+ ),
187
+ Element(
188
+ name="Oxygen",
189
+ symbol="O",
190
+ representation="JavaScript",
191
+ properties=["Dynamic", "Versatile", "Ubiquitous"],
192
+ interactions=["Essential for web development"],
193
+ defense_ability="Regeneration"
194
+ ),
195
+ Element(
196
+ name="Nitrogen",
197
+ symbol="N",
198
+ representation="Ruby",
199
+ properties=["Elegant", "Productive", "Flexible"],
200
+ interactions=["Popular in web development with Rails"],
201
+ defense_ability="Resilience"
202
+ ),
203
+ Element(
204
+ name="Phosphorus",
205
+ symbol="P",
206
+ representation="PHP",
207
+ properties=["Server-side", "Web-focused", "Embedded"],
208
+ interactions=["Commonly used in web development"],
209
+ defense_ability="Illumination"
210
+ ),
211
+ Element(
212
+ name="Gold",
213
+ symbol="Au",
214
+ representation="Swift",
215
+ properties=["Modern", "Safe", "Fast"],
216
+ interactions=["Used for iOS and macOS development"],
217
+ defense_ability="Shield"
218
+ ),
219
+ Element(
220
+ name="Silver",
221
+ symbol="Ag",
222
+ representation="Go",
223
+ properties=["Concurrent", "Efficient", "Scalable"],
224
+ interactions=["Ideal for cloud services and backend systems"],
225
+ defense_ability="Reflection"
226
+ ),
227
+ Element(
228
+ name="Lead",
229
+ symbol="Pb",
230
+ representation="Rust",
231
+ properties=["Safe", "Concurrent", "Fast"],
232
+ interactions=["Used for system-level programming"],
233
+ defense_ability="Protection"
234
+ ),
235
+ Element(
236
+ name="Diamond",
237
+ symbol="D",
238
+ representation="Kotlin",
239
+ properties=["Modern", "Concise", "Safe"],
240
+ interactions=["Used for Android development"],
241
+ defense_ability="Adaptability"
242
+ )
243
+ ]
244
+ return elements
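A minimal, hypothetical usage sketch (not part of the upload) for your_script.py. The 32-character key below is made up; encrypt_string/decrypt_string expect a key whose UTF-8 encoding is a valid AES key length (16, 24, or 32 bytes), and the first 16 bytes double as the IV in the code above:

from your_script import DataProtector, CustomRecognizer, initialize_elements, analyze_sentiment

key = "0123456789abcdef0123456789abcdef"  # 32 bytes -> AES-256 (placeholder value)
secret = "This message contains sensitive data about the AI."

if DataProtector.contains_sensitive_info(secret):
    print(DataProtector.mask_sensitive_info(secret))

cipher_text = DataProtector.encrypt_string(secret, key)
print(DataProtector.decrypt_string(cipher_text, key) == secret)  # True

# Element lookup via the keyword recognizer.
recognizer = CustomRecognizer()
result = recognizer.recognize("Hydrogen")
if recognizer.get_top_intent(result) == "ElementDefense":
    for element in initialize_elements():
        if element.name.lower() == result.text.lower():
            element.execute_defense_function()

print(analyze_sentiment("Diamonds are wonderful."))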