Raiff1982 committed
Commit c25b2b8 · verified · 1 Parent(s): 84c7940

Update final_codettes_chatbot.py

Files changed (1):
  1. final_codettes_chatbot.py +20 -195
final_codettes_chatbot.py CHANGED
@@ -1,203 +1,28 @@
- import asyncio
- import json
- import os
  import logging
- import sqlite3
- from typing import List

- # Ensure vaderSentiment is installed
- try:
-     from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
- except ModuleNotFoundError:
-     import subprocess
-     import sys
-     subprocess.check_call([sys.executable, "-m", "pip", "install", "vaderSentiment"])
-     from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

- # Ensure nltk is installed and download required data
- try:
-     import nltk
-     from nltk.tokenize import word_tokenize
-     nltk.download('punkt', quiet=True)
- except ImportError:
-     import subprocess
-     import sys
-     subprocess.check_call([sys.executable, "-m", "pip", "install", "nltk"])
-     import nltk
-     from nltk.tokenize import word_tokenize
-     nltk.download('punkt', quiet=True)

- # Import perspectives
- from perspectives import (
-     NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
-     NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
-     MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective, BiasMitigationPerspective
- )

- def setup_logging(config):
-     if config.get('logging_enabled', True):
-         log_level = config.get('log_level', 'DEBUG').upper()
-         numeric_level = getattr(logging, log_level, logging.DEBUG)
-         logging.basicConfig(
-             filename='codette_reasoning.log',
-             level=numeric_level,
-             format='%(asctime)s - %(levelname)s - %(message)s'
          )
-     else:
-         logging.disable(logging.CRITICAL)
-
- def load_json_config(file_path):
-     if not os.path.exists(file_path):
-         logging.error(f"Configuration file '{file_path}' not found.")
-         return {}
-     try:
-         with open(file_path, 'r') as file:
-             config = json.load(file)
-         logging.info(f"Configuration loaded from '{file_path}'.")
-         config['allow_network_calls'] = False
-         return config
-     except json.JSONDecodeError as e:
-         logging.error(f"Error decoding JSON from the configuration file '{file_path}': {e}")
-         return {}
-
- def analyze_question(question):
-     tokens = word_tokenize(question)
-     logging.debug(f"Question tokens: {tokens}")
-     return tokens
-
- class Element:
-     def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
-         self.name = name
-         self.symbol = symbol
-         self.representation = representation
-         self.properties = properties
-         self.interactions = interactions
-         self.defense_ability = defense_ability
-
-     def execute_defense_function(self):
-         message = f"{self.name} ({self.symbol}) executes its defense ability: {self.defense_ability}"
-         logging.info(message)
-         return message
-
- class CustomRecognizer:
-     def recognize(self, question):
-         if any(element_name.lower() in question.lower() for element_name in ["hydrogen", "diamond"]):
-             return RecognizerResult(question)
-         return RecognizerResult(None)
-
-     def get_top_intent(self, recognizer_result):
-         return "ElementDefense" if recognizer_result.text else "None"
-
- class RecognizerResult:
-     def __init__(self, text):
-         self.text = text
-
- class EthicsCore:
-     @staticmethod
-     def validate_response(response: str) -> str:
-         # Example simple ethics filter
-         if any(term in response.lower() for term in ["kill", "hate", "destroy"]):
-             return "[Filtered for ethical safety]"
-         return response
-
- class UniversalReasoning:
-     def __init__(self, config):
-         self.config = config
-         self.perspectives = self.initialize_perspectives()
-         self.elements = self.initialize_elements()
-         self.recognizer = CustomRecognizer()
-         self.sentiment_analyzer = SentimentIntensityAnalyzer()
-         self.memory_db = self.init_memory_store()
-
-     def initialize_perspectives(self):
-         perspective_names = self.config.get('enabled_perspectives', [
-             "newton", "davinci", "human_intuition", "neural_network", "quantum_computing",
-             "resilient_kindness", "mathematical", "philosophical", "copilot", "bias_mitigation"
-         ])
-         perspective_classes = {
-             "newton": NewtonPerspective,
-             "davinci": DaVinciPerspective,
-             "human_intuition": HumanIntuitionPerspective,
-             "neural_network": NeuralNetworkPerspective,
-             "quantum_computing": QuantumComputingPerspective,
-             "resilient_kindness": ResilientKindnessPerspective,
-             "mathematical": MathematicalPerspective,
-             "philosophical": PhilosophicalPerspective,
-             "copilot": CopilotPerspective,
-             "bias_mitigation": BiasMitigationPerspective
-         }
-         perspectives = []
-         for name in perspective_names:
-             cls = perspective_classes.get(name.lower())
-             if cls:
-                 perspectives.append(cls(self.config))
-                 logging.debug(f"Perspective '{name}' initialized.")
-         return perspectives
-
-     def initialize_elements(self):
-         return [
-             Element("Hydrogen", "H", "Lua", ["Simple", "Lightweight", "Versatile"],
-                     ["Integrates with other languages"], "Evasion"),
-             Element("Diamond", "D", "Kotlin", ["Modern", "Concise", "Safe"],
-                     ["Used for Android development"], "Adaptability")
-         ]
-
-     def init_memory_store(self):
-         conn = sqlite3.connect(':memory:')
-         conn.execute("CREATE TABLE IF NOT EXISTS memory (query TEXT, response TEXT)")
-         return conn
-
-     async def generate_response(self, question):
-         responses = []
-         tasks = []
-
-         for perspective in self.perspectives:
-             if asyncio.iscoroutinefunction(perspective.generate_response):
-                 tasks.append(perspective.generate_response(question))
-             else:
-                 async def sync_wrapper(perspective, question):
-                     return perspective.generate_response(question)
-                 tasks.append(sync_wrapper(perspective, question))
-
-         perspective_results = await asyncio.gather(*tasks, return_exceptions=True)
-
-         for perspective, result in zip(self.perspectives, perspective_results):
-             if isinstance(result, Exception):
-                 logging.error(f"Error from {perspective.__class__.__name__}: {result}")
-             else:
-                 filtered = EthicsCore.validate_response(result)
-                 responses.append(filtered)
-
-         recognizer_result = self.recognizer.recognize(question)
-         top_intent = self.recognizer.get_top_intent(recognizer_result)
-         if top_intent == "ElementDefense":
-             element_name = recognizer_result.text.strip()
-             element = next((el for el in self.elements if el.name.lower() in element_name.lower()), None)
-             if element:
-                 responses.append(element.execute_defense_function())
-
-         ethical = self.config.get("ethical_considerations", "Act transparently and respectfully.")
-         responses.append(f"**Ethical Considerations:**\n{ethical}")
-
-         final = "\n\n".join(responses)
-         self.save_to_memory(question, final)
-         return final
-
-     def save_to_memory(self, question, response):
-         try:
-             self.memory_db.execute("INSERT INTO memory (query, response) VALUES (?, ?)", (question, response))
-             self.memory_db.commit()
-         except Exception as e:
-             logging.error(f"Error saving to memory DB: {e}")

-     def save_response(self, response):
-         if self.config.get('enable_response_saving', False):
-             path = self.config.get('response_save_path', 'responses.txt')
-             with open(path, 'a', encoding='utf-8') as file:
-                 file.write(response + '\n')

-     def backup_response(self, response):
-         if self.config.get('backup_responses', {}).get('enabled', False):
-             backup_path = self.config['backup_responses'].get('backup_path', 'backup_responses.txt')
-             with open(backup_path, 'a', encoding='utf-8') as file:
-                 file.write(response + '\n')
  import logging
+ import gradio as gr
+ import asyncio
+ from codette_reasoning import UniversalReasoning, load_json_config, setup_logging


+ class HuggingFaceChatbot:
+     def __init__(self):
+         config = load_json_config("config.json")
+         setup_logging(config)
+         self.reasoning_engine = UniversalReasoning(config)

+     def setup_interface(self):
+         async def chatbot_logic(user_input: str) -> str:
+             return await self.reasoning_engine.generate_response(user_input)

+         text_interface = gr.Interface(
+             fn=chatbot_logic,
+             inputs=gr.Textbox(label="Ask Codette Anything"),
+             outputs=gr.Textbox(label="Codette's Thoughts"),
+             title="🧠 Codette: Multimodal Reasoning Chatbot"
          )

+         return text_interface

+     def launch(self):
+         app = self.setup_interface()
+         app.launch()
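
Note: after this commit, final_codettes_chatbot.py only defines the HuggingFaceChatbot class; no entry point is included in the diff. A minimal launcher might look like the sketch below. It is hypothetical, not part of this commit, and assumes the helpers removed above (UniversalReasoning, load_json_config, setup_logging) now live in a codette_reasoning.py module next to this file, that a config.json is present, and that the gradio package is installed.

# Hypothetical entry point (not part of this commit).
from final_codettes_chatbot import HuggingFaceChatbot

if __name__ == "__main__":
    bot = HuggingFaceChatbot()  # loads config.json and builds the UniversalReasoning engine
    bot.launch()                # builds the gr.Interface and starts the Gradio server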