# codette_agent.py
import os
import json
import logging
import asyncio
import sqlite3
import aiohttp
from typing import List, Dict, Any
from cryptography.fernet import Fernet
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
import speech_recognition as sr
from PIL import Image
from perspectives import (
    NewtonPerspective, DaVinciPerspective, HumanIntuitionPerspective,
    NeuralNetworkPerspective, QuantumComputingPerspective, ResilientKindnessPerspective,
    MathematicalPerspective, PhilosophicalPerspective, CopilotPerspective,
    BiasMitigationPerspective, PsychologicalPerspective
)


class CodetteAgent:
    """Multi-perspective response agent with dual sentiment analysis and in-memory history."""

    def __init__(self, config):
        self.config = config
        self.perspectives = self._init_perspectives()
        self.vader = SentimentIntensityAnalyzer()
        # In-memory SQLite store for prompt/response pairs.
        self.memory = sqlite3.connect(":memory:")
        self.memory.execute("CREATE TABLE IF NOT EXISTS codette_memory (input TEXT, response TEXT)")
        self.elements = self._init_elements()
        self.history = []
        self.feedback_log = []

    def _init_perspectives(self):
        # Map config names to perspective classes; only the enabled ones are instantiated.
        available = {
            "newton": NewtonPerspective,
            "davinci": DaVinciPerspective,
            "human_intuition": HumanIntuitionPerspective,
            "neural_network": NeuralNetworkPerspective,
            "quantum_computing": QuantumComputingPerspective,
            "resilient_kindness": ResilientKindnessPerspective,
            "mathematical": MathematicalPerspective,
            "philosophical": PhilosophicalPerspective,
            "copilot": CopilotPerspective,
            "bias_mitigation": BiasMitigationPerspective,
            "psychological": PsychologicalPerspective
        }
        enabled = self.config.get("enabled_perspectives", available.keys())
        return [available[p](self.config) for p in enabled if p in available]
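
    # Illustrative config sketch (not from the original repo): a dict that enables a
    # subset of perspectives. Keys not present in `available` above are silently ignored.
    #
    #     config = {"enabled_perspectives": ["newton", "davinci", "bias_mitigation"]}
    #     agent = CodetteAgent(config)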

    def _init_elements(self):
        # Element is defined later in this module; the name is resolved at call time.
        return [
            Element("Hydrogen", "H", "Lua", ["Simple", "Lightweight"], ["Integrates easily"], "Evasion"),
            Element("Diamond", "D", "Kotlin", ["Hard", "Stable"], ["Stable systems"], "Resilience")
        ]

    async def generate_response(self, prompt: str) -> str:
        self.history.append(prompt)
        responses = []
        for p in self.perspectives:
            try:
                r = p.generate_response(prompt)
                responses.append(EthicsCore.validate(r))
            except Exception as e:
                logging.warning(f"{p.__class__.__name__} failed: {e}")
        # Sentiment analysis (dual engine)
        vader_score = self.vader.polarity_scores(prompt)["compound"]
        blob = TextBlob(prompt)
        blob_polarity = blob.sentiment.polarity
        blob_subjectivity = blob.sentiment.subjectivity
        responses.append(f"[VADER: {vader_score:+.2f} | TextBlob: polarity={blob_polarity:+.2f}, subjectivity={blob_subjectivity:.2f}]")
        final = "\n\n".join(responses)
        self.memory.execute("INSERT INTO codette_memory VALUES (?, ?)", (prompt, final))
        self.memory.commit()
        return final

    def run_cognitive_matrix(self, test_file="codette_test_matrix.json") -> List[Dict[str, Any]]:
        if not os.path.exists(test_file):
            logging.error("Test matrix file not found.")
            return []
        with open(test_file, "r") as f:
            matrix = json.load(f)
        results = []
        for item in matrix:
            q_result = {"question": item["question"], "results": []}
            for fn in item["functions"]:
                name = fn["name"]
                parameters = fn["parameters"]
                prompt = parameters.get("question") or parameters.get("argument")
                try:
                    perspective_response = self._call_named_perspective(name, prompt)
                    q_result["results"].append({"function": name, "output": perspective_response})
                except Exception as e:
                    q_result["results"].append({"function": name, "error": str(e)})
            results.append(q_result)
        return results
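
    # A minimal sketch of the codette_test_matrix.json shape, inferred from the parsing
    # above; the file itself is not part of this listing, so the values are illustrative:
    #
    #     [
    #       {
    #         "question": "What is entropy?",
    #         "functions": [
    #           {"name": "newton", "parameters": {"question": "What is entropy?"}},
    #           {"name": "philosophical", "parameters": {"argument": "Entropy as disorder"}}
    #         ]
    #       }
    #     ]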

    def _call_named_perspective(self, name: str, prompt: str) -> str:
        # Substring match so that e.g. "newton" resolves to NewtonPerspective.
        for p in self.perspectives:
            if name.lower() in p.__class__.__name__.lower():
                return p.generate_response(prompt)
        raise ValueError(f"Perspective '{name}' not initialized")

    def handle_voice_input(self):
        r = sr.Recognizer()
        with sr.Microphone() as source:
            print("🎤 Listening...")
            audio = r.listen(source)
        try:
            return r.recognize_google(audio)
        except Exception as e:
            print("[Voice Error]", e)
            return None

    def handle_image_input(self, image_path):
        try:
            return Image.open(image_path)
        except Exception as e:
            print("[Image Error]", e)
            return None

    async def fetch_real_time_data(self, url):
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    return await resp.json()
        except Exception as e:
            logging.warning(f"Failed to fetch real-time data: {e}")
            return {}
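
    # Usage sketch (the URL is a placeholder, not an endpoint referenced by this project):
    #
    #     data = asyncio.run(agent.fetch_real_time_data("https://example.com/api.json"))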

    def encrypt(self, text, key):
        fernet = Fernet(key)
        return fernet.encrypt(text.encode())

    def decrypt(self, enc, key):
        fernet = Fernet(key)
        return fernet.decrypt(enc).decode()
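
    # Both methods expect a caller-supplied Fernet key; a round-trip sketch:
    #
    #     key = Fernet.generate_key()
    #     token = agent.encrypt("secret", key)
    #     assert agent.decrypt(token, key) == "secret"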

    def destroy(self, obj):
        # Only drops this local reference; the object is garbage-collected once no
        # other references remain.
        del obj

    def add_perspective(self, name, perspective_class):
        # `name` is currently unused; the class is instantiated with the agent config.
        self.perspectives.append(perspective_class(self.config))

    def log_feedback(self, feedback):
        self.feedback_log.append(feedback)

    def get_recent_memory(self, limit=5):
        cursor = self.memory.execute(
            "SELECT input, response FROM codette_memory ORDER BY rowid DESC LIMIT ?", (limit,)
        )
        return cursor.fetchall()


class Element:
    def __init__(self, name, symbol, representation, properties, interactions, defense_ability):
        self.name = name
        self.symbol = symbol
        self.representation = representation
        self.properties = properties
        self.interactions = interactions
        self.defense_ability = defense_ability

    def execute_defense_function(self):
        return f"{self.name} ({self.symbol}) executes: {self.defense_ability}"


class EthicsCore:
    @staticmethod
    def validate(response: str) -> str:
        # Coarse keyword filter; any match replaces the response wholesale.
        if any(term in response.lower() for term in ["kill", "hate", "destroy"]):
            return "[Filtered: Ethically unsafe]"
        return response


def setup_logging(config):
    if config.get('logging_enabled', True):
        log_level = config.get('log_level', 'DEBUG').upper()
        numeric_level = getattr(logging, log_level, logging.DEBUG)
        logging.basicConfig(
            filename='codette_agent.log',
            level=numeric_level,
            format='%(asctime)s - %(levelname)s - %(message)s'
        )
    else:
        logging.disable(logging.CRITICAL)
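
# The config keys read above, shown with illustrative values as they would appear in
# config.json (the defaults in the code are logging_enabled=True, log_level="DEBUG"):
#
#     {"logging_enabled": true, "log_level": "INFO"}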


def load_json_config(file_path='config.json'):
    if not os.path.exists(file_path):
        logging.warning(f"Config '{file_path}' not found. Using defaults.")
        return {}
    try:
        with open(file_path, 'r') as f:
            return json.load(f)
    except Exception as e:
        logging.error(f"Failed to load config: {e}")
        return {}
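

# A minimal end-to-end sketch, assuming a local `perspectives` module providing the
# classes imported at the top and an optional config.json next to this file. The prompt
# string below is illustrative.
if __name__ == "__main__":
    config = load_json_config()
    setup_logging(config)
    agent = CodetteAgent(config)
    reply = asyncio.run(agent.generate_response("How do simple systems stay resilient?"))
    print(reply)
    print(agent.get_recent_memory(limit=1))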