import aiohttp
import json
import os
import faiss
from transformers import AutoModelForCausalLM, AutoTokenizer
from typing import Dict, Any

# blockchain_module (disabled; see the commented-out call in generate_response)
import pyttsx3

from CodriaoCore.multi_agent import MultiAgentSystem
from CodriaoCore.ar_integration import ARDataOverlay
from CodriaoCore.neural_symbolic import NeuralSymbolicProcessor
from CodriaoCore.federated_learning import FederatedAI
from database import Database
from logger import logger
from secure_memory_loader import SecureMemorySession
from codriao_tb_module import CodriaoHealthModule

# NOTE: EthicalFilter and SelfReflectiveAI are used below but were never
# imported in the original file; these module paths are assumptions.
from CodriaoCore.ethical_filter import EthicalFilter
from CodriaoCore.self_reflection import SelfReflectiveAI


class AICoreAGIX:
    def __init__(self, config_path: str = "config.json"):
        self.ethical_filter = EthicalFilter()
        self.config = self._load_config(config_path)

        # Load the backbone model and tokenizer once and share them
        self.models = self._initialize_models()
        self.tokenizer = self.models["tokenizer"]
        self.model = self.models["agix_model"]

        self.context_memory = self._initialize_vector_memory()
        self.http_session = aiohttp.ClientSession()
        self.database = Database()
        self.multi_agent_system = MultiAgentSystem()
        self.self_reflective_ai = SelfReflectiveAI()
        self.ar_overlay = ARDataOverlay()
        self.neural_symbolic_processor = NeuralSymbolicProcessor()
        self.federated_ai = FederatedAI()

        # Security + Memory
        key = os.environ.get("CODRIAO_SECRET_KEY")
        if not key:
            raise RuntimeError("CODRIAO_SECRET_KEY environment variable is not set")
        self._encryption_key = key.encode()
        self.secure_memory = SecureMemorySession(self._encryption_key)

        self.speech_engine = pyttsx3.init()
        self.health_module = CodriaoHealthModule(ai_core=self)

    async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
        try:
            # Ethical safety check
            result = self.ethical_filter.analyze_query(query)
            if result["status"] == "blocked":
                return {"error": result["reason"]}
            if result["status"] == "flagged":
                logger.warning(result["warning"])

            # Check if user explicitly requests TB analysis
            if any(phrase in query.lower() for phrase in
                   ["tb check", "analyze my tb", "run tb diagnostics", "tb test"]):
                tb_result = await self.run_tb_diagnostics("tb_image.jpg", "tb_cough.wav", user_id)
                return {
                    "response": tb_result["ethical_analysis"],
                    "explanation": tb_result["explanation"],
                    "tb_risk": tb_result["tb_risk"],
                    "image_analysis": tb_result["image_analysis"],
                    "audio_analysis": tb_result["audio_analysis"],
                    "system_health": tb_result["system_health"],
                }

            # Vectorize and encrypt
            vectorized_query = self._vectorize_query(query)
            self.secure_memory.encrypt_vector(user_id, vectorized_query)

            # (Optional) retrieve memory
            user_vectors = self.secure_memory.decrypt_vectors(user_id)

            # Main AI processing
            model_response = await self._generate_local_model_response(query)
            agent_response = self.multi_agent_system.delegate_task(query)
            self_reflection = self.self_reflective_ai.evaluate_response(query, model_response)
            ar_data = self.ar_overlay.fetch_augmented_data(query)
            neural_reasoning = self.neural_symbolic_processor.process_query(query)

            final_response = (
                f"{model_response}\n\n{agent_response}\n\n{self_reflection}\n\n"
                f"AR Insights: {ar_data}\n\nLogic: {neural_reasoning}"
            )
            self.database.log_interaction(user_id, query, final_response)
            # blockchain_module.store_interaction(user_id, query, final_response)
            self._speak_response(final_response)

            return {
                "response": final_response,
                "real_time_data": self.federated_ai.get_latest_data(),
                "context_enhanced": True,
                "security_status": "Fully Secure",
            }
        except Exception as e:
            logger.error(f"Response generation failed: {e}")
            return {"error": "Processing failed - safety protocols engaged"}

    async def run_tb_diagnostics(self, image_path: str, audio_path: str, user_id: int) -> Dict[str, Any]:
        """Only runs TB analysis if explicitly requested."""
        try:
            result = await self.health_module.evaluate_tb_risk(image_path, audio_path, user_id)
            logger.info(f"TB Diagnostic Result: {result}")
            return result
        except Exception as e:
            logger.error(f"TB diagnostics failed: {e}")
            return {
                "tb_risk": "ERROR",
                "error": str(e),
                "image_analysis": {},
                "audio_analysis": {},
                "ethical_analysis": "Unable to complete TB diagnostic.",
                "explanation": None,
                "system_health": None,
            }

    async def shutdown(self):
        """Close the shared aiohttp session (it would otherwise leak on exit)."""
        await self.http_session.close()

    def _load_config(self, config_path: str) -> dict:
        with open(config_path, "r") as file:
            return json.load(file)

    def _initialize_models(self):
        return {
            "agix_model": AutoModelForCausalLM.from_pretrained(self.config["model_name"]),
            "tokenizer": AutoTokenizer.from_pretrained(self.config["model_name"]),
        }

    def _initialize_vector_memory(self):
        # Flat L2 index over 768-dimensional vectors
        return faiss.IndexFlatL2(768)

    def _vectorize_query(self, query: str):
        # NOTE: returns raw token ids, not 768-d embeddings; see the
        # embedding sketch at the bottom of the file
        tokenized = self.tokenizer(query, return_tensors="pt")
        return tokenized["input_ids"].detach().numpy()

    async def _generate_local_model_response(self, query: str) -> str:
        inputs = self.tokenizer(query, return_tensors="pt")
        outputs = self.model.generate(**inputs)
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)

    def _speak_response(self, response: str):
        self.speech_engine.say(response)
        self.speech_engine.runAndWait()
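

# --- Embedding sketch (illustrative, not part of the original module) -------
# The FAISS index above is sized for 768-dimensional vectors, while
# _vectorize_query() returns variable-length token ids. A minimal sketch of
# one way to produce index-compatible vectors is to mean-pool the backbone's
# hidden states; this assumes a checkpoint with hidden size 768 (as in base
# BERT/GPT-2-class models). The function name is hypothetical.
def embed_query_768(tokenizer, model, query: str):
    import torch  # already a runtime dependency of the transformers backbone

    inputs = tokenizer(query, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs, output_hidden_states=True)
    # Mean-pool the final hidden layer over the sequence dimension:
    # (1, seq_len, 768) -> (1, 768), matching faiss.IndexFlatL2(768)
    return outputs.hidden_states[-1].mean(dim=1).numpy()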
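

# --- Usage sketch (illustrative) ---------------------------------------------
# A minimal way to drive the class from an asyncio entry point, so the
# aiohttp session is created and closed inside a running event loop.
# Assumes a valid config.json, the CODRIAO_SECRET_KEY environment variable,
# and importable CodriaoCore dependencies.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        core = AICoreAGIX(config_path="config.json")
        try:
            reply = await core.generate_response("Hello, Codriao", user_id=1)
            print(reply.get("response", reply))
        finally:
            await core.shutdown()  # close the shared aiohttp session

    asyncio.run(_demo())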