import warnings
# Suppress FutureWarnings
warnings.filterwarnings("ignore", category=FutureWarning)
# --- Monkey Patch for Gradio Schema Parsing ---
# This patch prevents APIInfoParseError by handling boolean schema values.
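# Note: _json_schema_to_python_type and get_type are private gradio_client
# internals, so this workaround may break on other gradio_client versions.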
try:
    import gradio_client.utils as client_utils

    # Patch the helper function to handle bool types in the schema.
    original_json_schema_to_python_type = client_utils._json_schema_to_python_type

    def patched_json_schema_to_python_type(schema, defs=None):
        if isinstance(schema, bool):
            # If the schema is a boolean, simply return a generic type.
            return "Any"
        return original_json_schema_to_python_type(schema, defs)

    client_utils._json_schema_to_python_type = patched_json_schema_to_python_type

    # Also patch get_type to be extra safe.
    original_get_type = client_utils.get_type

    def patched_get_type(schema):
        if isinstance(schema, bool):
            return "Any"
        if not isinstance(schema, dict):
            return "Any"
        return original_get_type(schema)

    client_utils.get_type = patched_get_type
except Exception as e:
    print("Warning: Failed to patch gradio_client schema utils:", e)
import random
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import nltk
import gradio as gr
from nltk.sentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
)
# Download necessary NLTK data
nltk.download("vader_lexicon", quiet=True)
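# VADER (vader_lexicon) supplies rule-based sentiment scores (neg/neu/pos/compound)
# that complement the transformer-based emotion classifier below.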
# --- Emotion Analyzer ---
class EmotionalAnalyzer:
    def __init__(self):
        try:
            self.model = AutoModelForSequenceClassification.from_pretrained(
                "bhadresh-savani/distilbert-base-uncased-emotion"
            )
            self.tokenizer = AutoTokenizer.from_pretrained(
                "bhadresh-savani/distilbert-base-uncased-emotion"
            )
        except Exception:
            self.model = None
            self.tokenizer = None
        self.labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
        self.sia = SentimentIntensityAnalyzer()
    def predict_emotion(self, text):
        try:
            if self.model is None or self.tokenizer is None:
                raise ValueError("Model or tokenizer not initialized properly.")
            inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
            with torch.no_grad():
                outputs = self.model(**inputs)
            probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
            return self.labels[torch.argmax(probs).item()]
        except Exception:
            return "Unknown"
    def analyze(self, text):
        try:
            vader_scores = self.sia.polarity_scores(text)
            blob = TextBlob(text)
            blob_data = {
                "polarity": blob.sentiment.polarity,
                "subjectivity": blob.sentiment.subjectivity,
                "word_count": len(blob.words),
                "sentence_count": len(blob.sentences),
            }
            return {
                "emotion": self.predict_emotion(text),
                "vader": vader_scores,
                "textblob": blob_data,
            }
        except Exception:
            return {"emotion": "Unknown", "vader": {}, "textblob": {}}
    def plot_emotions(self):
        try:
            # The chart uses simulated percentages rather than real model output.
            simulated_emotions = {
                "joy": random.randint(10, 30),
                "sadness": random.randint(5, 20),
                "anger": random.randint(10, 25),
                "fear": random.randint(5, 15),
                "love": random.randint(10, 30),
                "surprise": random.randint(5, 20),
            }
            df = pd.DataFrame(list(simulated_emotions.items()), columns=["Emotion", "Percentage"])
            plt.figure(figsize=(8, 4))
            sns.barplot(x="Emotion", y="Percentage", data=df)
            plt.title("Simulated Emotional State")
            plt.tight_layout()
            path = "emotions.png"
            plt.savefig(path)
            plt.close()
            return path
        except Exception:
            return None  # Signal plotting failure to the caller.
# --- Text Completion LLM ---
tokenizer = AutoTokenizer.from_pretrained("diabolic6045/ELN-Llama-1B-base")
model = AutoModelForCausalLM.from_pretrained("diabolic6045/ELN-Llama-1B-base")
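# Both models load on CPU at import time; if a GPU is available they could be
# moved with model.to("cuda"), though that is not done here.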
def generate_completion(message, temperature, max_length):
    try:
        # Gradio sliders return floats; range() below needs an int.
        max_length = int(max_length)
        inputs = tokenizer(message, return_tensors="pt", truncation=True, max_length=512)
        input_ids = inputs["input_ids"]
        current_text = message
        # Sample one token at a time until the length cap or EOS is reached.
        for _ in range(max_length - input_ids.shape[1]):
            with torch.no_grad():
                outputs = model(input_ids)
            logits = outputs.logits[:, -1, :] / temperature
            probs = torch.softmax(logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)
            if next_token.item() == tokenizer.eos_token_id:
                break
            input_ids = torch.cat([input_ids, next_token], dim=-1)
            new_token_text = tokenizer.decode(next_token[0], skip_special_tokens=True)
            current_text += new_token_text
        return current_text
    except Exception:
        return "Error generating text."
# --- Emotion-Aware LLM Response ---
def emotion_aware_response(input_text):
    try:
        analyzer = EmotionalAnalyzer()
        results = analyzer.analyze(input_text)
        image_path = analyzer.plot_emotions()  # May be None if plotting fails.
        prompt = (
            f"Input: {input_text}\n"
            f"Detected Emotion: {results['emotion']}\n"
            f"VADER Scores: {results['vader']}\n"
            f"Respond thoughtfully and emotionally aware:"
        )
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            output_ids = model.generate(
                inputs.input_ids,
                max_new_tokens=256,  # max_length would include the prompt, which can already be 512 tokens
                do_sample=True,
                temperature=0.7,
                top_k=50,
                top_p=0.95,
                pad_token_id=tokenizer.eos_token_id,
            )
        response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        summary = (
            f"Emotion: {results['emotion']}\n"
            f"VADER: {results['vader']}\n"
            f"TextBlob: {results['textblob']}\n\n"
            f"LLM Response:\n{response}"
        )
        return summary, image_path
    except Exception:
        return "Error processing emotion-aware response", None
# --- Gradio Interface ---
with gr.Blocks(title="ELN LLaMA 1B Enhanced Demo") as app:
    gr.Markdown("## 🧠 ELN-LLaMA Emotion-Aware & Completion Interface")

    with gr.Tab("💬 Emotion-Aware Response"):
        with gr.Row():
            input_text = gr.Textbox(label="Input Text", lines=4, placeholder="Type something with emotion or meaning...")
        with gr.Row():
            text_output = gr.Textbox(label="Response", lines=8)
            img_output = gr.Image(label="Emotional Visualization")
        emotion_btn = gr.Button("Generate Emotion-Aware Response")
        emotion_btn.click(emotion_aware_response, inputs=input_text, outputs=[text_output, img_output])

    with gr.Tab("📝 Text Completion"):
        comp_text = gr.Textbox(label="Prompt", lines=4)
        comp_temp = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
        comp_len = gr.Slider(minimum=50, maximum=500, value=200, step=50, label="Max Length")
        comp_output = gr.Textbox(label="Generated Completion", lines=8)
        comp_button = gr.Button("Complete Text")
        comp_button.click(generate_completion, inputs=[comp_text, comp_temp, comp_len], outputs=comp_output)
# Launch the Gradio app (remove share=True if running in an environment that doesn't support it)
app.launch(share=True)