Sephfox committed
Commit 45cfc26 · verified · 1 Parent(s): 00837e2

Update app.py

Files changed (1):
  1. app.py +116 -164
app.py CHANGED
@@ -1,202 +1,154 @@
  import warnings
  import os
- import json
  import random
  import gradio as gr
  import torch
  import matplotlib.pyplot as plt
  import seaborn as sns
- import pandas as pd
  import nltk
  from nltk.sentiment import SentimentIntensityAnalyzer
  from textblob import TextBlob
- from transformers import AutoModelForSequenceClassification, AutoTokenizer, AutoModelForCausalLM, LlamaConfig

  warnings.filterwarnings('ignore', category=FutureWarning)
-
- # --- Monkey Patch for Gradio Client JSON Schema Bug ---
- import gradio_client.utils as client_utils
-
- original_get_type = client_utils.get_type
- def patched_get_type(schema):
-     if not isinstance(schema, dict):
-         return type(schema).__name__
-     return original_get_type(schema)
- client_utils.get_type = patched_get_type
-
- if not hasattr(client_utils, "_original_json_schema_to_python_type"):
-     client_utils._original_json_schema_to_python_type = client_utils._json_schema_to_python_type
-
- def patched_json_schema_to_python_type(schema, defs=None):
-     if isinstance(schema, bool):
-         return "bool"
-     return client_utils._original_json_schema_to_python_type(schema, defs)
- client_utils._json_schema_to_python_type = patched_json_schema_to_python_type
- # --- End of Monkey Patch ---
-
- # Download necessary NLTK data
  nltk.download('vader_lexicon', quiet=True)

- # ---------------------------
- # Backend Support for GGUF Models
- # ---------------------------
- try:
-     from llama_cpp import Llama
-     BACKEND = "llama_cpp"
- except ImportError:
-     BACKEND = "transformers"
-
- # ---------------------------
- # Emotional Analysis Module
- # ---------------------------
  class EmotionalAnalyzer:
      def __init__(self):
-         self.emotion_model = AutoModelForSequenceClassification.from_pretrained(
              "bhadresh-savani/distilbert-base-uncased-emotion"
          )
-         self.emotion_tokenizer = AutoTokenizer.from_pretrained(
              "bhadresh-savani/distilbert-base-uncased-emotion"
          )
-         self.emotion_labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
          self.sia = SentimentIntensityAnalyzer()

      def predict_emotion(self, text):
-         inputs = self.emotion_tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
          with torch.no_grad():
-             outputs = self.emotion_model(**inputs)
-         probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
-         predicted_idx = torch.argmax(probabilities, dim=-1).item()
-         return self.emotion_labels[predicted_idx]
-
-     def sentiment_analysis(self, text):
-         return self.sia.polarity_scores(text)

-     def detailed_emotional_analysis(self, text):
-         vader_scores = self.sentiment_analysis(text)
          blob = TextBlob(text)
-         textblob_analysis = {
-             'polarity': blob.sentiment.polarity,
-             'subjectivity': blob.sentiment.subjectivity,
-             'word_count': len(blob.words),
-             'sentence_count': len(blob.sentences)
          }
-         predicted_emotion = self.predict_emotion(text)
          return {
-             'predicted_emotion': predicted_emotion,
-             'vader': vader_scores,
-             'textblob': textblob_analysis
          }

-     def visualize_emotions(self, emotions_dict):
-         emotions_df = pd.DataFrame(list(emotions_dict.items()), columns=['Emotion', 'Percentage'])
          plt.figure(figsize=(8, 4))
-         sns.barplot(x='Emotion', y='Percentage', data=emotions_df)
-         plt.title('Current Emotional State')
          plt.tight_layout()
-         image_path = 'emotional_state.png'
-         plt.savefig(image_path)
          plt.close()
-         return image_path
-
- # ---------------------------
- # LLM Response Generator Module
- # ---------------------------
- class LLMResponder:
-     def __init__(self, model_name="SicariusSicariiStuff/Impish_LLAMA_3B_GGUF"):
-         self.model_name = model_name
-         if BACKEND == "llama_cpp":
-             # Replace with the actual path to your GGUF file.
-             self.llm = Llama(model_path="path/to/your/gguf/file.gguf", n_ctx=1024)
-             self.backend = "llama_cpp"
-         else:
-             # Create a dummy config using LlamaConfig so the model loads despite missing keys.
-             dummy_config = LlamaConfig.from_dict({"model_type": "llama"})
-             try:
-                 self.llm_tokenizer = AutoTokenizer.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
-             except Exception as e:
-                 print(f"Error loading tokenizer from {model_name}; using fallback tokenizer.")
-                 fallback_model = "sentence-transformers/all-MiniLM-L6-v2"
-                 self.llm_tokenizer = AutoTokenizer.from_pretrained(fallback_model, config=dummy_config, trust_remote_code=True)
-             try:
-                 self.llm_model = AutoModelForCausalLM.from_pretrained(model_name, config=dummy_config, trust_remote_code=True)
-             except Exception as e:
-                 print(f"Error loading model from {model_name}; using fallback model.")
-                 fallback_model = "sentence-transformers/all-MiniLM-L6-v2"
-                 self.llm_model = AutoModelForCausalLM.from_pretrained(fallback_model, config=dummy_config, trust_remote_code=True)
-             self.backend = "transformers"
-
-     def generate_response(self, prompt):
-         if self.backend == "llama_cpp":
-             result = self.llm(prompt=prompt, max_tokens=256, temperature=0.95, top_p=0.95)
-             response = result.get("response", "")
-         else:
-             inputs = self.llm_tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)
-             with torch.no_grad():
-                 output_ids = self.llm_model.generate(
-                     inputs.input_ids,
-                     max_length=1024,
-                     do_sample=True,
-                     top_p=0.95,
-                     top_k=50,
-                     pad_token_id=self.llm_tokenizer.eos_token_id
-                 )
-             response = self.llm_tokenizer.decode(output_ids[0], skip_special_tokens=True)
-         return response
-
- # ---------------------------
- # Main Interactive Interface Function
- # ---------------------------
- def interactive_interface(input_text):
-     emotion_analyzer = EmotionalAnalyzer()
-     llm_responder = LLMResponder()
-
-     emotional_data = emotion_analyzer.detailed_emotional_analysis(input_text)
-     current_emotions = {
-         'joy': random.randint(10, 30),
-         'sadness': random.randint(5, 20),
-         'anger': random.randint(10, 25),
-         'fear': random.randint(5, 15),
-         'love': random.randint(10, 30),
-         'surprise': random.randint(5, 20)
-     }
-     emotion_image = emotion_analyzer.visualize_emotions(current_emotions)

      prompt = (
          f"Input: {input_text}\n"
-         f"Detected Emotion: {emotional_data['predicted_emotion']}\n"
-         f"VADER Scores: {emotional_data['vader']}\n"
-         "Provide a thoughtful, emotionally aware response that reflects the above data:"
      )
-     llm_response = llm_responder.generate_response(prompt)
-
-     result = {
-         'detailed_emotional_analysis': emotional_data,
-         'llm_response': llm_response,
-         'emotion_visualization': emotion_image
-     }
-     return result
-
- def gradio_interface(input_text):
-     result = interactive_interface(input_text)
-     output_text = (
-         f"Detailed Emotional Analysis:\n"
-         f" - Predicted Emotion: {result['detailed_emotional_analysis']['predicted_emotion']}\n"
-         f" - VADER: {result['detailed_emotional_analysis']['vader']}\n"
-         f" - TextBlob: {result['detailed_emotional_analysis']['textblob']}\n\n"
-         f"LLM Response:\n{result['llm_response']}"
-     )
-     return output_text, result['emotion_visualization']
-
- # ---------------------------
- # Create Gradio Interface
- # ---------------------------
- iface = gr.Interface(
-     fn=gradio_interface,
-     inputs="text",
-     outputs=["text", gr.Image(type="filepath")],
-     title="Enhanced Emotional Analysis with GGUF LLM Support",
-     description="Enter text to perform detailed emotional analysis and generate an emotionally aware response using the Impish_LLAMA_3B_GGUF model."
- )

- if __name__ == "__main__":
-     iface.launch()
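
The largest removal above is the Gradio-client schema monkey patch. Deployments still on a gradio_client release affected by the boolean-schema bug can keep that guard as a standalone snippet; the sketch below is condensed from the removed lines (the `_safe_` name is illustrative, not part of the commit):

import gradio_client.utils as client_utils

# Boolean JSON (sub-)schemas such as `true` crash the stock converter;
# map them to "bool" and defer everything else to the original function.
_original = client_utils._json_schema_to_python_type

def _safe_json_schema_to_python_type(schema, defs=None):
    if isinstance(schema, bool):
        return "bool"
    return _original(schema, defs)

client_utils._json_schema_to_python_type = _safe_json_schema_to_python_type
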
  import warnings
  import os
  import random
  import gradio as gr
  import torch
+ import pandas as pd
  import matplotlib.pyplot as plt
  import seaborn as sns
  import nltk
  from nltk.sentiment import SentimentIntensityAnalyzer
  from textblob import TextBlob
+ from transformers import (
+     AutoTokenizer,
+     AutoModelForCausalLM,
+     AutoModelForSequenceClassification,
+ )

+ # Suppress warnings and download NLTK data
  warnings.filterwarnings('ignore', category=FutureWarning)
  nltk.download('vader_lexicon', quiet=True)

+ # --- Emotion Analyzer ---
  class EmotionalAnalyzer:
      def __init__(self):
+         self.model = AutoModelForSequenceClassification.from_pretrained(
              "bhadresh-savani/distilbert-base-uncased-emotion"
          )
+         self.tokenizer = AutoTokenizer.from_pretrained(
              "bhadresh-savani/distilbert-base-uncased-emotion"
          )
+         self.labels = ["sadness", "joy", "love", "anger", "fear", "surprise"]
          self.sia = SentimentIntensityAnalyzer()

      def predict_emotion(self, text):
+         inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
          with torch.no_grad():
+             outputs = self.model(**inputs)
+         probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+         return self.labels[torch.argmax(probs).item()]

+     def analyze(self, text):
+         vader_scores = self.sia.polarity_scores(text)
          blob = TextBlob(text)
+         blob_data = {
+             "polarity": blob.sentiment.polarity,
+             "subjectivity": blob.sentiment.subjectivity,
+             "word_count": len(blob.words),
+             "sentence_count": len(blob.sentences),
          }
          return {
+             "emotion": self.predict_emotion(text),
+             "vader": vader_scores,
+             "textblob": blob_data,
          }

+     def plot_emotions(self):
+         simulated_emotions = {
+             "joy": random.randint(10, 30),
+             "sadness": random.randint(5, 20),
+             "anger": random.randint(10, 25),
+             "fear": random.randint(5, 15),
+             "love": random.randint(10, 30),
+             "surprise": random.randint(5, 20),
+         }
+         df = pd.DataFrame(list(simulated_emotions.items()), columns=["Emotion", "Percentage"])
          plt.figure(figsize=(8, 4))
+         sns.barplot(x="Emotion", y="Percentage", data=df)
+         plt.title("Simulated Emotional State")
          plt.tight_layout()
+         path = "emotions.png"
+         plt.savefig(path)
          plt.close()
+         return path
+
+ # --- Text Completion LLM ---
+ tokenizer = AutoTokenizer.from_pretrained("diabolic6045/ELN-Llama-1B-base")
+ model = AutoModelForCausalLM.from_pretrained("diabolic6045/ELN-Llama-1B-base")
+
+ def generate_completion(message, temperature, max_length):
+     inputs = tokenizer(message, return_tensors="pt", truncation=True, max_length=512)
+     input_ids = inputs["input_ids"]
+     current_text = message
+
+     # Cast to int: Gradio sliders can deliver floats, and range() needs an int.
+     for _ in range(int(max_length) - input_ids.shape[1]):
+         with torch.no_grad():
+             outputs = model(input_ids)
+             logits = outputs.logits[:, -1, :] / temperature
+             probs = torch.softmax(logits, dim=-1)
+             next_token = torch.multinomial(probs, num_samples=1)
+
+         if next_token.item() == tokenizer.eos_token_id:
+             break
+
+         input_ids = torch.cat([input_ids, next_token], dim=-1)
+         new_token_text = tokenizer.decode(next_token[0], skip_special_tokens=True)
+         current_text += new_token_text
+         yield current_text
+
+ # --- Emotion-Aware LLM Response ---
+ def emotion_aware_response(input_text):
+     analyzer = EmotionalAnalyzer()
+     results = analyzer.analyze(input_text)
+     image_path = analyzer.plot_emotions()

      prompt = (
          f"Input: {input_text}\n"
+         f"Detected Emotion: {results['emotion']}\n"
+         f"VADER Scores: {results['vader']}\n"
+         "Respond thoughtfully and with emotional awareness:"
      )

+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
+     with torch.no_grad():
+         output_ids = model.generate(
+             inputs.input_ids,
+             max_length=512,
+             do_sample=True,
+             temperature=0.7,
+             top_k=50,
+             top_p=0.95,
+             pad_token_id=tokenizer.eos_token_id
+         )
+     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+
+     summary = (
+         f"Emotion: {results['emotion']}\n"
+         f"VADER: {results['vader']}\n"
+         f"TextBlob: {results['textblob']}\n\n"
+         f"LLM Response:\n{response}"
+     )
+     return summary, image_path
+
+ # --- Gradio Interface ---
+ with gr.Blocks(title="ELN LLaMA 1B Enhanced Demo") as app:
+     gr.Markdown("## 🧠 ELN-LLaMA Emotion-Aware & Completion Interface")
+
+     with gr.Tab("💬 Emotion-Aware Response"):
+         with gr.Row():
+             input_text = gr.Textbox(label="Input Text", lines=4, placeholder="Type something with emotion or meaning...")
+         with gr.Row():
+             text_output = gr.Textbox(label="Response", lines=8)
+             img_output = gr.Image(label="Emotional Visualization")
+         emotion_btn = gr.Button("Generate Emotion-Aware Response")
+         emotion_btn.click(emotion_aware_response, inputs=input_text, outputs=[text_output, img_output])
+
+     with gr.Tab("📝 Text Completion"):
+         comp_text = gr.Textbox(label="Prompt", lines=4)
+         comp_temp = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
+         comp_len = gr.Slider(minimum=50, maximum=500, value=200, step=50, label="Max Length")
+         comp_output = gr.Textbox(label="Generated Completion", lines=8)
+         comp_button = gr.Button("Complete Text")
+         comp_button.click(generate_completion, inputs=[comp_text, comp_temp, comp_len], outputs=comp_output)
+
+ app.launch(share=True)
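
To exercise the new streaming path without launching the interface (importing app.py runs `app.launch(...)` at module level), the token-by-token loop can be restated standalone. A minimal sketch using the model name from this diff; the `stream` helper, prompt, and parameter values are illustrative, not part of the commit:

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Same model as the commit; any causal LM works for the pattern.
tok = AutoTokenizer.from_pretrained("diabolic6045/ELN-Llama-1B-base")
lm = AutoModelForCausalLM.from_pretrained("diabolic6045/ELN-Llama-1B-base")

def stream(prompt, temperature=0.7, max_length=80):
    # Mirrors generate_completion: sample one token at a time and
    # yield the growing text so a UI can render partial output.
    input_ids = tok(prompt, return_tensors="pt").input_ids
    text = prompt
    for _ in range(int(max_length) - input_ids.shape[1]):
        with torch.no_grad():
            logits = lm(input_ids).logits[:, -1, :] / temperature
            next_token = torch.multinomial(torch.softmax(logits, dim=-1), num_samples=1)
        if next_token.item() == tok.eos_token_id:
            break
        input_ids = torch.cat([input_ids, next_token], dim=-1)
        text += tok.decode(next_token[0], skip_special_tokens=True)
        yield text

for partial in stream("The weather today is"):
    print(partial)

Depending on the Gradio version, streaming generator output into the Textbox may also require enabling the queue (for example `app.queue()` before `launch`).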