aaronzwe committed on
Commit 3d624b6 · verified · 1 Parent(s): 73382a4

Create app.py

Files changed (1)
  1. app.py +525 -0
app.py ADDED
import os
import sys
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import tempfile
import gtts
from textblob import TextBlob
import json
import psutil
import logging
from tenacity import retry, stop_after_attempt, wait_exponential
from contextlib import contextmanager
import plotly.graph_objects as go
import gc

# Setup logging
logging.basicConfig(filename="meroni.log", level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s")

# Configuration
MODEL_REPO = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
MODEL_FILE = "tinyllama-1.1b-chat-v1.0.Q2_K.gguf"  # Q2_K for minimal memory
MODELS_DIR = "models"
MAX_HISTORY = 5
DEFAULT_N_CTX = 512   # Low to avoid crashes
DEFAULT_N_BATCH = 64  # Low for stability

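# NOTE: Q2_K is the smallest quantization published for this model and keeps
# the download well under 1 GB, at a noticeable cost in reply quality; with
# more RAM available, a Q4_K_M file from the same repo is a reasonable upgrade.
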
# Ensure models directory exists
os.makedirs(MODELS_DIR, exist_ok=True)

# Dependency check
def check_dependencies():
    required = ["gradio", "huggingface_hub", "llama_cpp", "gtts", "textblob", "psutil", "tenacity", "plotly"]
    missing = []
    for module in required:
        try:
            __import__(module)
        except ImportError:
            missing.append(module)
    if missing:
        return (False, f"Missing dependencies: {', '.join(missing)}. Please include them in requirements.txt.")
    return (True, "All dependencies installed.")

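# NOTE: the names above are import names, not pip package names; e.g. the
# "llama_cpp" module is installed as "llama-cpp-python" in requirements.txt.
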
# Check system resources
def suggest_performance_mode():
    try:
        mem = psutil.virtual_memory()
        available_gb = mem.available / (1024 ** 3)
        return available_gb > 4, available_gb
    except Exception as e:
        logging.error(f"Resource check failed: {e}")
        return False, 0

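# NOTE: suggest_performance_mode() is not called anywhere yet; it could be
# used to pick larger n_ctx/n_batch defaults when more than ~4 GB is free.
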
# Download model
def download_model():
    try:
        model_path = os.path.join(MODELS_DIR, MODEL_FILE)
        if not os.path.exists(model_path):
            logging.info(f"Downloading model {MODEL_FILE}...")
            model_path = hf_hub_download(
                repo_id=MODEL_REPO,
                filename=MODEL_FILE,
                local_dir=MODELS_DIR
            )
            logging.info(f"Model downloaded to {model_path}")
        return model_path
    except Exception as e:
        logging.error(f"Model download failed: {e}")
        raise Exception("Failed to download model. Check internet connection.")

# Context manager for Llama. Note it closes and frees the model on exit, so it
# suits one-off loads; initialize_model() below builds the long-lived instance.
@contextmanager
def llama_context(*args, **kwargs):
    llm = None
    try:
        llm = Llama(*args, **kwargs)
        yield llm
    except MemoryError:
        logging.error("Out of memory during model loading.")
        raise Exception("Not enough memory.")
    except Exception as e:
        logging.error(f"Model loading failed: {e}")
        raise Exception("Failed to load model. Check meroni.log.")
    finally:
        if llm is not None:
            try:
                llm.close()
                del llm
                gc.collect()
            except Exception as e:
                logging.error(f"Model cleanup failed: {e}")

# Initialize model with retry
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=5))
def initialize_model(n_ctx=DEFAULT_N_CTX, n_batch=DEFAULT_N_BATCH):
    model_path = download_model()
    try:
        # Construct the Llama directly rather than via llama_context: the
        # context manager closes the model on exit, which would hand the
        # caller an already-freed handle.
        return Llama(
            model_path=model_path,
            n_ctx=n_ctx,
            n_threads=2,
            n_batch=n_batch,
            verbose=False
        )
    except MemoryError:
        logging.error("Out of memory during model loading.")
        raise Exception("Not enough memory.")
    except Exception as e:
        logging.warning(f"Model initialization failed: {e}")
        raise

# Fallback response
def fallback_response(message):
    return f"I'm having trouble processing, but I hear you saying: '{message}'. How can I support you further?"

# System prompt
SYSTEM_PROMPT = """You are Meroni, a gentle, emotionally supportive AI companion.
Listen attentively, validate the user's feelings, and respond with warmth, empathy, and understanding.
Provide thoughtful, concise replies (2-4 sentences) that avoid generic phrases or emojis unless requested.
Use the conversation history and summary to make responses personal, and adapt to the user's emotional tone."""

# Sentiment analysis with keywords
def get_sentiment(message):
    try:
        keywords = {
            "sad": ["sad", "down", "hurt"],
            "angry": ["mad", "angry", "frustrated"],
            "anxious": ["worried", "nervous", "scared"],
        }
        message_lower = message.lower()
        for tone, words in keywords.items():
            if any(w in message_lower for w in words):
                return tone
        # TextBlob polarity ranges from -1.0 (negative) to 1.0 (positive)
        analysis = TextBlob(message)
        if analysis.sentiment.polarity > 0:
            return "positive"
        elif analysis.sentiment.polarity < 0:
            return "negative"
        return "neutral"
    except Exception as e:
        logging.error(f"Sentiment analysis error: {e}")
        return "neutral"

# Summarize history
def summarize_history(history):
    if not history:
        return ""
    summary = []
    negative_count = 0
    for msg in history[-3:]:
        if msg["role"] == "user":
            sentiment = get_sentiment(msg["content"])
            if sentiment in ["sad", "angry", "anxious"]:
                negative_count += 1
    if negative_count > 1:
        summary.append(f"User has expressed {negative_count} negative feelings recently.")
    return "Summary: " + " ".join(summary) + "\n" if summary else ""

# Format prompt
def format_prompt(message, history, sentiment):
    prompt = SYSTEM_PROMPT + "\n\n"
    prompt += summarize_history(history)
    if sentiment in ["sad", "angry", "anxious"]:
        prompt += f"The user seems {sentiment}. Provide extra comfort and support.\n"
    elif sentiment == "positive":
        prompt += "The user seems happy. Reflect their positive mood.\n"
    for msg in history[-MAX_HISTORY:]:
        if msg["role"] == "user" and msg["content"]:
            prompt += f"User: {msg['content']}\n"
        if msg["role"] == "assistant" and msg["content"]:
            prompt += f"Meroni: {msg['content']}\n\n"
    prompt += f"User: {message}\nMeroni: "
    return prompt

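# NOTE: this builds a plain "User:/Meroni:" completion prompt rather than the
# model's native chat template; TinyLlama-Chat generally follows it, but
# llm.create_chat_completion() with role dicts would use the template directly.
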
# Generate response
def generate_response(message, history, llm):
    if llm is None:
        logging.warning("Model unavailable, using fallback.")
        return fallback_response(message)
    try:
        sentiment = get_sentiment(message)
        prompt = format_prompt(message, history, sentiment)
        response = llm(
            prompt,
            max_tokens=128,
            temperature=0.7,
            top_p=0.9,
            stop=["User:", "\n\n"],
            echo=False
        )
        reply = response['choices'][0]['text'].strip()
        return reply or "I'm here for you. Could you share a bit more?"
    except MemoryError:
        logging.error("Memory error during generation.")
        return "I'm running low on memory. Try a shorter message."
    except Exception as e:
        logging.error(f"Response generation error: {e}")
        return fallback_response(message)

# Generate reflective prompt
def generate_reflective_prompt(history):
    try:
        if not history:
            return "What's on your mind today? How are you feeling about it?"
        last_user_msg = next((m["content"] for m in reversed(history) if m["role"] == "user"), "")
        sentiment = get_sentiment(last_user_msg)
        if sentiment in ["sad", "angry", "anxious"]:
            return f"Why do you think you're feeling {sentiment}? What might help you feel a bit better?"
        return "What's been going well for you lately? How can we explore that more?"
    except Exception as e:
        logging.error(f"Reflective prompt error: {e}")
        return "Let's reflect together. What's on your mind?"

# Text-to-speech with retry
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
def text_to_speech(text):
    try:
        if not text:
            return None
        # NamedTemporaryFile(delete=False) just reserves a path; close the
        # handle before gTTS writes to it (avoids reopen issues on Windows)
        with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as fp:
            temp_filename = fp.name
        tts = gtts.gTTS(text)
        tts.save(temp_filename)
        return temp_filename
    except Exception as e:
        logging.error(f"Text-to-speech error: {e}")
        raise Exception("Text-to-speech failed.")

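# gTTS calls Google's online TTS endpoint, so it needs network access; the
# retry above smooths over transient failures but cannot help offline.
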
# Gradio interface
def create_interface():
    global llm
    llm = None  # Lazy loading
    with gr.Blocks(css="""
        footer {visibility: hidden}
        :root {--primary-color: #4CAF50;}
        .dark {--primary-color: #333; --background: #222;}
        .blue {--primary-color: #2196F3;}
        .yellow {--primary-color: #FFC107;}
    """) as demo:
        gr.Markdown("""
        # 🌸 Meroni – Your Calm AI Companion

        A gentle AI for mental wellness. Share your thoughts, feelings, or simply chat.
        Meroni remembers your conversations privately on your device!
        """)

        # Dependency status
        dep_status, dep_message = check_dependencies()
        if not dep_status:
            gr.Markdown(f"⚠️ {dep_message}")

        # Settings
        with gr.Row():
            n_ctx_slider = gr.Slider(minimum=256, maximum=1024, step=256, label="Context Size", value=DEFAULT_N_CTX)
            n_batch_slider = gr.Slider(minimum=32, maximum=128, step=32, label="Batch Size", value=DEFAULT_N_BATCH)
            # elem_id gives the theme-switch script below a stable DOM hook
            theme = gr.Dropdown(choices=["light", "dark", "blue", "yellow"], label="Theme", value="light", elem_id="theme")

        # Hidden state for history
        history_loader = gr.State(value="[]")

        # Mood tracker
        mood_plot = gr.Plot(label="Mood Trends")

        # Chatbot
        chatbot = gr.Chatbot(
            label="Conversation with Meroni",
            elem_id="meroni-chat",
            height=400,
            type="messages"
        )

        # Audio output
        audio_output = gr.Audio(
            label="Meroni's Voice",
            autoplay=True
        )

        # File output for export
        file_output = gr.File(label="Exported History")

        with gr.Row():
            msg = gr.Textbox(
                placeholder="Type your thoughts here...",
                lines=2,
                max_lines=10,
                container=True
            )
            submit_btn = gr.Button("Send", variant="primary")

        with gr.Row():
            clear_btn = gr.Button("New Conversation", variant="secondary")
            load_btn = gr.Button("Load Previous Conversation", variant="secondary")
            reflect_btn = gr.Button("Reflect", variant="secondary")
            export_btn = gr.Button("Download History", variant="secondary")
            speak_toggle = gr.Checkbox(label="Enable Speech", value=True)

        # JavaScript for local storage, encryption, and theme
        gr.HTML("""
        <script src="https://cdnjs.cloudflare.com/ajax/libs/crypto-js/4.1.1/crypto-js.min.js"></script>
        <script>
        // Encryption key. Storing the key in localStorage beside the
        // ciphertext only deters casual inspection; it is not strong security.
        let encryptionKey = localStorage.getItem('meroni_key');
        if (!encryptionKey) {
            encryptionKey = prompt('Set a password for your chat history:') || 'default';
            localStorage.setItem('meroni_key', encryptionKey);
        }

        // Local storage
        document.addEventListener('DOMContentLoaded', function() {
            let savedHistory = localStorage.getItem('meroni_history') || "[]";
            try {
                savedHistory = CryptoJS.AES.decrypt(savedHistory, encryptionKey).toString(CryptoJS.enc.Utf8);
            } catch (e) {
                console.error('Decryption failed:', e);
            }
            try {
                // gr.State renders no DOM node with this id, so this lookup
                // can fail; the catch keeps the page usable either way
                document.getElementById('history_loader').value = savedHistory;
            } catch (e) {
                console.error('History loader not found:', e);
            }
        });

        function saveHistory() {
            try {
                const history = document.querySelector('[data-testid="chatbot"]').value;
                const encrypted = CryptoJS.AES.encrypt(JSON.stringify(history), encryptionKey).toString();
                localStorage.setItem('meroni_history', encrypted);
            } catch (e) {
                console.error('Failed to save history:', e);
                alert('Could not save history. Your browser may restrict local storage.');
            }
        }
        const observer = new MutationObserver(saveHistory);
        try {
            observer.observe(document.querySelector('[data-testid="chatbot"]'), { childList: true, subtree: true });
        } catch (e) {
            console.error('Observer setup failed:', e);
        }

        // Theme switching (targets the elem_id set on the dropdown above)
        document.getElementById('theme').addEventListener('change', function() {
            const theme = this.value;
            document.body.className = theme;
            localStorage.setItem('meroni_theme', theme);
        });

        // Browser TTS fallback
        function browserTTS(text) {
            try {
                const utterance = new SpeechSynthesisUtterance(text);
                utterance.lang = 'en-US';
                window.speechSynthesis.speak(utterance);
            } catch (e) {
                console.error('Browser TTS failed:', e);
            }
        }
        </script>
        """)
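
        # NOTE: some Gradio versions do not execute <script> tags injected via
        # gr.HTML (they are added through innerHTML); if the handlers above
        # never fire, the js=/head= parameters of gr.Blocks are the supported
        # way to ship this script.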

        # Onboarding modal
        gr.HTML("""
        <script>
        if (!localStorage.getItem('meroni_onboarded')) {
            alert('Welcome to Meroni!\\n1. Chat with your AI companion.\\n2. Save chats privately on your device.\\n3. Reflect anytime with journal prompts.');
            localStorage.setItem('meroni_onboarded', 'true');
        }
        </script>
        """)

        # Event handlers
        def user_input(message, history):
            if not message.strip():
                return "", history or []
            history = history or []
            if history and not all("role" in m and "content" in m for m in history):
                logging.error("Invalid history format")
                history = []
            return "", history + [{"role": "user", "content": message}]

        def bot_response(history, speak_enabled, n_ctx, n_batch):
            global llm
            if not history or history[-1].get("role") != "user":
                return history or [], None
            try:
                # Lazy-load the model on first use so startup stays fast
                if llm is None:
                    llm = initialize_model(int(n_ctx), int(n_batch))
                user_message = history[-1]["content"]
                bot_message = generate_response(user_message, history[:-1], llm)
                history.append({"role": "assistant", "content": bot_message})
                if speak_enabled:
                    try:
                        speech_file = text_to_speech(bot_message)
                        return history, speech_file
                    except Exception:
                        # gr.Audio expects a file path, so skip audio rather
                        # than pass a sentinel string; browserTTS() in the page
                        # script remains available as a manual fallback
                        logging.warning("gTTS failed, skipping audio output.")
                        return history, None
                return history, None
            except Exception as e:
                logging.error(f"Bot response error: {e}")
                history.append({"role": "assistant", "content": f"Sorry, something went wrong: {str(e)}. Try a shorter message."})
                return history, None

        def update_mood_tracker(history):
            try:
                moods = [get_sentiment(m["content"]) for m in history if m["role"] == "user"]
                data = [{"mood": m, "count": moods.count(m)} for m in set(moods) if m != "neutral"]
                if not data:
                    return None
                fig = go.Figure(data=[
                    go.Bar(x=[d["mood"] for d in data], y=[d["count"] for d in data])
                ])
                fig.update_layout(title="Mood Trends", xaxis_title="Mood", yaxis_title="Count")
                return fig
            except Exception as e:
                logging.error(f"Mood tracker error: {e}")
                return None

        def clear_history():
            return []

        def load_history(history_loader):
            try:
                history = json.loads(history_loader)
                # Return the state as a JSON string so a second load still parses
                return history, json.dumps(history)
            except Exception as e:
                logging.error(f"History load error: {e}")
                return [], "[]"

        def reflect(history):
            try:
                prompt = generate_reflective_prompt(history)
                history = history or []
                history.append({"role": "assistant", "content": prompt})
                return history, None
            except Exception as e:
                logging.error(f"Reflect error: {e}")
                return history or [], None

        def welcome(speak_enabled):
            welcome_msg = "Hello! I'm Meroni, your calm AI companion. How are you feeling today?"
            history = [{"role": "assistant", "content": welcome_msg}]
            if speak_enabled:
                try:
                    speech_file = text_to_speech(welcome_msg)
                    return history, speech_file
                except Exception:
                    history[0]["content"] += " (Sorry, no audio.)"
                    return history, None
            return history, None

        def export_history(history):
            try:
                if not history:
                    return None
                with tempfile.NamedTemporaryFile(delete=False, suffix='.json', mode='w') as f:
                    json.dump(history, f)
                return f.name
            except Exception as e:
                logging.error(f"Export history error: {e}")
                return None

        def update_theme(theme):
            # Theming happens client-side in the script above; the server side
            # only needs a no-op so the change event has a handler
            return None

        # Connect components
        submit_event = msg.submit(
            user_input, [msg, chatbot], [msg, chatbot], queue=False
        ).then(
            bot_response, [chatbot, speak_toggle, n_ctx_slider, n_batch_slider], [chatbot, audio_output], queue=True
        ).then(
            update_mood_tracker, chatbot, mood_plot
        )

        submit_btn.click(
            user_input, [msg, chatbot], [msg, chatbot], queue=False
        ).then(
            bot_response, [chatbot, speak_toggle, n_ctx_slider, n_batch_slider], [chatbot, audio_output], queue=True
        ).then(
            update_mood_tracker, chatbot, mood_plot
        )

        clear_btn.click(clear_history, None, chatbot).then(
            welcome, speak_toggle, [chatbot, audio_output]
        ).then(
            update_mood_tracker, chatbot, mood_plot
        )

        load_btn.click(
            load_history, history_loader, [chatbot, history_loader]
        ).then(
            update_mood_tracker, chatbot, mood_plot
        )

        reflect_btn.click(
            reflect, chatbot, [chatbot, audio_output]
        ).then(
            update_mood_tracker, chatbot, mood_plot
        )

        export_btn.click(
            export_history, chatbot, file_output
        )

        theme.change(
            update_theme, theme, None
        )

        demo.load(welcome, speak_toggle, [chatbot, audio_output]).then(
            update_mood_tracker, chatbot, mood_plot
        )

    return demo

if __name__ == "__main__":
    try:
        # Check dependencies
        dep_status, dep_message = check_dependencies()
        if not dep_status:
            print(dep_message)
            sys.exit(1)

        # Launch interface
        demo = create_interface()
        demo.launch()
    except Exception as e:
        logging.error(f"App launch failed: {e}")
        print(f"Failed to launch Meroni: {e}. Check meroni.log for details.")
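
# The imports above imply a requirements.txt roughly like the following
# (unpinned suggestions; no requirements file is part of this commit):
#   gradio
#   huggingface-hub
#   llama-cpp-python
#   gTTS
#   textblob
#   psutil
#   tenacity
#   plotly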