siddhartharyaai committed (verified)
Commit 8f14416 · Parent(s): 491cccc

Update utils.py

Files changed (1): utils.py (+261 -248)

utils.py CHANGED
@@ -18,21 +18,34 @@ import sys
 
 # --- Add the cloned repository to the Python path ---
 repo_path = os.path.join('/home', 'user', 'open_deep_research')
-print(f"DEBUG: repo_path = {repo_path}")  # Debug print - keep this for now
+print(f"DEBUG: repo_path = {repo_path}")
+
+# Remove /home/user/app and app.py from sys.path if they are present
+# This is crucial to avoid import conflicts.
+if '/home/user/app' in sys.path:
+    sys.path.remove('/home/user/app')
+    print("DEBUG: Removed /home/user/app from sys.path")
+if 'app.py' in sys.path:
+    sys.path.remove('app.py')
+    print("DEBUG: Removed app.py from sys.path")
+
+
 if repo_path not in sys.path:
-    print("DEBUG: Adding repo_path to sys.path")  # Debug print - keep this
-    sys.path.insert(0, repo_path)
+    print("DEBUG: Adding repo_path to sys.path")
+    sys.path.insert(0, repo_path)  # Add to the *beginning*
 else:
-    print("DEBUG: repo_path already in sys.path")  # Debug print - keep this for now
-    print(f"DEBUG: sys.path = {sys.path}")  # Debug print - keep this for now
+    print("DEBUG: repo_path already in sys.path")
+print(f"DEBUG: sys.path = {sys.path}")
+
 
 # --- CORRECT IMPORT (for local cloned repo) ---
 try:
-    from open_deep_research.agent import OpenDeepResearchAgent
+    from open_deep_research.agent import OpenDeepResearchAgent  # Corrected import
     print("DEBUG: Import successful!")
 except ImportError as e:
     print(f"DEBUG: Import failed: {e}")
-    raise
+    raise  # Re-raise the exception so we still see the error in Streamlit
+
 from report_structure import generate_report
 
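The hunk above is the heart of the commit: conflicting entries are pruned from sys.path and the cloned repository is prepended so its package wins import resolution. A quick way to confirm which copy of the package the interpreter will actually load (a local-debugging sketch, not part of the commit; only the standard library is used):

import importlib.util

# Report where Python would load the package from after the sys.path edits.
spec = importlib.util.find_spec("open_deep_research")
print(spec.origin if spec else "open_deep_research not found on sys.path")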
 
@@ -250,254 +263,254 @@ def generate_script(
         print("[ERROR] JSON decoding failed:", e)
         raise ValueError(f"Failed to parse dialogue: {str(e)}")
 
-def transcribe_youtube_video(video_url: str) -> str:
-    print("[LOG] Transcribing YouTube video via RapidAPI:", video_url)
-    video_id_match = re.search(r"(?:v=|\/)([0-9A-Za-z_-]{11})", video_url)
-    if not video_id_match:
-        raise ValueError(f"Invalid YouTube URL: {video_url}, cannot extract video ID.")
-
-    video_id = video_id_match.group(1)
-    print("[LOG] Extracted video ID:", video_id)
-
-    base_url = "https://youtube-transcriptor.p.rapidapi.com/transcript"
-    params = {"video_id": video_id, "lang": "en"}
-    headers = {
-        "x-rapidapi-host": "youtube-transcriptor.p.rapidapi.com",
-        "x-rapidapi-key": os.environ.get("RAPIDAPI_KEY")
-    }
-
-    try:
-        response = requests.get(base_url, headers=headers, params=params, timeout=30)
-        print("[LOG] RapidAPI Response Status Code:", response.status_code)
-        print("[LOG] RapidAPI Response Body:", response.text)
-
-        if response.status_code != 200:
-            raise ValueError(f"RapidAPI transcription error: {response.status_code}, {response.text}")
-
-        data = response.json()
-        if not isinstance(data, list) or not data:
-            raise ValueError(f"Unexpected transcript format or empty transcript: {data}")
-
-        transcript_as_text = data[0].get('transcriptionAsText', '').strip()
-        if not transcript_as_text:
-            raise ValueError("transcriptionAsText field is missing or empty.")
-
-        print("[LOG] Transcript retrieval successful.")
-        print(f"[DEBUG] Transcript Length: {len(transcript_as_text)} characters.")
-        snippet = transcript_as_text[:200] + "..." if len(transcript_as_text) > 200 else transcript_as_text
-        print(f"[DEBUG] Transcript Snippet: {snippet}")
-
-        return transcript_as_text
-    except Exception as e:
-        print("[ERROR] RapidAPI transcription error:", e)
-        raise ValueError(f"Error transcribing YouTube video via RapidAPI: {str(e)}")
-
-def generate_audio_mp3(text: str, speaker: str) -> str:
-    try:
-        import streamlit as st
-        print(f"[LOG] Generating audio for speaker: {speaker}")
-        language_selection = st.session_state.get("language_selection", "English (American)")
-        if language_selection == "English (American)":
-            print(f"[LOG] Using Deepgram for English (American)")
-            if speaker in ["John", "Jane"]:
-                processed_text = text
-            else:
-                processed_text = _preprocess_text_for_tts(text, speaker)
-            deepgram_api_url = "https://api.deepgram.com/v1/speak"
-            params = {"model": "aura-asteria-en"}
-            if speaker == "John":
-                params["model"] = "aura-zeus-en"
-            headers = {
-                "Accept": "audio/mpeg",
-                "Content-Type": "application/json",
-                "Authorization": f"Token {os.environ.get('DEEPGRAM_API_KEY')}"
-            }
-            body = {"text": processed_text}
-            response = requests.post(deepgram_api_url, params=params, headers=headers, json=body, stream=True)
-            if response.status_code != 200:
-                raise ValueError(f"Deepgram TTS error: {response.status_code}, {response.text}")
-            content_type = response.headers.get('Content-Type', '')
-            if 'audio/mpeg' not in content_type:
-                raise ValueError("Unexpected Content-Type from Deepgram.")
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as mp3_file:
-                for chunk in response.iter_content(chunk_size=8192):
-                    if chunk:
-                        mp3_file.write(chunk)
-                mp3_path = mp3_file.name
-            audio_seg = AudioSegment.from_file(mp3_path, format="mp3")
-            audio_seg = effects.normalize(audio_seg)
-            final_mp3_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
-            audio_seg.export(final_mp3_path, format="mp3")
-            if os.path.exists(mp3_path):
-                os.remove(mp3_path)
-            return final_mp3_path
-        else:
-            print(f"[LOG] Using Murf API for language: {language_selection}")
-            if language_selection == "Hinglish":
-                from indic_transliteration.sanscript import transliterate, DEVANAGARI, IAST
-                text = transliterate(text, DEVANAGARI, IAST)
-            api_key = os.environ.get("MURF_API_KEY")
-            headers = {
-                "Content-Type": "application/json",
-                "Accept": "application/json",
-                "api-key": api_key
-            }
-            multi_native_locale = "hi-IN" if language_selection in ["Hinglish", "Hindi"] else "en-IN"
-            if language_selection == "English (Indian)":
-                voice_id = "en-IN-aarav" if speaker == "John" else "en-IN-isha"
-            elif language_selection == "Hindi":
-                voice_id = "hi-IN-kabir" if speaker == "John" else "hi-IN-shweta"
-            elif language_selection == "Hinglish":
-                voice_id = "hi-IN-kabir" if speaker == "John" else "hi-IN-shweta"
-            else:
-                voice_id = "en-IN-aarav" if speaker == "John" else "en-IN-isha"
-            payload = {
-                "audioDuration": 0,
-                "channelType": "MONO",
-                "encodeAsBase64": False,
-                "format": "WAV",
-                "modelVersion": "GEN2",
-                "multiNativeLocale": multi_native_locale,
-                "pitch": 0,
-                "pronunciationDictionary": {},
-                "rate": 0,
-                "sampleRate": 48000,
-                "style": "Conversational",
-                "text": text,
-                "variation": 1,
-                "voiceId": voice_id
-            }
-            response = requests.post("https://api.murf.ai/v1/speech/generate", headers=headers, json=payload)
-            if response.status_code != 200:
-                raise ValueError(f"Murf API error: {response.status_code}, {response.text}")
-            json_resp = response.json()
-            audio_url = json_resp.get("audioFile")
-            if not audio_url:
-                raise ValueError("No audio file URL returned by Murf API")
-            audio_response = requests.get(audio_url)
-            if audio_response.status_code != 200:
-                raise ValueError(f"Error fetching audio from {audio_url}")
-            with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as wav_file:
-                wav_file.write(audio_response.content)
-                wav_path = wav_file.name
-            audio_seg = AudioSegment.from_file(wav_path, format="wav")
-            audio_seg = effects.normalize(audio_seg)
-            final_mp3_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
-            audio_seg.export(final_mp3_path, format="mp3")
-            os.remove(wav_path)
-            return final_mp3_path
-    except Exception as e:
-        print("[ERROR] Error generating audio:", e)
-        raise ValueError(f"Error generating audio: {str(e)}")
-
-def transcribe_youtube_video_OLD_YTDLP(video_url: str) -> str:
-    pass
-
-def _preprocess_text_for_tts(text: str, speaker: str) -> str:
-    text = re.sub(r"\bNo\.\b", "Number", text)
-    text = re.sub(r"\b(?i)SaaS\b", "sass", text)
-    abbreviations_as_words = {"NASA", "NATO", "UNESCO"}
-    def insert_periods_for_abbrev(m):
-        abbr = m.group(0)
-        if abbr in abbreviations_as_words:
-            return abbr
-        return ".".join(list(abbr)) + "."
-    text = re.sub(r"\b([A-Z]{2,})\b", insert_periods_for_abbrev, text)
-    text = re.sub(r"\.\.", ".", text)
-    def remove_periods_for_tts(m):
-        return m.group().replace(".", " ").strip()
-    text = re.sub(r"[A-Z]\.[A-Z](?:\.[A-Z])*\.", remove_periods_for_tts, text)
-    text = re.sub(r"-", " ", text)
-    text = re.sub(r"\b(ha(ha)?|heh|lol)\b", "(* laughs *)", text, flags=re.IGNORECASE)
-    text = re.sub(r"\bsigh\b", "(* sighs *)", text, flags=re.IGNORECASE)
-    text = re.sub(r"\b(groan|moan)\b", "(* groans *)", text, flags=re.IGNORECASE)
-    if speaker != "Jane":
-        def insert_thinking_pause(m):
-            word = m.group(1)
-            if random.random() < 0.3:
-                filler = random.choice(['hmm,', 'well,', 'let me see,'])
-                return f"{word}..., {filler}"
-            else:
-                return f"{word}...,"
-        keywords_pattern = r"\b(important|significant|crucial|point|topic)\b"
-        text = re.sub(keywords_pattern, insert_thinking_pause, text, flags=re.IGNORECASE)
-        conj_pattern = r"\b(and|but|so|because|however)\b"
-        text = re.sub(conj_pattern, lambda m: f"{m.group()}...", text, flags=re.IGNORECASE)
-    text = re.sub(r"\b(uh|um|ah)\b", "", text, flags=re.IGNORECASE)
-    def capitalize_match(m):
-        return m.group().upper()
-    text = re.sub(r'(^\s*\w)|([.!?]\s*\w)', capitalize_match, text)
-    return text.strip()
-
-def _spell_digits(d: str) -> str:
-    digit_map = {
-        '0': 'zero', '1': 'one', '2': 'two', '3': 'three',
-        '4': 'four', '5': 'five', '6': 'six', '7': 'seven',
-        '8': 'eight', '9': 'nine'
-    }
-    return " ".join(digit_map[ch] for ch in d if ch in digit_map)
-
-def mix_with_bg_music(spoken: AudioSegment, custom_music_path=None) -> AudioSegment:
-    if custom_music_path:
-        music_path = custom_music_path
-    else:
-        music_path = "bg_music.mp3"
-
-    try:
-        bg_music = AudioSegment.from_file(music_path, format="mp3")
-    except Exception as e:
-        print("[ERROR] Failed to load background music:", e)
-        return spoken
-
-    bg_music = bg_music - 18.0
-    total_length_ms = len(spoken) + 2000
-    looped_music = AudioSegment.empty()
-    while len(looped_music) < total_length_ms:
-        looped_music += bg_music
-    looped_music = looped_music[:total_length_ms]
-    final_mix = looped_music.overlay(spoken, position=2000)
-    return final_mix
-
-def call_groq_api_for_qa(system_prompt: str) -> str:
-    # Kept for use, changed model
-    try:
-        headers = {
-            "Authorization": f"Bearer {os.environ.get('GROQ_API_KEY')}",  # Use Groq API key
-            "Content-Type": "application/json",
-            "Accept": "application/json"
-        }
-        data = {
-            "model": "deepseek-r1-distill-llama-70b",  # Using DeepSeek
-            "messages": [{"role": "user", "content": system_prompt}],
-            "max_tokens": 512,
-            "temperature": 0.7
-        }
-        response = requests.post("https://api.groq.com/openai/v1/chat/completions",  # Groq endpoint
-                                 headers=headers, data=json.dumps(data))
-        response.raise_for_status()
-        return response.json()["choices"][0]["message"]["content"].strip()
-    except Exception as e:
-        print("[ERROR] Groq API error:", e)
-        fallback = {"speaker": "John", "text": "I'm sorry, I'm having trouble answering right now."}
-        return json.dumps(fallback)
-
-# --- Agent and Tavily Integration ---
-def run_research_agent(topic: str, report_type: str = "research_report", max_results: int = 20) -> str:
-    """
-    Runs the new research agent to generate a research report.
-    """
-    print(f"[LOG] Starting research agent for topic: {topic}")
-    try:
-        # Use the Groq API key here
-        agent = OpenDeepResearchAgent(query=topic, max_results=max_results, api_key=os.environ.get("TAVILY_API_KEY"))
-        report_content = agent.run()
-        print("[LOG] Research agent completed successfully.")
-
-        # Now, use the report_structure module to generate the structured report.
-        structured_report = generate_report(report_content)
-        return structured_report
-
-    except Exception as e:
-        print(f"[ERROR] Error in research agent: {e}")
-        return f"Sorry, I encountered an error during research: {e}"
+def transcribe_youtube_video(video_url: str) -> str:
+    print("[LOG] Transcribing YouTube video via RapidAPI:", video_url)
+    video_id_match = re.search(r"(?:v=|\/)([0-9A-Za-z_-]{11})", video_url)
+    if not video_id_match:
+        raise ValueError(f"Invalid YouTube URL: {video_url}, cannot extract video ID.")
+
+    video_id = video_id_match.group(1)
+    print("[LOG] Extracted video ID:", video_id)
+
+    base_url = "https://youtube-transcriptor.p.rapidapi.com/transcript"
+    params = {"video_id": video_id, "lang": "en"}
+    headers = {
+        "x-rapidapi-host": "youtube-transcriptor.p.rapidapi.com",
+        "x-rapidapi-key": os.environ.get("RAPIDAPI_KEY")
+    }
+
+    try:
+        response = requests.get(base_url, headers=headers, params=params, timeout=30)
+        print("[LOG] RapidAPI Response Status Code:", response.status_code)
+        print("[LOG] RapidAPI Response Body:", response.text)
+
+        if response.status_code != 200:
+            raise ValueError(f"RapidAPI transcription error: {response.status_code}, {response.text}")
+
+        data = response.json()
+        if not isinstance(data, list) or not data:
+            raise ValueError(f"Unexpected transcript format or empty transcript: {data}")
+
+        transcript_as_text = data[0].get('transcriptionAsText', '').strip()
+        if not transcript_as_text:
+            raise ValueError("transcriptionAsText field is missing or empty.")
+
+        print("[LOG] Transcript retrieval successful.")
+        print(f"[DEBUG] Transcript Length: {len(transcript_as_text)} characters.")
+        snippet = transcript_as_text[:200] + "..." if len(transcript_as_text) > 200 else transcript_as_text
+        print(f"[DEBUG] Transcript Snippet: {snippet}")
+
+        return transcript_as_text
+    except Exception as e:
+        print("[ERROR] RapidAPI transcription error:", e)
+        raise ValueError(f"Error transcribing YouTube video via RapidAPI: {str(e)}")
+
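Video-ID extraction above hinges on a single regex. A standalone illustration of what it accepts (the URLs are illustrative, not from the commit; any v= or path segment followed by an 11-character ID matches):

import re

pattern = r"(?:v=|\/)([0-9A-Za-z_-]{11})"
for url in ("https://www.youtube.com/watch?v=dQw4w9WgXcQ",
            "https://youtu.be/dQw4w9WgXcQ",
            "not a video link"):
    m = re.search(pattern, url)
    print(url, "->", m.group(1) if m else "no video ID")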
+def generate_audio_mp3(text: str, speaker: str) -> str:
+    try:
+        import streamlit as st
+        print(f"[LOG] Generating audio for speaker: {speaker}")
+        language_selection = st.session_state.get("language_selection", "English (American)")
+        if language_selection == "English (American)":
+            print(f"[LOG] Using Deepgram for English (American)")
+            if speaker in ["John", "Jane"]:
+                processed_text = text
+            else:
+                processed_text = _preprocess_text_for_tts(text, speaker)
+            deepgram_api_url = "https://api.deepgram.com/v1/speak"
+            params = {"model": "aura-asteria-en"}
+            if speaker == "John":
+                params["model"] = "aura-zeus-en"
+            headers = {
+                "Accept": "audio/mpeg",
+                "Content-Type": "application/json",
+                "Authorization": f"Token {os.environ.get('DEEPGRAM_API_KEY')}"
+            }
+            body = {"text": processed_text}
+            response = requests.post(deepgram_api_url, params=params, headers=headers, json=body, stream=True)
+            if response.status_code != 200:
+                raise ValueError(f"Deepgram TTS error: {response.status_code}, {response.text}")
+            content_type = response.headers.get('Content-Type', '')
+            if 'audio/mpeg' not in content_type:
+                raise ValueError("Unexpected Content-Type from Deepgram.")
+            with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as mp3_file:
+                for chunk in response.iter_content(chunk_size=8192):
+                    if chunk:
+                        mp3_file.write(chunk)
+                mp3_path = mp3_file.name
+            audio_seg = AudioSegment.from_file(mp3_path, format="mp3")
+            audio_seg = effects.normalize(audio_seg)
+            final_mp3_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
+            audio_seg.export(final_mp3_path, format="mp3")
+            if os.path.exists(mp3_path):
+                os.remove(mp3_path)
+            return final_mp3_path
+        else:
+            print(f"[LOG] Using Murf API for language: {language_selection}")
+            if language_selection == "Hinglish":
+                from indic_transliteration.sanscript import transliterate, DEVANAGARI, IAST
+                text = transliterate(text, DEVANAGARI, IAST)
+            api_key = os.environ.get("MURF_API_KEY")
+            headers = {
+                "Content-Type": "application/json",
+                "Accept": "application/json",
+                "api-key": api_key
+            }
+            multi_native_locale = "hi-IN" if language_selection in ["Hinglish", "Hindi"] else "en-IN"
+            if language_selection == "English (Indian)":
+                voice_id = "en-IN-aarav" if speaker == "John" else "en-IN-isha"
+            elif language_selection == "Hindi":
+                voice_id = "hi-IN-kabir" if speaker == "John" else "hi-IN-shweta"
+            elif language_selection == "Hinglish":
+                voice_id = "hi-IN-kabir" if speaker == "John" else "hi-IN-shweta"
+            else:
+                voice_id = "en-IN-aarav" if speaker == "John" else "en-IN-isha"
+            payload = {
+                "audioDuration": 0,
+                "channelType": "MONO",
+                "encodeAsBase64": False,
+                "format": "WAV",
+                "modelVersion": "GEN2",
+                "multiNativeLocale": multi_native_locale,
+                "pitch": 0,
+                "pronunciationDictionary": {},
+                "rate": 0,
+                "sampleRate": 48000,
+                "style": "Conversational",
+                "text": text,
+                "variation": 1,
+                "voiceId": voice_id
+            }
+            response = requests.post("https://api.murf.ai/v1/speech/generate", headers=headers, json=payload)
+            if response.status_code != 200:
+                raise ValueError(f"Murf API error: {response.status_code}, {response.text}")
+            json_resp = response.json()
+            audio_url = json_resp.get("audioFile")
+            if not audio_url:
+                raise ValueError("No audio file URL returned by Murf API")
+            audio_response = requests.get(audio_url)
+            if audio_response.status_code != 200:
+                raise ValueError(f"Error fetching audio from {audio_url}")
+            with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as wav_file:
+                wav_file.write(audio_response.content)
+                wav_path = wav_file.name
+            audio_seg = AudioSegment.from_file(wav_path, format="wav")
+            audio_seg = effects.normalize(audio_seg)
+            final_mp3_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
+            audio_seg.export(final_mp3_path, format="mp3")
+            os.remove(wav_path)
+            return final_mp3_path
+    except Exception as e:
+        print("[ERROR] Error generating audio:", e)
+        raise ValueError(f"Error generating audio: {str(e)}")
+
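Because the language switch comes from Streamlit session state rather than a parameter, generate_audio_mp3 can only be exercised inside an app context. A minimal caller sketch, assuming this runs in a Streamlit script with DEEPGRAM_API_KEY set in the environment:

import streamlit as st

st.session_state["language_selection"] = "English (American)"
# Returns the path of a normalized temporary MP3; the caller owns (and should delete) the file.
mp3_path = generate_audio_mp3("Welcome back to the show.", speaker="Jane")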
+def transcribe_youtube_video_OLD_YTDLP(video_url: str) -> str:
+    pass
+
+def _preprocess_text_for_tts(text: str, speaker: str) -> str:
+    text = re.sub(r"\bNo\.\b", "Number", text)
+    text = re.sub(r"\b(?i)SaaS\b", "sass", text)
+    abbreviations_as_words = {"NASA", "NATO", "UNESCO"}
+    def insert_periods_for_abbrev(m):
+        abbr = m.group(0)
+        if abbr in abbreviations_as_words:
+            return abbr
+        return ".".join(list(abbr)) + "."
+    text = re.sub(r"\b([A-Z]{2,})\b", insert_periods_for_abbrev, text)
+    text = re.sub(r"\.\.", ".", text)
+    def remove_periods_for_tts(m):
+        return m.group().replace(".", " ").strip()
+    text = re.sub(r"[A-Z]\.[A-Z](?:\.[A-Z])*\.", remove_periods_for_tts, text)
+    text = re.sub(r"-", " ", text)
+    text = re.sub(r"\b(ha(ha)?|heh|lol)\b", "(* laughs *)", text, flags=re.IGNORECASE)
+    text = re.sub(r"\bsigh\b", "(* sighs *)", text, flags=re.IGNORECASE)
+    text = re.sub(r"\b(groan|moan)\b", "(* groans *)", text, flags=re.IGNORECASE)
+    if speaker != "Jane":
+        def insert_thinking_pause(m):
+            word = m.group(1)
+            if random.random() < 0.3:
+                filler = random.choice(['hmm,', 'well,', 'let me see,'])
+                return f"{word}..., {filler}"
+            else:
+                return f"{word}...,"
+        keywords_pattern = r"\b(important|significant|crucial|point|topic)\b"
+        text = re.sub(keywords_pattern, insert_thinking_pause, text, flags=re.IGNORECASE)
+        conj_pattern = r"\b(and|but|so|because|however)\b"
+        text = re.sub(conj_pattern, lambda m: f"{m.group()}...", text, flags=re.IGNORECASE)
+    text = re.sub(r"\b(uh|um|ah)\b", "", text, flags=re.IGNORECASE)
+    def capitalize_match(m):
+        return m.group().upper()
+    text = re.sub(r'(^\s*\w)|([.!?]\s*\w)', capitalize_match, text)
+    return text.strip()
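One genuine pitfall in _preprocess_text_for_tts: r"\b(?i)SaaS\b" places a global inline flag mid-pattern, which Python 3.11+ rejects with re.error (earlier versions only emit a DeprecationWarning). If this line needs to run on a current interpreter, a scoped inline flag preserves the intent:

import re

# Scoped inline flag (valid since Python 3.6) instead of a mid-pattern global (?i).
print(re.sub(r"\b(?i:SaaS)\b", "sass", "Our SaaS and saas tiers"))
# -> Our sass and sass tiers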
+
+def _spell_digits(d: str) -> str:
+    digit_map = {
+        '0': 'zero', '1': 'one', '2': 'two', '3': 'three',
+        '4': 'four', '5': 'five', '6': 'six', '7': 'seven',
+        '8': 'eight', '9': 'nine'
+    }
+    return " ".join(digit_map[ch] for ch in d if ch in digit_map)
+
+def mix_with_bg_music(spoken: AudioSegment, custom_music_path=None) -> AudioSegment:
+    if custom_music_path:
+        music_path = custom_music_path
+    else:
+        music_path = "bg_music.mp3"
+
+    try:
+        bg_music = AudioSegment.from_file(music_path, format="mp3")
+    except Exception as e:
+        print("[ERROR] Failed to load background music:", e)
+        return spoken
+
+    bg_music = bg_music - 18.0
+    total_length_ms = len(spoken) + 2000
+    looped_music = AudioSegment.empty()
+    while len(looped_music) < total_length_ms:
+        looped_music += bg_music
+    looped_music = looped_music[:total_length_ms]
+    final_mix = looped_music.overlay(spoken, position=2000)
+    return final_mix
+
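Together the audio helpers form a small pipeline: synthesize each dialogue line to MP3, concatenate, then bed the result over music. A sketch of that flow, assuming bg_music.mp3 sits next to the app and pydub's ffmpeg backend is installed:

from pydub import AudioSegment

spoken = AudioSegment.from_file(generate_audio_mp3("Hello and welcome.", "John"), format="mp3")
spoken += AudioSegment.from_file(generate_audio_mp3("Great to be here.", "Jane"), format="mp3")
mixed = mix_with_bg_music(spoken)  # music ducked by 18 dB, speech enters after the 2-second lead-in
mixed.export("episode.mp3", format="mp3")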
+def call_groq_api_for_qa(system_prompt: str) -> str:
+    # Kept for use, changed model
+    try:
+        headers = {
+            "Authorization": f"Bearer {os.environ.get('GROQ_API_KEY')}",  # Use Groq API key
+            "Content-Type": "application/json",
+            "Accept": "application/json"
+        }
+        data = {
+            "model": "deepseek-r1-distill-llama-70b",  # Using DeepSeek
+            "messages": [{"role": "user", "content": system_prompt}],
+            "max_tokens": 512,
+            "temperature": 0.7
+        }
+        response = requests.post("https://api.groq.com/openai/v1/chat/completions",  # Groq endpoint
+                                 headers=headers, data=json.dumps(data))
+        response.raise_for_status()
+        return response.json()["choices"][0]["message"]["content"].strip()
+    except Exception as e:
+        print("[ERROR] Groq API error:", e)
+        fallback = {"speaker": "John", "text": "I'm sorry, I'm having trouble answering right now."}
+        return json.dumps(fallback)
+
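Note the asymmetric contract of call_groq_api_for_qa: success returns raw model text, while any failure returns a JSON-encoded fallback turn. A defensive caller therefore tries to parse first (a sketch; a model reply that happens to be valid JSON would also take the first branch):

import json

raw = call_groq_api_for_qa("Answer briefly: what is a podcast?")
try:
    answer = json.loads(raw).get("text", raw)   # fallback path: {"speaker": ..., "text": ...}
except (json.JSONDecodeError, AttributeError):
    answer = raw                                # normal path: plain model text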
+# --- Agent and Tavily Integration ---
+def run_research_agent(topic: str, report_type: str = "research_report", max_results: int = 20) -> str:
+    """
+    Runs the new research agent to generate a research report.
+    """
+    print(f"[LOG] Starting research agent for topic: {topic}")
+    try:
+        # Use the Groq API key here
+        agent = OpenDeepResearchAgent(query=topic, max_results=max_results, api_key=os.environ.get("TAVILY_API_KEY"))
+        report_content = agent.run()
+        print("[LOG] Research agent completed successfully.")
+
+        # Now, use the report_structure module to generate the structured report.
+        structured_report = generate_report(report_content)
+        return structured_report
+
+    except Exception as e:
+        print(f"[ERROR] Error in research agent: {e}")
+        return f"Sorry, I encountered an error during research: {e}"
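Finally, a call-site sketch for the new entry point. Two details worth knowing before wiring it up: report_type is accepted but never consumed inside the function, and despite the in-line comment about Groq, the key passed as api_key is TAVILY_API_KEY, which must therefore be set in the environment:

report = run_research_agent("open-source text-to-speech systems")
print(report[:500])  # structured report text, or the "Sorry, ..." error string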