Update utils.py
utils.py  CHANGED
@@ -26,20 +26,32 @@ def generate_script(prompt, text, tone, length, host_name, guest_name, sponsor_s
     Calls the LLM to generate a structured podcast script from research text.
     """
     groq_client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
-    response = groq_client.chat.completions.create(
-        messages=[
-            {"role": "system", "content": prompt},
-            {"role": "user", "content": text}
-        ],
-        model="DeepSeek-R1-Distill-Llama-70B",
-        max_tokens=4096,
-        temperature=0.6
-    )
 
-
-
-
-
+    try:
+        response = groq_client.chat.completions.create(
+            messages=[
+                {"role": "system", "content": prompt},
+                {"role": "user", "content": text}
+            ],
+            model="DeepSeek-R1-Distill-Llama-70B",
+            max_tokens=4096,
+            temperature=0.6
+        )
+
+        if not response.choices or not response.choices[0].message.content:
+            raise ValueError("LLM returned an empty response.")
+
+        script_content = response.choices[0].message.content.strip()
+        dialogue_items = parse_script_to_dialogue(script_content, host_name, guest_name)
+
+        if not dialogue_items:
+            raise ValueError("Script parsing failed. No valid dialogue found.")
+
+        return Dialogue(dialogue=dialogue_items)
+
+    except Exception as e:
+        print(f"[ERROR] Failed to generate script: {str(e)}")
+        return Dialogue(dialogue=[DialogueItem(speaker="Jane", display_speaker="Jane", text="I'm sorry, something went wrong.")])
 
 def parse_script_to_dialogue(script, host_name, guest_name):
     """
@@ -63,7 +75,7 @@ def truncate_text(text, max_tokens=2048):
     tokenizer = tiktoken.get_encoding("cl100k_base")
     tokens = tokenizer.encode(text)
 
-    if len(tokens) > max_tokens:
+    if len(tokens) > max_tokens:
         return tokenizer.decode(tokens[:max_tokens])
 
     return text
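Note on the types used above: the Dialogue and DialogueItem models returned by the updated generate_script are defined elsewhere in the repo and are not shown in this diff. As a rough, hypothetical sketch (assuming Pydantic-style models whose field names match the calls in the diff: dialogue, speaker, display_speaker, text), they might look like this:

# Hypothetical sketch only; field names are taken from the diff, the real
# definitions in the repository may differ.
from typing import List
from pydantic import BaseModel

class DialogueItem(BaseModel):
    speaker: str            # voice/role identifier, e.g. "Jane"
    display_speaker: str    # name shown in the rendered transcript
    text: str               # the spoken line

class Dialogue(BaseModel):
    dialogue: List[DialogueItem]   # ordered speaker turns parsed from the script

Under this assumption, the except branch in the diff returns a single-item Dialogue with speaker "Jane", so downstream code always receives a valid object even when the LLM call or script parsing fails.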
|