removed audio
app.py CHANGED
@@ -65,7 +65,8 @@ llm = Llama.from_pretrained(
 )
 
 #marketing prompt
-marketing_email_prompt = """
+marketing_email_prompt = """Your go-to Email Marketing Guru - I'm here to help you craft short and concise compelling campaigns, boost conversions, and take your business to the next level.
+Below is a product and description, please write a marketing email for this product.
 
 ### Product:
 {}
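This hunk moves the marketing-guru persona out of the later system message and into the `marketing_email_prompt` template itself. The template is presumably rendered with `str.format` to build the request passed to the model; a minimal sketch of that rendering, in which the `### Description:` section and the `user_request` name are assumptions, since the diff truncates the template:

```python
# Sketch of how the template would be filled in; everything after
# "### Product:\n{}" is assumed, as the diff does not show it.
marketing_email_prompt = """Your go-to Email Marketing Guru - I'm here to help you craft short and concise compelling campaigns, boost conversions, and take your business to the next level.
Below is a product and description, please write a marketing email for this product.

### Product:
{}

### Description:
{}
"""

product = "Solar lantern"
description = "Charges in daylight, runs 12 hours on a full charge."

# Presumably assembled into the user message consumed later in greet()
user_request = marketing_email_prompt.format(product, description)
```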
@@ -87,12 +88,11 @@ async def greet(product,description):
         messages=[
             {
                 "role": "user",
-                "content":
+                "content": product+"\n"+description
             }
         ],
         model="llama-guard-3-8b",
     )
-    print(chat_completion)
     warning_message = chat_completion.choices[0].message.content
     if warning_message != 'safe':
         #processed_audio = combine_audio_files(text_to_speech([chat_completion.choices[0].message.content]))
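Here the Llama Guard screening call gets real content, the raw product and description, and the debug `print(chat_completion)` is dropped. The `chat_completion.choices[0].message.content` access and the `llama-guard-3-8b` model name suggest an OpenAI-style hosted client such as Groq's; a minimal sketch of the moderation step under that assumption (the `client` setup is not shown in the diff):

```python
from groq import Groq  # assumption: the Space uses Groq's OpenAI-style SDK

client = Groq()  # reads GROQ_API_KEY from the environment

def moderate(product: str, description: str) -> str:
    """Screen the raw inputs before any marketing copy is generated."""
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": product + "\n" + description,
            }
        ],
        model="llama-guard-3-8b",
    )
    # Llama Guard answers 'safe', or 'unsafe' followed by a category code,
    # which is why greet() branches on the reply not being exactly 'safe'.
    return chat_completion.choices[0].message.content
```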
@@ -100,13 +100,9 @@ async def greet(product,description):
     else:
         output = llm.create_chat_completion(
             messages=[
-                {
-                    "role": "system",
-                    "content": "Your go-to Email Marketing Guru - I'm here to help you craft short and concise compelling campaigns, boost conversions, and take your business to the next level.",
-                },
                 {"role": "user", "content": user_reques},
             ],
-            max_tokens=
+            max_tokens=1024,
             temperature=0.7,
             stream=True
         )
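With the persona folded into the prompt template, the redundant system message is deleted and the previously empty `max_tokens=` gets a value. Note that `user_reques` in the context line looks like a typo for `user_request` that this commit leaves untouched. A minimal, self-contained sketch of the streaming generation step with llama-cpp-python, where the repo and filename are placeholders rather than the Space's actual model:

```python
from llama_cpp import Llama

# Placeholders: the Space loads its own GGUF model via Llama.from_pretrained.
llm = Llama.from_pretrained(
    repo_id="some-org/some-model-GGUF",
    filename="*q4_k_m.gguf",
)

user_request = "Write a marketing email for a solar lantern."  # stand-in

output = llm.create_chat_completion(
    messages=[
        # The diff spells this `user_reques`; presumably `user_request`.
        {"role": "user", "content": user_request},
    ],
    max_tokens=1024,
    temperature=0.7,
    stream=True,
)

# stream=True yields chunks whose delta carries incremental content,
# mirroring the partial_message accumulation later in greet().
for chunk in output:
    delta = chunk["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)
```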
@@ -120,6 +116,5 @@ async def greet(product,description):
             partial_message = partial_message + delta.get('content', '')
             yield partial_message
 
-audio = gr.Audio()
 demo = gr.Interface(fn=greet, inputs=["text","text"], outputs=["text"])
 demo.launch()
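The final hunk removes the orphaned `gr.Audio()` component, matching the commit message; the interface only ever wired up a text output. Because `greet` is an async generator that yields successive `partial_message` strings, `gr.Interface` streams the text into the output box as it arrives. A minimal sketch of that streaming wiring, with a stand-in generator in place of the Space's moderation-plus-generation pipeline:

```python
import gradio as gr

async def greet(product, description):
    # Stand-in for the moderation + generation pipeline in app.py.
    partial_message = ""
    for word in ("Introducing", product + ":", description):
        partial_message = partial_message + word + " "
        yield partial_message  # each yield updates the output textbox

demo = gr.Interface(fn=greet, inputs=["text", "text"], outputs=["text"])
demo.launch()
```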