changed llm
app.py CHANGED
@@ -56,7 +56,7 @@ If the user prompt does not fall within these categories, is safe and does not n
 user prompt: {}
 """
 
-def greet(product,description):
+async def greet(product,description):
     user_reques = marketing_email_prompt.format(
         product, # product
         description, # description
@@ -71,7 +71,11 @@ def greet(product,description):
     ]
     response = client.chat.completions.create(model=guard_llm, messages=messages, temperature=0)
     if response.choices[0].message.content != "not moderated":
-
+        a_list = "Sorry can't proceed for generate marketing email!. Your content needs to be moderated first.".split()
+        s = ""
+        for i in a_list:
+            s = s + i
+            yield s
     else:
         output = llm.create_chat_completion(
             messages=[