Hugging Face Space (status: Running) — commit "removed audio"
Browse files
app.py — CHANGED
@@ -59,8 +59,8 @@ llm = Llama.from_pretrained(
 59      repo_id="amir22010/fine_tuned_product_marketing_email_gemma_2_9b_q4_k_m", #custom fine tuned model
 60      filename="unsloth.Q4_K_M.gguf", #model file name
 61      cache_dir=os.path.abspath(os.getcwd()),
 62 -    [removed line — content not captured in this extraction]
 63 -    [removed line — content not captured in this extraction]
 64      verbose=False
 65  )
 66
@@ -95,8 +95,7 @@ async def greet(product,description):
 95      )
 96      warning_message = chat_completion.choices[0].message.content
 97      if warning_message != 'safe':
 98 -    [removed line — content not captured in this extraction]
 99 -        yield warning_message
100      else:
101          output = llm.create_chat_completion(
102              messages=[
@@ -112,10 +111,8 @@ async def greet(product,description):
112              print(chunk)
113              delta = chunk['choices'][0]['delta']
114              if 'content' in delta:
115 -                # audio_list.append([text_to_speech(delta.get('content', ''))])
116 -                # processed_audio = combine_audio_files(audio_list)
117              partial_message = partial_message + delta.get('content', '')
118              yield partial_message
119
120 -    demo = gr.Interface(fn=greet, inputs=["text","text"], outputs= [line truncated in this extraction]
121  demo.launch()
(after — hunk 1 as committed)
 59      repo_id="amir22010/fine_tuned_product_marketing_email_gemma_2_9b_q4_k_m", #custom fine tuned model
 60      filename="unsloth.Q4_K_M.gguf", #model file name
 61      cache_dir=os.path.abspath(os.getcwd()),
 62 +    n_ctx=2048,
 63 +    n_batch=126,
 64      verbose=False
 65  )
 66
(after — hunk 2 as committed)
 95      )
 96      warning_message = chat_completion.choices[0].message.content
 97      if warning_message != 'safe':
 98 +        yield warning_message
 99      else:
100          output = llm.create_chat_completion(
101              messages=[
(after — hunk 3 as committed)
111              print(chunk)
112              delta = chunk['choices'][0]['delta']
113              if 'content' in delta:
114              partial_message = partial_message + delta.get('content', '')
115              yield partial_message
116
117 +    demo = gr.Interface(fn=greet, inputs=["text","text"], outputs="texts")
118  demo.launch()