Update app.py

app.py CHANGED
@@ -20,7 +20,7 @@ print("OpenAI client initialized.")
 
 def respond(
     message,
-    history
+    history,
     system_message,
     max_tokens,
     temperature,
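The first hunk restores the comma after `history`; without it, `history system_message` is a syntax error and the Space cannot start. For orientation, the full signature presumably mirrors the `inputs=[...]` list wired to the events further down. Only `message`, `history`, `system_message`, `max_tokens`, `temperature`, and `custom_model` are visible in the diff; the remaining parameter names below are guesses based on the slider names:

    def respond(
        message,            # text typed into the msg textbox
        history,            # list of (user, assistant) tuples from the chatbot
        system_message,     # from system_message_box
        max_tokens,         # from max_tokens_slider
        temperature,        # from temperature_slider
        top_p,              # assumed, from top_p_slider
        frequency_penalty,  # assumed, from frequency_penalty_slider
        seed,               # assumed, from seed_slider
        custom_model,       # from custom_model_box; used for model_to_use below
    ):
        ...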
@@ -53,9 +53,11 @@ def respond(
     # If user provided a model, use that; otherwise, fall back to a default model
     model_to_use = custom_model.strip() if custom_model.strip() != "" else "meta-llama/Llama-3.3-70B-Instruct"
 
-    #
-
-
+    # Create a copy of the history and add the new user message
+    new_history = list(history)
+    new_history.append((message, ""))
+    current_response = ""
+
     try:
         for message_chunk in client.chat.completions.create(
             model=model_to_use,
@@ -69,10 +71,14 @@ def respond(
         ):
             token_text = message_chunk.choices[0].delta.content
             if token_text is not None:  # Handle None type in response
-
-
+                current_response += token_text
+                # Update just the last message in history
+                new_history[-1] = (message, current_response)
+                yield new_history
     except Exception as e:
-
+        error_message = f"Error: {str(e)}\n\nPlease check your model selection and parameters, or try again later."
+        new_history[-1] = (message, error_message)
+        yield new_history
 
     print("Completed response generation.")
 
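Together, the second and third hunks turn `respond` into a streaming generator. It copies the incoming history rather than mutating the Chatbot's value in place, appends a `(message, "")` placeholder pair, then accumulates each streamed token and yields the whole list so the UI redraws after every chunk; on failure the same slot is rewritten with an error string and yielded once more. A self-contained sketch of the pattern, with a hypothetical `stream_completion` standing in for the OpenAI-compatible streaming call:

    def stream_completion(message):
        # Hypothetical stand-in for client.chat.completions.create(..., stream=True).
        for word in ("You said: " + message).split():
            yield word + " "

    def respond(message, history):
        new_history = list(history)        # copy, so the caller's list is not mutated
        new_history.append((message, ""))  # placeholder slot for the assistant reply
        current_response = ""
        try:
            for token_text in stream_completion(message):
                current_response += token_text
                new_history[-1] = (message, current_response)  # update only the last pair
                yield new_history                              # each yield redraws the Chatbot
        except Exception as e:
            new_history[-1] = (message, f"Error: {e}")
            yield new_history

Note that generator handlers only stream in Gradio when the queue is enabled, which is why both event listeners below keep `queue=True`.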
@@ -393,16 +399,24 @@ with gr.Blocks(css=custom_css, title=APP_TITLE, theme=gr.themes.Soft()) as demo:
         fn=respond,
         inputs=[msg, chatbot, system_message_box, max_tokens_slider, temperature_slider,
                 top_p_slider, frequency_penalty_slider, seed_slider, custom_model_box],
-        outputs=
+        outputs=chatbot,
         queue=True
+    ).then(
+        lambda: "",  # Clear the message box after sending
+        None,
+        [msg]
     )
 
     submit_btn.click(
         fn=respond,
         inputs=[msg, chatbot, system_message_box, max_tokens_slider, temperature_slider,
                 top_p_slider, frequency_penalty_slider, seed_slider, custom_model_box],
-        outputs=
+        outputs=chatbot,
         queue=True
+    ).then(
+        lambda: "",  # Clear the message box after sending
+        None,
+        [msg]
     )
 
     # Update model display when search changes
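The last hunk repairs the truncated `outputs=` argument and chains a cleanup step onto each event: in Gradio's Blocks API an event listener returns a dependency object whose `.then()` registers a follow-up that runs once the first handler finishes, here a lambda returning an empty string into `msg` to clear the textbox. A minimal runnable sketch of the same wiring, reduced to two components and a toy handler, assuming the tuple-style chat history the diff itself uses:

    import gradio as gr

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()

        def respond(message, history):
            history = list(history)
            history.append((message, "You said: " + message))  # toy reply
            yield history

        msg.submit(
            fn=respond,
            inputs=[msg, chatbot],
            outputs=chatbot,
            queue=True,
        ).then(
            lambda: "",  # the return value becomes the new msg content
            None,        # the follow-up takes no inputs
            [msg],
        )

    demo.launch()

Doing the clearing in `.then()` rather than inside `respond` keeps the handler identical for both the textbox submit and the button click, which is exactly how the diff reuses it.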