Update app.py
app.py
CHANGED
@@ -392,9 +392,14 @@ def extract_ai_response(result):
         logger.error(f"Error extracting AI response: {str(e)}")
         return f"Error: {str(e)}"
 
+# streaming code:
 def streaming_handler(response, chatbot, message_idx):
-    """Handle streaming response from OpenRouter API"""
     try:
+        # First add the user message if needed
+        if len(chatbot) == message_idx:
+            chatbot.append({"role": "user", "content": message})
+            chatbot.append({"role": "assistant", "content": ""})
+
         for line in response.iter_lines():
             if not line:
                 continue
@@ -412,7 +417,8 @@ def streaming_handler(response, chatbot, message_idx):
                 if "choices" in chunk and len(chunk["choices"]) > 0:
                     delta = chunk["choices"][0].get("delta", {})
                     if "content" in delta and delta["content"]:
-
+                        # Update the last message content
+                        chatbot[-1]["content"] += delta["content"]
                         yield chatbot
             except json.JSONDecodeError:
                 logger.error(f"Failed to parse JSON from chunk: {data}")
@@ -420,7 +426,7 @@ def streaming_handler(response, chatbot, message_idx):
         logger.error(f"Error in streaming handler: {str(e)}")
         # Add error message to the current response
         if len(chatbot) > message_idx:
-            chatbot[
+            chatbot[-1]["content"] += f"\n\nError during streaming: {str(e)}"
         yield chatbot
 
 def ask_ai(message, history, model_choice, temperature, max_tokens, top_p,
@@ -534,7 +540,8 @@ def ask_ai(message, history, model_choice, temperature, max_tokens, top_p,
         logger.info(f"Token usage: {result['usage']}")
 
         # Add response to history
-        chat_history.append(
+        chat_history.append({"role": "user", "content": message})
+        chat_history.append({"role": "assistant", "content": ai_response})
         return chat_history
 
     # Handle error response
@@ -604,7 +611,7 @@ def create_app():
             show_copy_button=True,
             show_label=False,
             avatar_images=(None, "https://upload.wikimedia.org/wikipedia/commons/0/04/ChatGPT_logo.svg"),
-
+            type="messages",  # Explicitly set the type to messages
            elem_id="chat-window"  # Add elem_id for debugging
         )
 
@@ -960,7 +967,7 @@ def create_app():
     )
 
     # Enable debugging for key components
-    gr.debug(chatbot)
+    # gr.debug(chatbot)
 
     return demo
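
For context, a minimal sketch of how the streaming pieces above fit together as one function. The SSE plumbing in the middle (the "data: " prefix stripping, the "[DONE]" sentinel, and the json.loads that produces `chunk`) is elided from these hunks and reconstructed here as an assumption; likewise `message`, which the added code references but the visible signature never declares, is assumed to arrive as an extra parameter:

import json
import logging

logger = logging.getLogger(__name__)

def streaming_handler(response, chatbot, message_idx, message=""):
    # Sketch only: `message` is assumed to reach this function as a parameter;
    # the committed hunk references it without showing where it comes from.
    try:
        # First add the user message if needed
        if len(chatbot) == message_idx:
            chatbot.append({"role": "user", "content": message})
            chatbot.append({"role": "assistant", "content": ""})

        for line in response.iter_lines():
            if not line:
                continue
            data = line.decode("utf-8")
            # Assumption: OpenRouter streams Server-Sent Events, so payload
            # lines carry a "data: " prefix and the stream ends with "[DONE]".
            if data.startswith("data: "):
                data = data[len("data: "):]
            if data.strip() == "[DONE]":
                break
            try:
                chunk = json.loads(data)
                if "choices" in chunk and len(chunk["choices"]) > 0:
                    delta = chunk["choices"][0].get("delta", {})
                    if "content" in delta and delta["content"]:
                        # Append each streamed token to the assistant message
                        chatbot[-1]["content"] += delta["content"]
                        yield chatbot
            except json.JSONDecodeError:
                logger.error(f"Failed to parse JSON from chunk: {data}")
    except Exception as e:
        logger.error(f"Error in streaming handler: {str(e)}")
        # Surface the failure in the current response rather than dropping it
        if len(chatbot) > message_idx:
            chatbot[-1]["content"] += f"\n\nError during streaming: {str(e)}"
        yield chatbot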
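The common thread in this commit is Gradio's messages format: with type="messages" on gr.Chatbot, history entries are OpenAI-style {"role": ..., "content": ...} dicts rather than (user, bot) tuples, which is why the truncated tuple-style append calls above are replaced by paired role/content appends. A minimal, self-contained sketch of that wiring, assuming a recent Gradio (4.x or later); the echo handler is illustrative, not app.py's ask_ai:

import gradio as gr

def respond(message, history):
    # With type="messages", history is a list of {"role", "content"} dicts,
    # matching the append calls added in this commit.
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": f"Echo: {message}"},  # placeholder reply
    ]
    return "", history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages", elem_id="chat-window")
    msg = gr.Textbox(placeholder="Type a message...")
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()

On the last hunk: Gradio does not ship a gr.debug helper as far as I know, which would explain why the call is commented out rather than kept; the stock alternative is demo.launch(debug=True).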