
hadadrjt committed · Commit 68ce31f · 1 parent: eb0a349

ai: I don't care about streaming responses :)

Files changed (1):
  1. jarvis.py (+5 −16)
jarvis.py CHANGED

@@ -17,9 +17,7 @@ import gradio as gr
 import requests
 import json
 import os
-import threading
 import random
-import time
 import pytesseract
 import pdfplumber
 import docx
@@ -53,11 +51,6 @@ session = requests.Session()
 def get_model_key(display_name):
     return next((k for k, v in MODEL_MAPPING.items() if v == display_name), MODEL_CHOICES[0])
 
-def simulate_streaming_response(text):
-    for line in text.splitlines():
-        yield line + "\n"
-        time.sleep(0.05)
-
 def extract_file_content(file_path):
     ext = Path(file_path).suffix.lower()
     content = ""
@@ -102,8 +95,7 @@ def extract_file_content(file_path):
 
 def chat_with_model(history, user_input, selected_model_display):
     if not LINUX_SERVER_PROVIDER_KEYS or not LINUX_SERVER_HOSTS:
-        yield RESPONSES["RESPONSE_3"]
-        return
+        return RESPONSES["RESPONSE_3"]
     selected_model = get_model_key(selected_model_display)
     model_config = MODEL_CONFIG.get(selected_model, DEFAULT_CONFIG)
     messages = [{"role": "user", "content": user} for user, _ in history]
@@ -118,11 +110,10 @@ def chat_with_model(history, user_input, selected_model_display):
             response = session.post(host, json=data, headers={"Authorization": f"Bearer {api_key}"})
             if response.status_code < 400:
                 ai_text = response.json().get("choices", [{}])[0].get("message", {}).get("content", RESPONSES["RESPONSE_2"])
-                yield from simulate_streaming_response(ai_text)
-                return
+                return ai_text
         except requests.exceptions.RequestException:
             continue
-    yield RESPONSES["RESPONSE_3"]
+    return RESPONSES["RESPONSE_3"]
 
 def respond(multi_input, history, selected_model_display):
     message = {"text": multi_input.get("text", "").strip(), "files": multi_input.get("files", [])}
@@ -139,10 +130,8 @@ def respond(multi_input, history, selected_model_display):
     if message["text"]:
         combined_input += message["text"]
     history.append([combined_input, ""])
-    ai_response = ""
-    for chunk in chat_with_model(history, combined_input, selected_model_display):
-        ai_response += chunk
-        history[-1][1] = ai_response
+    ai_response = chat_with_model(history, combined_input, selected_model_display)
+    history[-1][1] = ai_response
     return history, gr.MultimodalTextbox(value=None, interactive=True)
 
 def change_model(new_model_display):
 
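For context on what the commit drops: the deleted helper only imitated streaming, and because respond() consumed the whole generator before its single return, none of the intermediate states ever reached the browser. Below is a minimal sketch of the removed helper next to what genuine Gradio streaming would require; streaming_respond and its wiring are illustrative, not code from this repo.

import time

def simulate_streaming_response(text):
    # The deleted helper: re-emit an already-complete reply line by line,
    # sleeping 50 ms between lines to imitate token streaming.
    for line in text.splitlines():
        yield line + "\n"
        time.sleep(0.05)

def streaming_respond(history, full_reply):
    # Gradio only streams when the event handler itself is a generator:
    # each yield triggers a UI re-render. respond() in this file returns
    # once, so the old chunk loop never produced visible partial output.
    partial = ""
    for chunk in simulate_streaming_response(full_reply):
        partial += chunk
        history[-1][1] = partial
        yield history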
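The failover logic in chat_with_model survives the commit unchanged: try a provider key against a host, skip network failures, and fall back to a canned message if nothing succeeds. The hunks do not show the surrounding loop, so the nested iteration over keys and hosts in this sketch is an assumption; the session, status check, and response parsing mirror the context lines above.

import requests

session = requests.Session()

def first_successful_reply(keys, hosts, data, fallback):
    # Sketch of the failover shape: return the first non-error reply,
    # otherwise the fallback text (RESPONSES["RESPONSE_3"] in the repo).
    for api_key in keys:
        for host in hosts:
            try:
                response = session.post(
                    host, json=data,
                    headers={"Authorization": f"Bearer {api_key}"})
                if response.status_code < 400:
                    choice = response.json().get("choices", [{}])[0]
                    return choice.get("message", {}).get("content", fallback)
            except requests.exceptions.RequestException:
                continue  # network error: try the next key/host pair
    return fallback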