hadadrjt committed
Commit ea8a8bf
1 Parent(s): c05eaca

ai: Implement async functions.

Files changed (2):
  1. jarvis.py +25 -27
  2. requirements.txt +1 -0
jarvis.py CHANGED
@@ -20,6 +20,8 @@ import uuid
 import concurrent.futures
 import itertools
 import threading
+import httpx
+import asyncio
 
 from openai import OpenAI
 
@@ -141,19 +143,21 @@ def process_ai_response(ai_text):
     except Exception:
         return ai_text
 
-def fetch_response(host, provider_key, selected_model, messages, model_config, session_id):
+async def fetch_response_async(host, provider_key, selected_model, messages, model_config, session_id):
     try:
-        client = OpenAI(base_url=host, api_key=provider_key)
-        data = {"model": selected_model, "messages": messages, **model_config}
-        response = client.chat.completions.create(extra_body={"optillm_approach": "rto|re2|cot_reflection|self_consistency|plansearch|leap|z3|bon|moa|mcts|mcp|router|privacy|executecode|json", "session_id": session_id}, **data)
-        ai_text = response.choices[0].message.content if response.choices and response.choices[0].message and response.choices[0].message.content else RESPONSES["RESPONSE_2"]
-        return process_ai_response(ai_text)
+        async with httpx.AsyncClient(timeout=1) as client:
+            data = {"model": selected_model, "messages": messages, **model_config}
+            extra = {"optillm_approach": "rto|re2|cot_reflection|self_consistency|plansearch|leap|z3|bon|moa|mcts|mcp|router|privacy|executecode|json", "session_id": session_id}
+            response = await client.post(f"{host}", json={**data, "extra_body": extra, "session_id": session_id}, headers={"Authorization": f"Bearer {provider_key}"})
+            response.raise_for_status()
+            resp_json = response.json()
+            ai_text = resp_json["choices"][0]["message"]["content"] if resp_json.get("choices") and resp_json["choices"][0].get("message") and resp_json["choices"][0]["message"].get("content") else RESPONSES["RESPONSE_2"]
+            return process_ai_response(ai_text)
     except Exception:
         marked_item(provider_key, LINUX_SERVER_PROVIDER_KEYS_MARKED, LINUX_SERVER_PROVIDER_KEYS_ATTEMPTS)
-        #marked_item(host, LINUX_SERVER_HOSTS_MARKED, LINUX_SERVER_HOSTS_ATTEMPTS)
         raise
 
-def chat_with_model(history, user_input, selected_model_display, sess):
+async def chat_with_model_async(history, user_input, selected_model_display, sess):
     global ACTIVE_CANDIDATE
     if not get_available_items(LINUX_SERVER_PROVIDER_KEYS, LINUX_SERVER_PROVIDER_KEYS_MARKED) or not get_available_items(LINUX_SERVER_HOSTS, LINUX_SERVER_HOSTS_MARKED):
         return RESPONSES["RESPONSE_3"]
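
Note on the rewritten fetch: the removed fetch_response let the OpenAI SDK resolve the chat-completions path against the base URL, while the new code POSTs to the bare f"{host}" and nests the optillm fields under an "extra_body" key inside the JSON, where an OpenAI-compatible server will not look for them; the 1-second AsyncClient timeout is also very tight for LLM completions. A minimal sketch of the same call through the SDK's own async client, assuming the hosts are OpenAI-compatible base URLs as the old code implied (the 30-second timeout is an illustrative choice, not from this commit):

    from openai import AsyncOpenAI

    async def fetch_response_async(host, provider_key, selected_model, messages, model_config, session_id):
        # AsyncOpenAI resolves the chat-completions path against base_url,
        # exactly like the synchronous client in the removed fetch_response.
        client = AsyncOpenAI(base_url=host, api_key=provider_key, timeout=30)
        response = await client.chat.completions.create(
            model=selected_model,
            messages=messages,
            # extra_body is merged into the top level of the request payload,
            # which is where optillm reads optillm_approach and session_id.
            extra_body={"optillm_approach": "rto|re2|cot_reflection|self_consistency|plansearch|leap|z3|bon|moa|mcts|mcp|router|privacy|executecode|json", "session_id": session_id},
            **model_config,
        )
        ai_text = response.choices[0].message.content if response.choices and response.choices[0].message.content else RESPONSES["RESPONSE_2"]
        return process_ai_response(ai_text)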
@@ -166,28 +170,24 @@ def chat_with_model(history, user_input, selected_model_display, sess):
     messages.append({"role": "user", "content": user_input})
     if ACTIVE_CANDIDATE is not None:
         try:
-            return fetch_response(ACTIVE_CANDIDATE[0], ACTIVE_CANDIDATE[1], selected_model, messages, model_config, sess.session_id)
+            return await fetch_response_async(ACTIVE_CANDIDATE[0], ACTIVE_CANDIDATE[1], selected_model, messages, model_config, sess.session_id)
         except Exception:
             ACTIVE_CANDIDATE = None
     available_keys = get_available_items(LINUX_SERVER_PROVIDER_KEYS, LINUX_SERVER_PROVIDER_KEYS_MARKED)
     available_servers = get_available_items(LINUX_SERVER_HOSTS, LINUX_SERVER_HOSTS_MARKED)
     candidates = [(host, key) for host in available_servers for key in available_keys]
     random.shuffle(candidates)
-    with concurrent.futures.ThreadPoolExecutor(max_workers=len(candidates)) as executor:
-        futures = {executor.submit(fetch_response, host, key, selected_model, messages, model_config, sess.session_id): (host, key) for (host, key) in candidates}
-        for future in concurrent.futures.as_completed(futures):
-            try:
-                result = future.result()
-                ACTIVE_CANDIDATE = futures[future]
-                for f in futures:
-                    if f is not future:
-                        f.cancel()
-                return result
-            except Exception:
-                continue
+    tasks = [fetch_response_async(host, key, selected_model, messages, model_config, sess.session_id) for host, key in candidates]
+    for task in asyncio.as_completed(tasks):
+        try:
+            result = await task
+            ACTIVE_CANDIDATE = next(((host, key) for host, key in candidates if host and key), None)
+            return result
+        except Exception:
+            continue
     return RESPONSES["RESPONSE_2"]
 
-def respond(multi_input, history, selected_model_display, sess):
+async def respond_async(multi_input, history, selected_model_display, sess):
     message = {"text": multi_input.get("text", "").strip(), "files": multi_input.get("files", [])}
     if not message["text"] and not message["files"]:
         yield history, gr.MultimodalTextbox(value=None, interactive=True), sess
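
Note on the racing loop: asyncio.as_completed() yields results in completion order but does not reveal which input coroutine finished, so the new ACTIVE_CANDIDATE assignment just records the first (host, key) in the shuffled list rather than the pair that actually answered; the old ThreadPoolExecutor version recovered the winner via futures[future] and cancelled the rest. A sketch that restores both behaviours under the same surrounding names:

    async def tagged_fetch(host, key):
        # Carry the candidate alongside its result so the winner is known.
        result = await fetch_response_async(host, key, selected_model, messages, model_config, sess.session_id)
        return (host, key), result

    tasks = [asyncio.ensure_future(tagged_fetch(host, key)) for host, key in candidates]
    for task in asyncio.as_completed(tasks):
        try:
            candidate, result = await task
            ACTIVE_CANDIDATE = candidate  # the pair that actually responded
            for t in tasks:               # cancel the slower attempts, as the
                if not t.done():          # ThreadPoolExecutor version did
                    t.cancel()
            return result
        except Exception:
            continue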
@@ -200,7 +200,7 @@ def respond(multi_input, history, selected_model_display, sess):
     if message["text"]:
         combined_input += message["text"]
     history.append([combined_input, ""])
-    ai_response = chat_with_model(history, combined_input, selected_model_display, sess)
+    ai_response = await chat_with_model_async(history, combined_input, selected_model_display, sess)
     history[-1][1] = ""
     def convert_to_string(data):
         if isinstance(data, (str, int, float)):
@@ -215,7 +215,7 @@ def respond(multi_input, history, selected_model_display, sess):
         return repr(data)
     for character in ai_response:
         history[-1][1] += convert_to_string(character)
-        time.sleep(0.0001)
+        await asyncio.sleep(0.0001)
         yield history, gr.MultimodalTextbox(value=None, interactive=True), sess
 
 def change_model(new_model_display):
@@ -229,8 +229,6 @@ with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], h
     model_dropdown = gr.Dropdown(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
     with gr.Row():
         msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True, file_count="single", file_types=ALLOWED_EXTENSIONS)
-
     model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, user_session, selected_model])
-    msg.submit(fn=respond, inputs=[msg, user_history, selected_model, user_session], outputs=[chatbot, msg, user_session])
-
+    msg.submit(fn=respond_async, inputs=[msg, user_history, selected_model, user_session], outputs=[chatbot, msg, user_session])
 jarvis.launch(show_api=False, max_file_size="1mb")
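
The rewiring at the bottom works because Gradio accepts coroutine and async-generator callbacks in event listeners such as msg.submit, so respond_async can stream without blocking a worker thread. A self-contained sketch of that pattern (the names here are illustrative, not from jarvis.py; assumes a Gradio version with ChatInterface):

    import asyncio
    import gradio as gr

    async def stream_reply(message, history):
        # Async generator callback: awaiting between yields frees the event
        # loop, unlike the blocking time.sleep() in the old synchronous respond().
        out = ""
        for ch in f"echo: {message}":
            out += ch
            await asyncio.sleep(0.0001)
            yield out

    demo = gr.ChatInterface(stream_reply)
    demo.launch()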
requirements.txt CHANGED
@@ -1,5 +1,6 @@
 gradio
 huggingface_hub
+httpx
 openai
 optillm
 pandas