fixup! ai: Switch to stable server-side configuration.
jarvis.py
CHANGED
@@ -68,11 +68,6 @@ def ensure_stop_event(sess):
     if not hasattr(sess, "stop_event"):
         sess.stop_event = asyncio.Event()
 
-def get_available_items(items, marked):
-    a = [i for i in items if i not in marked]
-    random.shuffle(a)
-    return a
-
 def marked_item(item, marked, attempts):
     marked.add(item)
     attempts[item] = attempts.get(item, 0) + 1
@@ -237,7 +232,7 @@ async def fetch_response_stream_async(host, key, model, msgs, cfg, sid, stop_eve
 async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt):
     ensure_stop_event(sess)
     sess.stop_event.clear()
-    if not
+    if not LINUX_SERVER_PROVIDER_KEYS or not LINUX_SERVER_HOSTS:
         yield ("content", RESPONSES["RESPONSE_3"])
         return
     if not hasattr(sess, "session_id") or not sess.session_id:
@@ -257,26 +252,33 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
                 return
             yield chunk
         return
-
+    jarvis = False
+    responses_success = False
+    keys = list(LINUX_SERVER_PROVIDER_KEYS)
     hosts = list(LINUX_SERVER_HOSTS)
     random.shuffle(keys)
     random.shuffle(hosts)
     for k in keys:
         for h in hosts:
+            jarvis = True
             stream_gen = fetch_response_stream_async(h, k, model_key, msgs, cfg, sess.session_id, sess.stop_event)
-
-
+            responses = ""
+            got_responses = False
             async for chunk in stream_gen:
                 if sess.stop_event.is_set():
                     return
-                if not
-
+                if not got_responses:
+                    got_responses = True
                     sess.active_candidate = (h, k)
-
+                responses += chunk[1]
                 yield chunk
-            if
+            if got_responses and responses.strip():
+                responses_success = True
                 return
-
+    if not jarvis:
+        yield ("content", RESPONSES["RESPONSE_3"])
+    elif not responses_success:
+        yield ("content", RESPONSES["RESPONSE_2"])
 
 async def respond_async(multi, history, model_display, sess, custom_prompt):
     ensure_stop_event(sess)
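In short, the new hunk replaces the deleted get_available_items helper with an inline failover loop: shuffle the provider keys and hosts, try each (key, host) pair in turn, track whether any pair was attempted at all (jarvis) and whether any stream produced non-empty text (responses_success), and fall back to RESPONSE_3 or RESPONSE_2 when nothing worked. The sketch below illustrates that control flow in isolation; PROVIDER_KEYS, HOSTS, fake_stream and chat are hypothetical stand-ins for the real configuration, fetch_response_stream_async and session plumbing in jarvis.py, not the project's actual code.

import asyncio
import random

# Hypothetical stand-ins for the configuration and canned replies used in jarvis.py.
PROVIDER_KEYS = ["key-a", "key-b"]
HOSTS = ["host-1", "host-2"]
RESPONSES = {"RESPONSE_2": "Every provider failed.", "RESPONSE_3": "No provider is configured."}

async def fake_stream(host, key):
    # Placeholder for fetch_response_stream_async: yields ("content", text) chunks.
    for part in ("Hello", ", world"):
        yield ("content", part)

async def chat(stop_event):
    # Mirrors the control flow added by the hunk.
    attempted = False        # "jarvis" in the diff: did we try any (key, host) pair?
    succeeded = False        # "responses_success": did any stream return non-empty text?
    keys, hosts = list(PROVIDER_KEYS), list(HOSTS)
    random.shuffle(keys)
    random.shuffle(hosts)
    for k in keys:
        for h in hosts:
            attempted = True
            text, got_first = "", False
            async for chunk in fake_stream(h, k):
                if stop_event.is_set():
                    return                  # caller asked us to stop mid-stream
                if not got_first:
                    got_first = True        # the diff records the candidate (h, k) here
                text += chunk[1]
                yield chunk
            if got_first and text.strip():
                succeeded = True
                return                      # first successful stream wins
    if not attempted:
        yield ("content", RESPONSES["RESPONSE_3"])
    elif not succeeded:
        yield ("content", RESPONSES["RESPONSE_2"])

async def main():
    async for kind, text in chat(asyncio.Event()):
        print(kind, text)

asyncio.run(main())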