ai: Hide reasoning status after completion.
jarvis.py
CHANGED
@@ -218,14 +218,12 @@ async def fetch_response_stream_async(host, key, model, msgs, cfg, sid, stop_eve
                 try:
                     j = json.loads(data)
                     if isinstance(j, dict) and j.get("choices"):
-                        ch …
-                        …
-                        …
-                        …
-                        …
-                        …
-                        if content:
-                            yield content
+                        for ch in j["choices"]:
+                            delta = ch.get("delta", {})
+                            if "reasoning" in delta and delta["reasoning"] is not None and delta["reasoning"] != "":
+                                yield ("reasoning", delta["reasoning"])
+                            if "content" in delta and delta["content"] is not None and delta["content"] != "":
+                                yield ("content", delta["content"])
                 except:
                     continue
             except:
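
The streaming parser now tags every fragment instead of yielding bare content strings. A minimal standalone sketch of that contract, assuming OpenAI-style choices/delta payloads (tag_deltas and the sample payload below are illustrative, not part of jarvis.py):

import json

def tag_deltas(data):
    # Mirrors the hunk above: tag each delta fragment by its field,
    # skipping None and empty strings via plain truthiness.
    j = json.loads(data)
    if isinstance(j, dict) and j.get("choices"):
        for ch in j["choices"]:
            delta = ch.get("delta", {})
            if delta.get("reasoning"):
                yield ("reasoning", delta["reasoning"])
            if delta.get("content"):
                yield ("content", delta["content"])

payload = '{"choices": [{"delta": {"reasoning": "thinking...", "content": "Hi"}}]}'
print(list(tag_deltas(payload)))
# [('reasoning', 'thinking...'), ('content', 'Hi')]

For string values, the truthiness check delta.get("reasoning") is equivalent to the hunk's explicit is-not-None/not-empty test.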
@@ -236,7 +234,7 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt):
 async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt):
     ensure_stop_event(sess)
     if not get_available_items(LINUX_SERVER_PROVIDER_KEYS, LINUX_SERVER_PROVIDER_KEYS_MARKED) or not get_available_items(LINUX_SERVER_HOSTS, LINUX_SERVER_HOSTS_ATTEMPTS):
-        yield RESPONSES["RESPONSE_3"]
+        yield ("content", RESPONSES["RESPONSE_3"])
         return
     if not hasattr(sess, "session_id") or not sess.session_id:
         sess.session_id = str(uuid.uuid4())
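
The availability-check error path now emits the same tagged tuple as the happy path. That keeps the consumer's unpacking safe: downstream code reads every item as a (typ, chunk) pair, so a bare string here would fail at unpack time. A two-line illustration (the message text is made up):

typ, chunk = ("content", "No providers available.")   # unpacks cleanly
# typ, chunk = "No providers available."              # ValueError: too many values to unpack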
@@ -270,11 +268,11 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt):
                 if not got_any:
                     got_any = True
                     sess.active_candidate = (h, k)
-                full_text += chunk
+                full_text += chunk[1]
                 yield chunk
             if got_any and full_text:
                 return
-    yield RESPONSES["RESPONSE_2"]
+    yield ("content", RESPONSES["RESPONSE_2"])
 
 async def respond_async(multi, history, model_display, sess, custom_prompt):
     ensure_stop_event(sess)
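
full_text now accumulates only the text half of each tuple (chunk[1]) while the tuples themselves are forwarded untouched; full_text is what decides whether the RESPONSE_2 fallback fires. A runnable sketch of that relay pattern, with fake_stream and relay standing in for the real provider stream and loop:

import asyncio

async def fake_stream():
    yield ("reasoning", "step 1 ")
    yield ("content", "answer")

async def relay(stream):
    # Forward tagged chunks unchanged; track bare text for the fallback check.
    full_text = ""
    got_any = False
    async for chunk in stream:
        got_any = True
        full_text += chunk[1]
        yield chunk
    if not (got_any and full_text):
        yield ("content", "fallback message")

async def main():
    async for typ, text in relay(fake_stream()):
        print(typ, text)

asyncio.run(main())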
@@ -293,15 +291,26 @@ async def respond_async(multi, history, model_display, sess, custom_prompt):
     yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
     queue = asyncio.Queue()
     async def background():
-        …
-        …
-        …
-        …
+        display_text = ""
+        content_started = False
+        async for typ, chunk in chat_with_model_async(history, inp, model_display, sess, custom_prompt):
+            if typ == "reasoning":
+                if not content_started:
+                    display_text += chunk
+                    await queue.put(("set", display_text))
+            else:
+                if not content_started:
+                    content_started = True
+                    display_text = chunk
+                    await queue.put(("replace", display_text))
+                else:
+                    display_text += chunk
+                    await queue.put(("append", display_text))
         await queue.put(None)
-        return
+        return display_text
     bg_task = asyncio.create_task(background())
     stop_task = asyncio.create_task(sess.stop_event.wait())
+    first_update = False
     try:
         while True:
             done, _ = await asyncio.wait({stop_task, asyncio.create_task(queue.get())}, return_when=asyncio.FIRST_COMPLETED)
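
The rewritten background() is the core of the commit: reasoning chunks build up a preview only until the first content chunk arrives, at which point a "replace" update swaps the preview out for the answer, hiding the reasoning status once real output starts. A self-contained sketch of that producer side (tagged_source and producer are stand-ins for chat_with_model_async and background):

import asyncio

async def tagged_source():
    for item in [("reasoning", "thinking... "), ("reasoning", "done. "),
                 ("content", "Hello"), ("content", ", world!")]:
        yield item

async def producer(queue):
    display_text = ""
    content_started = False
    async for typ, chunk in tagged_source():
        if typ == "reasoning":
            if not content_started:
                display_text += chunk
                await queue.put(("set", display_text))
        else:
            if not content_started:
                content_started = True
                display_text = chunk            # drop the reasoning preview
                await queue.put(("replace", display_text))
            else:
                display_text += chunk
                await queue.put(("append", display_text))
    await queue.put(None)                       # sentinel: stream finished

async def main():
    queue = asyncio.Queue()
    asyncio.create_task(producer(queue))
    while (item := await queue.get()) is not None:
        print(item)

asyncio.run(main())

Late reasoning chunks (after content_started flips) are silently dropped, which is what keeps the finished answer free of status text.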
@@ -312,15 +321,11 @@ async def respond_async(multi, history, model_display, sess, custom_prompt):
                     sess.stop_event.clear()
                     return
             for d in done:
-                …
-                if …
+                result = d.result()
+                if result is None:
                     raise StopAsyncIteration
-                …
-                …
-                    history[-1][1] = chunk
-                    first_meaningful_chunk_found = True
-                else:
-                    history[-1][1] += chunk
+                action, text = result
+                history[-1][1] = text
                 yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
     except StopAsyncIteration:
         pass
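
On the consumer side, each queue item is raced against sess.stop_event via asyncio.wait(..., return_when=FIRST_COMPLETED), and the None sentinel ends the loop. A sketch of that pattern (consume is hypothetical; unlike the hunk, it also cancels the losing task instead of leaving it pending):

import asyncio

async def consume(queue, stop_event):
    while True:
        stop_task = asyncio.create_task(stop_event.wait())
        get_task = asyncio.create_task(queue.get())
        done, pending = await asyncio.wait(
            {stop_task, get_task}, return_when=asyncio.FIRST_COMPLETED)
        for t in pending:
            t.cancel()                      # avoid leaking the losing task
        if stop_task in done:
            print("stopped by user")
            return
        result = get_task.result()
        if result is None:                  # producer's end-of-stream sentinel
            return
        action, text = result
        print(action, text)

async def main():
    queue = asyncio.Queue()
    stop = asyncio.Event()
    await queue.put(("set", "partial answer"))
    await queue.put(None)
    await consume(queue, stop)

asyncio.run(main())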
|