Update ui/ui_core.py
ui/ui_core.py  +5 -6  (CHANGED)
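
Summary of the change, as read from the diff below: the ThreadPoolExecutor-based parallel chunk processing is replaced with a plain sequential list comprehension, a guard is added that skips any chunk longer than 8192 words, and the per-chunk empty-response warning print is removed.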
@@ -134,6 +134,9 @@ def create_ui(agent: TxAgent):
         chunks = chunk_text(extracted_text.strip())
 
         def process_chunk(i, chunk):
+            if len(chunk.split()) > 8192:
+                return f"[Chunk {i+1}] ⚠️ Skipped: input exceeds model limit."
+
             chunked_prompt = (
                 f"{context}\n\n--- Uploaded File Content (Chunk {i+1}/{len(chunks)}) ---\n\n{chunk}\n\n"
                 f"--- End of Chunk ---\n\nNow begin your analysis:"
@@ -153,7 +156,6 @@ def create_ui(agent: TxAgent):
                 result = ""
                 for update in generator:
                     if update is None:
-                        print(f"[Warning] Empty response in chunk {i+1}")
                         continue
                     if isinstance(update, str):
                         result += update
@@ -166,10 +168,7 @@ def create_ui(agent: TxAgent):
                 print(f"[Error in chunk {i+1}] {err}")
                 return f"[Chunk {i+1}] ❌ Failed to process due to error."
 
-
-        futures = [executor.submit(process_chunk, i, chunk) for i, chunk in enumerate(chunks)]
-        results = [f.result() for f in as_completed(futures)]
-
+        results = [process_chunk(i, chunk) for i, chunk in enumerate(chunks)]
         full_response = "\n\n".join(results)
         full_response = clean_final_response(full_response.strip())
         history[-1] = {"role": "assistant", "content": full_response}
@@ -190,4 +189,4 @@ def create_ui(agent: TxAgent):
             ["Is there anything abnormal in the attached blood work report?"]
         ], inputs=message_input)
 
-    return demo
+    return demo
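
For context, here is a minimal, self-contained sketch of the chunked-analysis flow as it stands after this commit. chunk_text and clean_final_response exist in the real ui/ui_core.py; the simplified bodies given for them here, along with the run_agent stub and the analyze wrapper, are hypothetical stand-ins included only so the sketch runs on its own.

# Hypothetical stand-ins: the real chunk_text/clean_final_response live in
# ui/ui_core.py; these simplified versions only make the sketch self-contained.
def chunk_text(text, size=2000):
    words = text.split()
    return [" ".join(words[i:i + size]) for i in range(0, len(words), size)]

def clean_final_response(text):
    return text.strip()

def run_agent(prompt):
    # Hypothetical stand-in for the streaming agent call; yields text fragments.
    yield f"(analysis of a {len(prompt)}-character prompt)"

MAX_CHUNK_WORDS = 8192  # mirrors the word-count guard added in this commit

def analyze(extracted_text, context):
    chunks = chunk_text(extracted_text.strip())

    def process_chunk(i, chunk):
        # Guard added by this commit: skip oversized chunks instead of
        # sending them to the model and failing downstream.
        if len(chunk.split()) > MAX_CHUNK_WORDS:
            return f"[Chunk {i+1}] ⚠️ Skipped: input exceeds model limit."
        chunked_prompt = (
            f"{context}\n\n--- Uploaded File Content (Chunk {i+1}/{len(chunks)}) ---\n\n"
            f"{chunk}\n\n--- End of Chunk ---\n\nNow begin your analysis:"
        )
        try:
            result = ""
            for update in run_agent(chunked_prompt):
                if update is None:
                    continue  # the warning print here was removed in this commit
                if isinstance(update, str):
                    result += update
            return result
        except Exception as err:
            print(f"[Error in chunk {i+1}] {err}")
            return f"[Chunk {i+1}] ❌ Failed to process due to error."

    # Sequential replacement for the old executor.submit/as_completed pair:
    # chunk i+1 starts only after chunk i has finished.
    results = [process_chunk(i, chunk) for i, chunk in enumerate(chunks)]
    return clean_final_response("\n\n".join(results))

print(analyze("word " * 50, "You are a medical assistant."))

One trade-off worth noting: the removed as_completed() version collected results in completion order, so chunks could be joined out of document order; the sequential comprehension preserves chunk order and avoids concurrent calls into the model, at the cost of wall-clock time on multi-chunk uploads.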