Update ui/ui_core.py

ui/ui_core.py (+38 −36)
```diff
@@ -75,7 +75,7 @@ def convert_file_to_json(file_path: str, file_type: str) -> str:
     except Exception as e:
         return json.dumps({"error": f"Error reading {os.path.basename(file_path)}: {str(e)}"})
 
-def chunk_text(text: str, max_tokens: int = 8192) -> List[str]:
+def chunk_text(text: str, max_tokens: int = 6000) -> List[str]:
     chunks = []
     words = text.split()
     chunk = []
```
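The only change in this hunk is `chunk_text`'s default budget dropping from 8192 to 6000, which leaves headroom inside the model's 8192-token window (`max_token=8192` in the `run_gradio_chat` call further down) for the prompt scaffolding wrapped around each chunk. The function body is only partially visible in the diff; a minimal sketch consistent with the visible lines, treating whitespace-separated words as a rough token proxy, could look like this:

```python
from typing import List

def chunk_text(text: str, max_tokens: int = 6000) -> List[str]:
    # Sketch only: approximates one token per whitespace-separated word,
    # matching the `words = text.split()` visible in the diff context.
    chunks: List[str] = []
    words = text.split()
    chunk: List[str] = []
    for word in words:
        chunk.append(word)
        if len(chunk) >= max_tokens:
            chunks.append(" ".join(chunk))
            chunk = []
    if chunk:  # flush any remainder shorter than max_tokens
        chunks.append(" ".join(chunk))
    return chunks
```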
```diff
@@ -94,9 +94,9 @@ def chunk_text(text: str, max_tokens: int = 8192) -> List[str]:
 
 def create_ui(agent: TxAgent):
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
-        gr.Markdown("<h1 style='text-align: center;'…
+        gr.Markdown("<h1 style='text-align: center;'>📋 CPS: Clinical Patient Support System</h1>")
 
-        chatbot = gr.Chatbot(label="CPS Assistant", height=600, type="…
+        chatbot = gr.Chatbot(label="CPS Assistant", height=600, type="messages")
         file_upload = gr.File(
             label="Upload Medical File",
             file_types=[".pdf", ".txt", ".docx", ".jpg", ".png", ".csv", ".xls", ".xlsx"],
```
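Worth verifying in this hunk: `gr.Chatbot(type="messages")` expects history entries as dicts with `role` and `content` keys, yet the handler below still appends `("user", message)`-style tuples. If the tuple form shown in the diff is accurate rather than a rendering artifact, the appends would need to change along these lines (a hypothetical fix, not part of this commit):

```python
# type="messages" renders role/content dicts, not (role, text) tuples
history.append({"role": "user", "content": message})
history.append({"role": "assistant", "content": "⏳ Processing your request..."})
yield history
```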
```diff
@@ -117,18 +117,18 @@ def create_ui(agent: TxAgent):
             )
 
             try:
-                history.append((…
+                history.append(("user", message))
+                history.append(("assistant", "⏳ Processing your request..."))
                 yield history
 
                 extracted_text = ""
                 if uploaded_files and isinstance(uploaded_files, list):
-
-                    for index, file in enumerate(uploaded_files):
+                    for file in uploaded_files:
                         if not hasattr(file, 'name'):
                             continue
                         path = file.name
-
-                        json_text = convert_file_to_json(path,…
+                        ext = path.split(".")[-1].lower()
+                        json_text = convert_file_to_json(path, ext)
                         extracted_text += sanitize_utf8(json_text) + "\n"
 
                 chunks = chunk_text(extracted_text.strip())
```
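One robustness caveat on the new extension handling: `path.split(".")[-1]` returns the whole filename when the path contains no dot, so `convert_file_to_json` would receive a bogus file type for extension-less uploads. `os.path.splitext` avoids that (shown as an alternative, not what the commit does):

```python
import os

# Yields "" for extension-less paths instead of the whole filename.
ext = os.path.splitext(path)[1].lstrip(".").lower()
```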
```diff
@@ -138,44 +138,46 @@ def create_ui(agent: TxAgent):
                         f"{context}\n\n--- Uploaded File Content (Chunk {i+1}/{len(chunks)}) ---\n\n{chunk}\n\n"
                         f"--- End of Chunk ---\n\nNow begin your analysis:"
                     )
-                    … (22 removed lines; content lost in the page capture)
+                    try:
+                        generator = agent.run_gradio_chat(
+                            message=chunked_prompt,
+                            history=[],
+                            temperature=0.3,
+                            max_new_tokens=1024,
+                            max_token=8192,
+                            call_agent=False,
+                            conversation=conversation,
+                            uploaded_files=uploaded_files,
+                            max_round=30
+                        )
+                        result = ""
+                        for update in generator:
+                            if update is None:
+                                print(f"[Warning] Empty response in chunk {i+1}")
+                                continue
+                            if isinstance(update, str):
+                                result += update
+                            elif isinstance(update, list):
+                                for msg in update:
+                                    if hasattr(msg, 'content'):
+                                        result += msg.content
+                        return result if result.strip() else f"[Chunk {i+1}] ⚠️ No response received."
+                    except Exception as err:
+                        print(f"[Error in chunk {i+1}] {err}")
+                        return f"[Chunk {i+1}] ❌ Failed to process due to error."
+
                 with ThreadPoolExecutor(max_workers=min(8, len(chunks))) as executor:
                     futures = [executor.submit(process_chunk, i, chunk) for i, chunk in enumerate(chunks)]
                     results = [f.result() for f in as_completed(futures)]
 
                 full_response = "\n\n".join(results)
                 full_response = clean_final_response(full_response.strip())
-                history[-1] = (…
+                history[-1] = ("assistant", full_response)
                 yield history
 
             except Exception as chat_error:
                 print(f"Chat handling error: {chat_error}")
-
-                if len(history) > 0 and history[-1][1].startswith("⏳"):
-                    history[-1] = (history[-1][0], error_msg)
-                else:
-                    history.append((message, error_msg))
+                history[-1] = ("assistant", "❌ An error occurred while processing your request.")
                 yield history
 
         inputs = [message_input, chatbot, conversation_state, file_upload]
```
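Two observations on this last hunk. First, the simplified `except` handler overwrites `history[-1]` unconditionally, dropping the old guard (`if len(history) > 0 and history[-1][1].startswith("⏳")`) that protected against an empty history. Second, `as_completed` yields futures in completion order, not submission order, so `"\n\n".join(results)` can stitch the chunk responses together out of sequence. Because `futures` is already built in chunk order, iterating it directly preserves ordering at no extra cost (a hedged alternative, not part of this commit):

```python
from concurrent.futures import ThreadPoolExecutor

with ThreadPoolExecutor(max_workers=min(8, len(chunks))) as executor:
    futures = [executor.submit(process_chunk, i, chunk) for i, chunk in enumerate(chunks)]
    # f.result() blocks until each future completes; order matches submission
    results = [f.result() for f in futures]
full_response = "\n\n".join(results)
```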
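The diff ends at the `inputs` list, so the event wiring itself is outside the changed range. Presumably the list feeds a `.submit()` (or button click) binding on the message box, along these lines (hypothetical sketch; the handler name `handle_chat` is assumed, not shown in the commit):

```python
# Hypothetical wiring; the actual binding is outside this diff.
message_input.submit(
    handle_chat,    # the generator handler shown above (name assumed)
    inputs=inputs,  # [message_input, chatbot, conversation_state, file_upload]
    outputs=chatbot,
)
```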