asebaq committed
Commit 42e14cf · 1 Parent(s): 904ceaa

Fix Gradio UI error

Files changed (1)
  1. Gradio_UI.py +70 -24
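
Apart from Black-style reformatting, the one behavioral change in this commit is in stream_to_gradio: the token counters were previously incremented whenever the model attribute existed, even when its value was None, which crashes with a TypeError for models that do not report token usage. A minimal standalone sketch of the failure and of the new guard (DummyModel is a hypothetical stand-in for agent.model, not part of the commit):

    class DummyModel:
        # A backend that reports no usage leaves the counters as None.
        last_input_token_count = None
        last_output_token_count = None

    model = DummyModel()
    total_input_tokens = 0
    total_output_tokens = 0

    # The old code only checked hasattr(), which is True even when the
    # value is None, so the unguarded increment raised:
    #   TypeError: unsupported operand type(s) for +=: 'int' and 'NoneType'
    # total_input_tokens += model.last_input_token_count

    # Fixed version: check the value, not just the attribute.
    if (
        hasattr(model, "last_input_token_count")
        and model.last_input_token_count is not None
    ):
        total_input_tokens += model.last_input_token_count
        if model.last_output_token_count is not None:
            total_output_tokens += model.last_output_token_count

    print(total_input_tokens, total_output_tokens)  # prints "0 0", no crash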
Gradio_UI.py CHANGED
@@ -19,7 +19,12 @@ import re
 import shutil
 from typing import Optional
 
-from smolagents.agent_types import AgentAudio, AgentImage, AgentText, handle_agent_output_types
+from smolagents.agent_types import (
+    AgentAudio,
+    AgentImage,
+    AgentText,
+    handle_agent_output_types,
+)
 from smolagents.agents import ActionStep, MultiStepAgent
 from smolagents.memory import MemoryStep
 from smolagents.utils import _is_package_available
@@ -33,7 +38,9 @@ def pull_messages_from_step(
 
     if isinstance(step_log, ActionStep):
         # Output the step number
-        step_number = f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        step_number = (
+            f"Step {step_log.step_number}" if step_log.step_number is not None else ""
+        )
         yield gr.ChatMessage(role="assistant", content=f"**{step_number}**")
 
         # First yield the thought/reasoning from the LLM
@@ -41,9 +48,15 @@ def pull_messages_from_step(
             # Clean up the LLM output
             model_output = step_log.model_output.strip()
             # Remove any trailing <end_code> and extra backticks, handling multiple possible formats
-            model_output = re.sub(r"```\s*<end_code>", "```", model_output)  # handles ```<end_code>
-            model_output = re.sub(r"<end_code>\s*```", "```", model_output)  # handles <end_code>```
-            model_output = re.sub(r"```\s*\n\s*<end_code>", "```", model_output)  # handles ```\n<end_code>
+            model_output = re.sub(
+                r"```\s*<end_code>", "```", model_output
+            )  # handles ```<end_code>
+            model_output = re.sub(
+                r"<end_code>\s*```", "```", model_output
+            )  # handles <end_code>```
+            model_output = re.sub(
+                r"```\s*\n\s*<end_code>", "```", model_output
+            )  # handles ```\n<end_code>
             model_output = model_output.strip()
             yield gr.ChatMessage(role="assistant", content=model_output)
 
@@ -63,8 +76,12 @@ def pull_messages_from_step(
 
             if used_code:
                 # Clean up the content by removing any end code tags
-                content = re.sub(r"```.*?\n", "", content)  # Remove existing code blocks
-                content = re.sub(r"\s*<end_code>\s*", "", content)  # Remove end_code tags
+                content = re.sub(
+                    r"```.*?\n", "", content
+                )  # Remove existing code blocks
+                content = re.sub(
+                    r"\s*<end_code>\s*", "", content
+                )  # Remove end_code tags
                 content = content.strip()
                 if not content.startswith("```python"):
                     content = f"```python\n{content}\n```"
@@ -90,7 +107,11 @@ def pull_messages_from_step(
                     yield gr.ChatMessage(
                         role="assistant",
                         content=f"{log_content}",
-                        metadata={"title": "📝 Execution Logs", "parent_id": parent_id, "status": "done"},
+                        metadata={
+                            "title": "📝 Execution Logs",
+                            "parent_id": parent_id,
+                            "status": "done",
+                        },
                     )
 
             # Nesting any errors under the tool call
@@ -98,7 +119,11 @@ def pull_messages_from_step(
                 yield gr.ChatMessage(
                     role="assistant",
                     content=str(step_log.error),
-                    metadata={"title": "💥 Error", "parent_id": parent_id, "status": "done"},
+                    metadata={
+                        "title": "💥 Error",
+                        "parent_id": parent_id,
+                        "status": "done",
+                    },
                 )
 
             # Update parent message metadata to done status without yielding a new message
@@ -106,17 +131,25 @@ def pull_messages_from_step(
 
         # Handle standalone errors but not from tool calls
         elif hasattr(step_log, "error") and step_log.error is not None:
-            yield gr.ChatMessage(role="assistant", content=str(step_log.error), metadata={"title": "💥 Error"})
+            yield gr.ChatMessage(
+                role="assistant",
+                content=str(step_log.error),
+                metadata={"title": "💥 Error"},
+            )
 
         # Calculate duration and token information
         step_footnote = f"{step_number}"
-        if hasattr(step_log, "input_token_count") and hasattr(step_log, "output_token_count"):
-            token_str = (
-                f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
-            )
+        if hasattr(step_log, "input_token_count") and hasattr(
+            step_log, "output_token_count"
+        ):
+            token_str = f" | Input-tokens:{step_log.input_token_count:,} | Output-tokens:{step_log.output_token_count:,}"
            step_footnote += token_str
         if hasattr(step_log, "duration"):
-            step_duration = f" | Duration: {round(float(step_log.duration), 2)}" if step_log.duration else None
+            step_duration = (
+                f" | Duration: {round(float(step_log.duration), 2)}"
+                if step_log.duration
+                else None
+            )
             step_footnote += step_duration
         step_footnote = f"""<span style="color: #bbbbc2; font-size: 12px;">{step_footnote}</span> """
         yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
@@ -139,15 +172,20 @@ def stream_to_gradio(
     total_input_tokens = 0
     total_output_tokens = 0
 
-    for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
+    for step_log in agent.run(
+        task, stream=True, reset=reset_agent_memory, additional_args=additional_args
+    ):
         # Track tokens if model provides them
-        if hasattr(agent.model, "last_input_token_count"):
+        if (
+            hasattr(agent.model, "last_input_token_count")
+            and agent.model.last_input_token_count is not None
+        ):
             total_input_tokens += agent.model.last_input_token_count
-            total_output_tokens += agent.model.last_output_token_count
+            if agent.model.last_output_token_count is not None:
+                total_output_tokens += agent.model.last_output_token_count
             if isinstance(step_log, ActionStep):
                 step_log.input_token_count = agent.model.last_input_token_count
                 step_log.output_token_count = agent.model.last_output_token_count
-
         for message in pull_messages_from_step(
             step_log,
         ):
@@ -172,7 +210,9 @@ def stream_to_gradio(
            content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
         )
     else:
-        yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
+        yield gr.ChatMessage(
+            role="assistant", content=f"**Final answer:** {str(final_answer)}"
+        )
 
 
 class GradioUI:
@@ -242,10 +282,14 @@ class GradioUI:
         sanitized_name = "".join(sanitized_name)
 
         # Save the uploaded file to the specified folder
-        file_path = os.path.join(self.file_upload_folder, os.path.basename(sanitized_name))
+        file_path = os.path.join(
+            self.file_upload_folder, os.path.basename(sanitized_name)
+        )
         shutil.copy(file.name, file_path)
 
-        return gr.Textbox(f"File uploaded: {file_path}", visible=True), file_uploads_log + [file_path]
+        return gr.Textbox(
+            f"File uploaded: {file_path}", visible=True
+        ), file_uploads_log + [file_path]
 
     def log_user_message(self, text_input, file_uploads_log):
         return (
@@ -277,7 +321,9 @@ class GradioUI:
             # If an upload folder is provided, enable the upload feature
             if self.file_upload_folder is not None:
                 upload_file = gr.File(label="Upload a file")
-                upload_status = gr.Textbox(label="Upload Status", interactive=False, visible=False)
+                upload_status = gr.Textbox(
+                    label="Upload Status", interactive=False, visible=False
+                )
                 upload_file.change(
                     self.upload_file,
                     [upload_file, file_uploads_log],
@@ -293,4 +339,4 @@ class GradioUI:
         demo.launch(debug=True, share=True, **kwargs)
 
 
-__all__ = ["stream_to_gradio", "GradioUI"]
+__all__ = ["stream_to_gradio", "GradioUI"]
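
For reference, a typical way to drive the patched module, assuming the smolagents version this file targets (the CodeAgent/HfApiModel setup below is illustrative and not part of the commit):

    from smolagents import CodeAgent, HfApiModel

    from Gradio_UI import GradioUI

    # Minimal agent with no tools; any MultiStepAgent works here.
    agent = CodeAgent(tools=[], model=HfApiModel())

    # GradioUI streams each ActionStep through stream_to_gradio() into a
    # chat interface; launch() forwards kwargs to demo.launch(...).
    GradioUI(agent, file_upload_folder="./uploads").launch()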