hadadrjt committed
Commit af424b9 · Parent: 4901fb7

ai: Implementing gradio multimodal textbox.

Files changed (1):
  1. app.py +62 -141

app.py CHANGED
@@ -29,6 +29,9 @@ import fitz
 import io
 from pathlib import Path
 from PIL import Image
+from pptx import Presentation
+
+os.system("apt-get update -q -y && apt-get install -q -y tesseract-ocr tesseract-ocr-eng tesseract-ocr-ind libleptonica-dev libtesseract-dev")
 
 LINUX_SERVER_HOSTS = [host for host in json.loads(os.getenv("LINUX_SERVER_HOST", "[]")) if host]
 LINUX_SERVER_PROVIDER_KEYS = [key for key in json.loads(os.getenv("LINUX_SERVER_PROVIDER_KEY", "[]")) if key]
@@ -45,196 +48,114 @@ META_TAGS = os.getenv("META_TAGS")
 
 ALLOWED_EXTENSIONS = json.loads(os.getenv("ALLOWED_EXTENSIONS"))
 
-stop_event = threading.Event()
 session = requests.Session()
 
 def get_model_key(display_name):
     return next((k for k, v in MODEL_MAPPING.items() if v == display_name), MODEL_CHOICES[0])
 
-def extract_text(file_path):
-    ext = Path(file_path).suffix.lower()
-
-    if ext == ".txt":
-        try:
-            with open(file_path, "r", encoding="utf-8") as file:
-                return file.read()
-        except:
-            return ""
+def simulate_streaming_response(text):
+    for line in text.splitlines():
+        yield line + "\n"
+        time.sleep(0.05)
 
-    elif ext == ".pdf":
-        text = []
-        try:
+def extract_file_content(file_path):
+    ext = Path(file_path).suffix.lower()
+    content = ""
+    try:
+        if ext == ".pdf":
             with pdfplumber.open(file_path) as pdf:
                 for page in pdf.pages:
-                    text.append(page.extract_text() or "")
-            if not "".join(text).strip():
-                text = extract_text_from_pdf_images(file_path)
-        except:
-            return ""
-        return "\n".join(text)
-
-    elif ext in [".doc", ".docx"]:
-        try:
+                    text = page.extract_text()
+                    if text:
+                        content += text + "\n"
+                    tables = page.extract_tables()
+                    if tables:
+                        for table in tables:
+                            table_str = "\n".join([", ".join(row) for row in table if row])
+                            content += "\n" + table_str + "\n"
+        elif ext in [".doc", ".docx"]:
             doc = docx.Document(file_path)
-            text = "\n".join([para.text for para in doc.paragraphs])
-            if not text.strip():
-                text = extract_text_from_doc_images(file_path)
-            return text
-        except:
-            return ""
-
-    elif ext in [".xls", ".xlsx"]:
-        try:
+            for para in doc.paragraphs:
+                content += para.text + "\n"
+        elif ext in [".xlsx", ".xls"]:
             df = pd.read_excel(file_path)
-            return df.to_string()
-        except:
-            return ""
-
-    elif ext in [".ppt", ".pptx"]:
-        try:
-            prs = pptx.Presentation(file_path)
-            text = []
+            content += df.to_csv(index=False)
+        elif ext in [".ppt", ".pptx"]:
+            prs = Presentation(file_path)
             for slide in prs.slides:
                 for shape in slide.shapes:
-                    if hasattr(shape, "text"):
-                        text.append(shape.text)
-            return "\n".join(text)
-        except:
-            return ""
-
-    return ""
-
-def extract_text_from_pdf_images(pdf_path):
-    text = []
-    try:
-        doc = fitz.open(pdf_path)
-        for page_num in range(len(doc)):
-            pix = doc[page_num].get_pixmap()
-            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
-            text.append(pytesseract.image_to_string(img))
-    except:
-        return []
-    return text
-
-def extract_text_from_doc_images(doc_path):
-    text = []
-    try:
-        doc = docx.Document(doc_path)
-        for rel in doc.part.rels:
-            if "image" in doc.part.rels[rel].target_ref:
-                img_data = doc.part.rels[rel].target_part.blob
-                img = Image.open(io.BytesIO(img_data))
-                text.append(pytesseract.image_to_string(img))
-    except:
-        return []
-    return "\n".join(text)
-
-def simulate_streaming_response(text):
-    for line in text.splitlines():
-        if stop_event.is_set():
-            return
-        yield line + "\n"
-        time.sleep(0.05)
+                    if hasattr(shape, "text") and shape.text:
+                        content += shape.text + "\n"
+        elif ext in [".png", ".jpg", ".jpeg", ".tiff", ".bmp", ".gif", ".webp"]:
+            try:
+                pytesseract.pytesseract.tesseract_cmd = "/usr/bin/tesseract"
+                image = Image.open(file_path)
+                text = pytesseract.image_to_string(image)
+                content += text + "\n"
+            except Exception as e:
+                content += f"{e}\n"
+        else:
+            content = Path(file_path).read_text(encoding="utf-8")
+    except Exception as e:
+        content = f"{file_path}: {e}"
+    return content.strip()
 
 def chat_with_model(history, user_input, selected_model_display):
-    if stop_event.is_set():
-        yield RESPONSES["RESPONSE_1"]
-        return
-
     if not LINUX_SERVER_PROVIDER_KEYS or not LINUX_SERVER_HOSTS:
         yield RESPONSES["RESPONSE_3"]
         return
-
     selected_model = get_model_key(selected_model_display)
     model_config = MODEL_CONFIG.get(selected_model, DEFAULT_CONFIG)
-
     messages = [{"role": "user", "content": user} for user, _ in history]
     messages += [{"role": "assistant", "content": assistant} for _, assistant in history if assistant]
     messages.append({"role": "user", "content": user_input})
-
     data = {"model": selected_model, "messages": messages, **model_config}
-
     random.shuffle(LINUX_SERVER_PROVIDER_KEYS)
     random.shuffle(LINUX_SERVER_HOSTS)
-
     for api_key in LINUX_SERVER_PROVIDER_KEYS[:2]:
         for host in LINUX_SERVER_HOSTS[:2]:
-            if stop_event.is_set():
-                yield RESPONSES["RESPONSE_1"]
-                return
             try:
                 response = session.post(host, json=data, headers={"Authorization": f"Bearer {api_key}"})
-                if stop_event.is_set():
-                    yield RESPONSES["RESPONSE_1"]
-                    return
                 if response.status_code < 400:
                     ai_text = response.json().get("choices", [{}])[0].get("message", {}).get("content", RESPONSES["RESPONSE_2"])
                     yield from simulate_streaming_response(ai_text)
                     return
             except requests.exceptions.RequestException:
                 continue
-
     yield RESPONSES["RESPONSE_3"]
 
-def respond(user_input, file_path, history, selected_model_display):
-    file_text = extract_text(file_path) if file_path else ""
-    combined_input = f"{user_input}\n\n{file_text}".strip()
-
-    if not combined_input:
-        yield history, gr.update(value=""), gr.update(visible=False, interactive=False), gr.update(visible=True)
-        return
-
-    stop_event.clear()
-    history.append([combined_input, RESPONSES["RESPONSE_8"]])
-
-    yield history, gr.update(value=""), gr.update(visible=False), gr.update(visible=True)
-
+def respond(multi_input, history, selected_model_display):
+    message = {"text": multi_input.get("text", "").strip(), "files": multi_input.get("files", [])}
+    if not message["text"] and not message["files"]:
+        return history, gr.MultimodalTextbox(value=None, interactive=True)
+    combined_input = ""
+    for file_item in message["files"]:
+        if isinstance(file_item, dict) and "name" in file_item:
+            file_path = file_item["name"]
+        else:
+            file_path = file_item
+        file_content = extract_file_content(file_path)
+        combined_input += f"{Path(file_path).name}\n\n{file_content}\n\n"
+    if message["text"]:
+        combined_input += message["text"]
+    history.append([combined_input, ""])
     ai_response = ""
     for chunk in chat_with_model(history, combined_input, selected_model_display):
-        if stop_event.is_set():
-            history[-1][1] = RESPONSES["RESPONSE_1"]
-            yield history, gr.update(value=""), gr.update(visible=True), gr.update(visible=False)
-            return
         ai_response += chunk
         history[-1][1] = ai_response
-        yield history, gr.update(value=""), gr.update(visible=False), gr.update(visible=True)
-
-    yield history, gr.update(value=""), gr.update(visible=True), gr.update(visible=False)
-
-def stop_response():
-    stop_event.set()
-    session.close()
+    return history, gr.MultimodalTextbox(value=None, interactive=True)
 
 def change_model(new_model_display):
     return [], new_model_display
 
-def file_type_validation(file):
-    if file:
-        ext = Path(file.name).suffix.lower()
-        return ext in ALLOWED_EXTENSIONS
-    return False
-
-def check_send_button_enabled(msg, file):
-    is_file_valid = file_type_validation(file)
-    return gr.update(interactive=bool(msg.strip()) or bool(file))
-
 with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as demo:
     user_history = gr.State([])
     selected_model = gr.State(MODEL_CHOICES[0])
-
-    chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], show_copy_button=True, show_share_button=False, scale=1, elem_id=AI_TYPES["AI_TYPE_2"])
-    model_dropdown = gr.Dropdown(label=AI_TYPES["AI_TYPE_3"], show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0], interactive=True)
-    msg = gr.Textbox(label=RESPONSES["RESPONSE_4"], show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True)
-    send_btn = gr.Button(RESPONSES["RESPONSE_6"], visible=True, interactive=False)
-    stop_btn = gr.Button(RESPONSES["RESPONSE_7"], variant=RESPONSES["RESPONSE_9"], visible=False)
-
-    with gr.Accordion(AI_TYPES["AI_TYPE_6"], open=False):
-        file_upload = gr.File(label=AI_TYPES["AI_TYPE_5"], file_count="single", type="filepath", file_types=ALLOWED_EXTENSIONS)
+    chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], show_copy_button=True, scale=1, elem_id=AI_TYPES["AI_TYPE_2"])
+    model_dropdown = gr.Dropdown(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
+    msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], scale=0, interactive=True, file_count="single", file_types=ALLOWED_EXTENSIONS)
 
     model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, selected_model])
-    send_btn.click(respond, inputs=[msg, file_upload, user_history, selected_model], outputs=[chatbot, msg, send_btn, stop_btn])
-    msg.change(fn=check_send_button_enabled, inputs=[msg, file_upload], outputs=[send_btn])
-    stop_btn.click(fn=stop_response, outputs=[send_btn, stop_btn])
-    file_upload.change(fn=check_send_button_enabled, inputs=[msg, file_upload], outputs=[send_btn])
+    msg.submit(fn=respond, inputs=[msg, user_history, selected_model], outputs=[chatbot, msg])
 
 demo.launch(show_api=False, max_file_size="1mb")
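
A minimal sketch of the submit contract this commit relies on, assuming Gradio 4.x, where gr.MultimodalTextbox passes its value to the handler as a dict shaped like {"text": str, "files": [path, ...]}; the names echo, box, chat, and hist below are illustrative, not part of the commit:

import gradio as gr

def echo(payload, history):
    # payload is the MultimodalTextbox value: {"text": ..., "files": [...]}
    files = payload.get("files", [])
    text = payload.get("text", "").strip()
    history.append(["\n".join([*map(str, files), text]).strip(), f"received {len(files)} file(s)"])
    # Returning a fresh component clears the box and re-enables it, the same
    # idiom as the commit's gr.MultimodalTextbox(value=None, interactive=True).
    return history, gr.MultimodalTextbox(value=None, interactive=True)

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    hist = gr.State([])
    box = gr.MultimodalTextbox(show_label=False, file_count="single")
    box.submit(echo, inputs=[box, hist], outputs=[chat, box])

demo.launch()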
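One note on the OCR path: the apt-get line installs both the English and Indonesian Tesseract language packs (tesseract-ocr-eng, tesseract-ocr-ind), while pytesseract.image_to_string defaults to English. A sketch of opting into both packs via pytesseract's standard lang parameter (sample.png is an illustrative path):

import pytesseract
from PIL import Image

# Same binary path the diff configures before calling Tesseract.
pytesseract.pytesseract.tesseract_cmd = "/usr/bin/tesseract"
# "eng+ind" runs the English and Indonesian models together.
print(pytesseract.image_to_string(Image.open("sample.png"), lang="eng+ind"))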