ai: Better handling of multiple random session requests.
jarvis.py CHANGED
```diff
@@ -8,6 +8,7 @@ import requests
 import json
 import os
 import random
+import time
 import pytesseract
 import pdfplumber
 import docx
@@ -36,7 +37,10 @@ META_TAGS = os.getenv("META_TAGS")
 
 ALLOWED_EXTENSIONS = json.loads(os.getenv("ALLOWED_EXTENSIONS"))
 
-
+def create_session():
+    s = requests.Session()
+    s.headers.update({"Connection": "keep-alive"})
+    return s
 
 def get_model_key(display_name):
     return next((k for k, v in MODEL_MAPPING.items() if v == display_name), MODEL_CHOICES[0])
@@ -83,7 +87,7 @@ def extract_file_content(file_path):
         content = f"{file_path}: {e}"
     return content.strip()
 
-def chat_with_model(history, user_input, selected_model_display):
+def chat_with_model(history, user_input, selected_model_display, sess):
     if not LINUX_SERVER_PROVIDER_KEYS or not LINUX_SERVER_HOSTS:
         return RESPONSES["RESPONSE_3"]
     selected_model = get_model_key(selected_model_display)
@@ -97,7 +101,7 @@ def chat_with_model(history, user_input, selected_model_display):
     for api_key in LINUX_SERVER_PROVIDER_KEYS[:2]:
         for host in LINUX_SERVER_HOSTS[:2]:
             try:
-                response = requests.post(host, json=data, headers={"Authorization": f"Bearer {api_key}"}, timeout=5)
+                response = sess.post(host, json=data, headers={"Authorization": f"Bearer {api_key}"}, timeout=5)
                 if response.status_code < 400:
                     ai_text = response.json().get("choices", [{}])[0].get("message", {}).get("content", RESPONSES["RESPONSE_2"])
                     return ai_text
@@ -105,10 +109,10 @@ def chat_with_model(history, user_input, selected_model_display):
                 continue
     return RESPONSES["RESPONSE_3"]
 
-def respond(multi_input, history, selected_model_display):
+def respond(multi_input, history, selected_model_display, sess):
     message = {"text": multi_input.get("text", "").strip(), "files": multi_input.get("files", [])}
     if not message["text"] and not message["files"]:
-        return history, gr.MultimodalTextbox(value=None, interactive=True)
+        return history, gr.MultimodalTextbox(value=None, interactive=True), sess
     combined_input = ""
     for file_item in message["files"]:
         if isinstance(file_item, dict) and "name" in file_item:
@@ -120,22 +124,23 @@ def respond(multi_input, history, selected_model_display):
     if message["text"]:
         combined_input += message["text"]
     history.append([combined_input, ""])
-    ai_response = chat_with_model(history, combined_input, selected_model_display)
+    ai_response = chat_with_model(history, combined_input, selected_model_display, sess)
     history[-1][1] = ai_response
-    return history, gr.MultimodalTextbox(value=None, interactive=True)
+    return history, gr.MultimodalTextbox(value=None, interactive=True), sess
 
 def change_model(new_model_display):
-    return [], new_model_display
+    return [], create_session(), new_model_display
 
 with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as jarvis:
     user_history = gr.State([])
+    user_session = gr.State(create_session())
     selected_model = gr.State(MODEL_CHOICES[0])
     chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], show_copy_button=True, scale=1, elem_id=AI_TYPES["AI_TYPE_2"])
    model_dropdown = gr.Dropdown(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
     with gr.Row():
         msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True, file_count="single", file_types=ALLOWED_EXTENSIONS)
 
-    model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, selected_model])
-    msg.submit(fn=respond, inputs=[msg, user_history, selected_model], outputs=[chatbot, msg])
+    model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, user_session, selected_model])
+    msg.submit(fn=respond, inputs=[msg, user_history, selected_model, user_session], outputs=[chatbot, msg, user_session])
 
 jarvis.launch(show_api=False, max_file_size="1mb")
```
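The change threads one `requests.Session` per browser session through `gr.State`, so consecutive chat turns reuse a pooled keep-alive connection instead of opening a fresh one per request, and picking a different model swaps in a new session. Below is a minimal standalone sketch of that reuse pattern; the `httpbin.org` URL is a placeholder for illustration only, not one of the app's configured hosts:

```python
import requests

def create_session():
    # Same shape as the app's helper: a Session keeps a connection pool,
    # so repeated posts to the same host reuse one keep-alive socket.
    s = requests.Session()
    s.headers.update({"Connection": "keep-alive"})
    return s

if __name__ == "__main__":
    sess = create_session()
    for turn in range(3):
        # Placeholder endpoint; jarvis.py posts to its model hosts instead.
        r = sess.post("https://httpbin.org/post", json={"turn": turn}, timeout=5)
        print(turn, r.status_code)
```

Holding the Session in `gr.State` rather than a module-level global gives each connected user an independent pool, which seems to be what the commit title means by handling multiple random session requests.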