ai: Make optillm work.
- jarvis.py +19 -7
- requirements.txt +1 -0
jarvis.py
CHANGED
@@ -57,7 +57,7 @@ def create_session():
     return SessionWithID()
 
 def get_model_key(display_name):
-    return next((k for k, v in MODEL_MAPPING.items() if v == display_name), MODEL_CHOICES[0])
+    return next((k for k, v in MODEL_MAPPING.items() if v == display_name), list(MODEL_MAPPING.keys())[0] if MODEL_MAPPING else MODEL_CHOICES[0])
 
 def extract_file_content(file_path):
     ext = Path(file_path).suffix.lower()
@@ -101,6 +101,20 @@ def extract_file_content(file_path):
         content = f"{file_path}: {e}"
     return content.strip()
 
+def process_ai_response(ai_text):
+    try:
+        result = round_trip_optimization(ai_text)
+        result = re2_approach(result)
+        result = cot_reflection(result)
+        result = advanced_self_consistency_approach(result)
+        result = plansearch(result)
+        result = leap(result)
+        solver = Z3SymPySolverSystem()
+        result = solver.solve(result)
+        return result
+    except Exception:
+        return ai_text
+
 def chat_with_model(history, user_input, selected_model_display, sess):
     if not LINUX_SERVER_PROVIDER_KEYS or not LINUX_SERVER_HOSTS:
         return RESPONSES["RESPONSE_3"]
@@ -120,7 +134,8 @@ def chat_with_model(history, user_input, selected_model_display, sess):
             response = sess.post(host, json=data, headers={"Authorization": f"Bearer {api_key}"}, timeout=1)
             if response.status_code < 400:
                 ai_text = response.json().get("choices", [{}])[0].get("message", {}).get("content", RESPONSES["RESPONSE_2"])
-                return ai_text
+                processed_text = process_ai_response(ai_text)
+                return processed_text
         except requests.exceptions.RequestException:
             continue
     return RESPONSES["RESPONSE_3"]
@@ -132,10 +147,7 @@ def respond(multi_input, history, selected_model_display, sess):
         return
     combined_input = ""
     for file_item in message["files"]:
-        if isinstance(file_item, dict) and "name" in file_item:
-            file_path = file_item["name"]
-        else:
-            file_path = file_item
+        file_path = file_item["name"] if isinstance(file_item, dict) and "name" in file_item else file_item
         file_content = extract_file_content(file_path)
         combined_input += f"{Path(file_path).name}\n\n{file_content}\n\n"
     if message["text"]:
@@ -144,7 +156,7 @@ def respond(multi_input, history, selected_model_display, sess):
     ai_response = chat_with_model(history, combined_input, selected_model_display, sess)
     history[-1][1] = ""
     for character in ai_response:
-        history[-1][1] += character
+        history[-1][1] += str(character)
         time.sleep(0.0009)
     yield history, gr.MultimodalTextbox(value=None, interactive=True), sess
 
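For reference, the get_model_key change makes the fallback prefer the first key of MODEL_MAPPING and only reach for MODEL_CHOICES when the mapping is empty. A minimal sketch of the new behavior, with hypothetical mapping values (the real MODEL_MAPPING and MODEL_CHOICES contents are not shown in this diff):

# Hypothetical values; the real constants live elsewhere in jarvis.py.
MODEL_MAPPING = {"model-a": "Model A", "model-b": "Model B"}
MODEL_CHOICES = ["Model A", "Model B"]

def get_model_key(display_name):
    # Return the internal key whose display name matches; otherwise fall back
    # to the first mapping key, or to MODEL_CHOICES[0] if the mapping is empty.
    return next((k for k, v in MODEL_MAPPING.items() if v == display_name),
                list(MODEL_MAPPING.keys())[0] if MODEL_MAPPING else MODEL_CHOICES[0])

print(get_model_key("Model B"))    # model-b (exact match on display name)
print(get_model_key("Unknown"))    # model-a (first mapping key as fallback)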
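The hunk that adds process_ai_response calls optillm's optimization approaches by bare name, but the corresponding import hunk is not shown. A sketch of the imports the function would need, assuming the names come from optillm's per-approach modules (the module paths are an assumption based on optillm's repository layout, not confirmed by this diff):

# Assumed import locations; verify against the installed optillm version.
from optillm.rto import round_trip_optimization
from optillm.reread import re2_approach
from optillm.cot_reflection import cot_reflection
from optillm.self_consistency import advanced_self_consistency_approach
from optillm.plansearch import plansearch
from optillm.leap import leap
from optillm.z3_solver import Z3SymPySolverSystem

Note that optillm's approach functions generally expect more than a single text argument (typically a system prompt, query, client, and model), so the broad except Exception fallback to the raw ai_text is doing real work here.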
requirements.txt
CHANGED
@@ -8,3 +8,4 @@ python-pptx
 PyMuPDF
 Pillow
 optillm
+json5