Spaces: vaibhavard (Running)
vaibhavard committed · Commit 372d102
1 Parent(s): 9a9efee
first_commit
- .python-version +1 -0
- CodeAI_LOG.txt +1 -0
- Dockerfile +22 -0
- Procfile +1 -0
- app.py +211 -0
- app_combination.py +132 -0
- data.json +1 -0
- extensions/__pycache__/anycreator.cpython-311.pyc +0 -0
- extensions/__pycache__/code_interpreter.cpython-311.pyc +0 -0
- extensions/__pycache__/codebot.cpython-311.pyc +0 -0
- extensions/__pycache__/extensions.cpython-311.pyc +0 -0
- extensions/anycreator.py +31 -0
- function_support.py +71 -0
- har_and_cookies/auth_LambdaChat.json +1 -0
- har_and_cookies/blackbox.json +1 -0
- helpers/__pycache__/config.cpython-311.pyc +0 -0
- helpers/__pycache__/helper.cpython-311.pyc +0 -0
- helpers/__pycache__/models.cpython-311.pyc +0 -0
- helpers/__pycache__/prompts.cpython-311.pyc +0 -0
- helpers/__pycache__/provider.cpython-311.pyc +0 -0
- helpers/helper.py +178 -0
- helpers/models.py +225 -0
- helpers/provider.py +15 -0
- requirements.txt +25 -0
- static/hello_world.txt +0 -0
- tests/apppy.py +32 -0
- tests/e2bdev.py +455 -0
- tests/new.py +29 -0
- tests/prompts.txt +16 -0
- tests/rubbish.py +0 -0
- tests/tests.py +34 -0
- tests/usefull_funcs.py +19 -0
- utils/__init__.py +47 -0
- utils/__pycache__/__init__.cpython-311.pyc +0 -0
- utils/__pycache__/cyclic_buffer.cpython-311.pyc +0 -0
- utils/__pycache__/functions.cpython-311.pyc +0 -0
- utils/__pycache__/llms.cpython-311.pyc +0 -0
- utils/functions.py +50 -0
- utils/llms.py +263 -0
.python-version
ADDED
@@ -0,0 +1 @@
+3.10.2
CodeAI_LOG.txt
ADDED
@@ -0,0 +1 @@
+[{'role': 'system', 'content': '\n# Tools\n\n## Tool1:Code-Interpreter\n- You can read, write, and analyze files on a Linux Server using various languages, including Python, Node.js, and Bash.\n- Code-Interpreter will provide the output of the execution or time out after 60.0 seconds.\n- Save code-generated content, such as plots and graphs, to an external file with a distinct filename added to the data variable, separate from the code_filename.\n- All files MUST be saved and retrieved from the current folder. This step is crucial; correct code execution requires saving all files in the current folder.\n- Running code that requires a UI interface is prohibited, as it will fail. Instead, write alternate code without UI support. Matplotlib is supported.\n- For matplotlib animations, limit the duration to maximum 5 seconds and save them as GIFs without displaying the plot using `plot.show()`. Always Set repeat = False.\n- The start_cmd should be prefixed with sudo for proper code execution.\n- Generated Code should have clear and concise comments **within the code** that explains its purpose and functionality.\n\n### Code-interpreter Usage:\n1) Output data variable in `json` codeblock, conforming to the following grammar:\n```json\n{\n"language":"<Code language name such as python/bash/nodejs>",\n"packages":[<List of python/node packages to install>],\n"system_packages":[<List of apt packages to install>],\n"start_cmd":"Example- sudo python app.py or bash run.sh",\n"filename":"<filename of the file created by using code.>",\n"code_filename":"<filename of the code you are going to run using the start command.(Eg- run.sh , script.py , etc)",\n"port":"Specify the port for the Python app to open. Use \'\' if no port is needed.",\n}\n``` \nNote:code_filename , language and start_cmd are Required parameters and **should NOT be left empty**. \n2) After data output, present code in a **separate codeblock**\n```<code language>\n<Code goes here>\n``` \n- All code output calculations will be external and will be outputted by [system](#code_run_response), and you CANNOT provide expected output. \n- Do NOT provide explanations or additional text with code.\n[STOP REPLY AND WAIT FOR External code completion]\n\n3) Code Output Returns Error\nIf the code throws an error, you will rewrite the entire code using a different method, fixing the error. \n'}, {'role': 'system', 'content': '\n# Tools\n\n## Tool1:Code-Interpreter\n- You can read, write, and analyze files on a Linux Server using various languages, including Python, Node.js, and Bash.\n- Code-Interpreter will provide the output of the execution or time out after 60.0 seconds.\n- Save code-generated content, such as plots and graphs, to an external file with a distinct filename added to the data variable, separate from the code_filename.\n- All files MUST be saved and retrieved from the current folder. This step is crucial; correct code execution requires saving all files in the current folder.\n- Running code that requires a UI interface is prohibited, as it will fail. Instead, write alternate code without UI support. Matplotlib is supported.\n- For matplotlib animations, limit the duration to maximum 5 seconds and save them as GIFs without displaying the plot using `plot.show()`. Always Set repeat = False.\n- The start_cmd should be prefixed with sudo for proper code execution.\n- Generated Code should have clear and concise comments **within the code** that explains its purpose and functionality.\n\n### Code-interpreter Usage:\n1) Output data variable in `json` codeblock, conforming to the following grammar:\n```json\n{\n"language":"<Code language name such as python/bash/nodejs>",\n"packages":[<List of python/node packages to install>],\n"system_packages":[<List of apt packages to install>],\n"start_cmd":"Example- sudo python app.py or bash run.sh",\n"filename":"<filename of the file created by using code.>",\n"code_filename":"<filename of the code you are going to run using the start command.(Eg- run.sh , script.py , etc)",\n"port":"Specify the port for the Python app to open. Use \'\' if no port is needed.",\n}\n``` \nNote:code_filename , language and start_cmd are Required parameters and **should NOT be left empty**. \n2) After data output, present code in a **separate codeblock**\n```<code language>\n<Code goes here>\n``` \n- All code output calculations will be external and will be outputted by [system](#code_run_response), and you CANNOT provide expected output. \n- Do NOT provide explanations or additional text with code.\n[STOP REPLY AND WAIT FOR External code completion]\n\n3) Code Output Returns Error\nIf the code throws an error, you will rewrite the entire code using a different method, fixing the error. \n'}, {'role': 'user', 'content': 'Print out the fibonacci series It is mandatory for you to **Output data variable in `json` codeblock.** as program will fail without data variable'}]
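For reference, a reply that satisfies the grammar in the logged system prompt would look roughly like the following: first the data variable in a json codeblock, then the code in its own block. This is a minimal sketch; the filenames and empty package lists are illustrative, not taken from the log.

```json
{
"language":"python",
"packages":[],
"system_packages":[],
"start_cmd":"sudo python fibonacci.py",
"filename":"",
"code_filename":"fibonacci.py",
"port":""
}
```

```python
# Print the first 10 numbers of the Fibonacci series
a, b = 0, 1
for _ in range(10):
    print(a)
    a, b = b, a + b
```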
Dockerfile
ADDED
@@ -0,0 +1,22 @@
+# Read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+
+FROM python:3.11
+
+RUN useradd -m -u 1000 user
+USER user
+ENV PATH="/home/user/.local/bin:$PATH"
+
+WORKDIR /app
+
+COPY --chown=user ./requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+COPY --chown=user . /app
+EXPOSE 7860
+#ENTRYPOINT ["python"]
+WORKDIR /app
+CMD python app.py
+
+
+#CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
Procfile
ADDED
@@ -0,0 +1 @@
+web: gunicorn main:app --timeout=2000
app.py
ADDED
@@ -0,0 +1,211 @@
+import threading
+from flask import Flask, url_for, redirect
+from flask import request as req
+from flask_cors import CORS
+import helpers.helper as helper
+from helpers.provider import *
+from utils.llms import gpt4,gpt4stream
+app = Flask(__name__)
+CORS(app)
+import queue
+from utils.functions import allocate
+from werkzeug.utils import secure_filename
+import os
+from PIL import Image
+#find . -maxdepth 1 -type f -mmin -10
+#docker run dezsh/inlets client --url=wss://inlets-testing-secret.onrender.com --upstream=http://192.168.1.8:1331 --token=secret --insecure
+#QxYciPJQwwfb1zBu
+#corcel nineteen AI
+app.config['UPLOAD_FOLDER'] = "static"
+
+@app.route("/v1/chat/completions", methods=['POST'])
+@app.route("/chat/completions", methods=['POST'])
+@app.route("/", methods=['POST'])
+def chat_completions2():
+    all_request_data = {}
+    all_request_data['json'] = req.get_json(silent=True) or {}
+    all_request_data['headers'] = dict(req.headers)
+
+    all_request_data['args'] = req.args.to_dict(flat=False)
+    all_request_data['form'] = req.form.to_dict(flat=False)
+    try:
+        all_request_data['raw_data'] = req.data.decode('utf-8')
+    except Exception:
+        all_request_data['raw_data'] = f"Could not decode raw data (length: {len(req.data)})"
+
+
+    # # --- Now you can access your original values from this dict ---
+    # print("--- Consolidated Request Data ---")
+    # print(json.dumps(all_request_data, indent=2))
+    # print("--------------------------------")
+
+    streaming = req.json.get('stream', False)
+    model = req.json.get('model', 'gpt-4-turbo')
+    messages = req.json.get('messages')
+    api_keys = req.headers.get('Authorization').replace('Bearer ', '')
+    functions = req.json.get('functions')
+    tools = req.json.get('tools')
+    response_format = req.json.get('response_format')
+    if streaming:
+        helper.stopped=True
+
+
+    if tools!=None:
+        allocate(messages,api_keys,model,tools)
+    else:
+        allocate(messages,api_keys,model,[])
+
+    t = time.time()
+
+    def stream_response(messages,model,api_keys="",functions=[],tools=[]):
+        helper.q = queue.Queue()  # create a queue to store the response lines
+        if helper.stopped:
+            helper.stopped=False
+
+
+
+        threading.Thread(target=gpt4stream,args=(messages,model,api_keys)).start()  # start the thread
+
+        started=False
+        while True:  # loop until the queue is empty
+            try:
+                if 20>time.time()-t>18 and not started:
+                    yield 'data: %s\n\n' % json.dumps(helper.streamer("> Thinking"), separators=(',', ':'))
+                    time.sleep(2)
+                elif time.time()-t>20 and not started:
+                    yield 'data: %s\n\n' % json.dumps(helper.streamer("."), separators=(',', ':'))
+                    time.sleep(1)
+                if time.time()-t>100 and not started:
+                    yield 'data: %s\n\n' % json.dumps(helper.streamer("Still Thinking...Do not terminate"), separators=(',', ':'))
+                    break
+
+                line = helper.q.get(block=False)
+                if "RESULT: " in line:
+                    line=line.replace("RESULT: ","")
+                    if tools !=None:
+                        yield f'data: {json.dumps(helper.stream_func(line,"tools"))}\n\n'
+                    else:
+                        yield f'data: {json.dumps(helper.end())}\n\n'
+
+                    break
+
+
+                if line == "END":
+                    yield f'data: {json.dumps(helper.end())}\n\n'
+                    break
+                if not started:
+                    started = True
+                    yield 'data: %s\n\n' % json.dumps(helper.streamer("\n\n"), separators=(',', ':'))
+
+
+                yield 'data: %s\n\n' % json.dumps(helper.streamer(line), separators=(',', ':'))
+
+                helper.q.task_done()  # mark the task as done
+
+
+            except queue.Empty:
+                pass
+            except Exception as e:
+                print(e)
+
+
+
+
+
+
+
+    if not streaming:
+        if functions != None:
+            k=gpt4(messages,None,model)
+            return helper.func_output(k,"functions")
+        elif tools!=None:
+
+            k=gpt4(messages,None,model)
+            return helper.func_output(k,"tools")
+
+        else:
+
+            print("USING GPT_4 NO STREAM")
+            print(model)
+
+            k=gpt4(messages,response_format,model)
+            return helper.output(k)
+
+    elif streaming:
+        return app.response_class(stream_response(messages,model,api_keys,functions,tools), mimetype='text/event-stream')
+
+
+
+
+
+
+@app.route('/upload', methods=['GET','POST'])
+def index():
+
+    # If a post method then handle file upload
+    if req.method == 'POST':
+
+        if 'file' not in req.files:
+            return redirect('/')
+
+        file = req.files['file']
+
+
+        if file:
+            filename = secure_filename(file.filename)
+            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
+            if ("camera" in file.filename or "capture" in file.filename or "IMG" in file.filename or "Screenshot" in file.filename):
+                img=Image.open(f"static/{filename}")
+                img.thumbnail((512, 512),Image.Resampling.LANCZOS)
+
+                img.save(f"static/{filename}")
+
+            return filename
+
+
+    # Get Files in the directory and create list items to be displayed to the user
+    file_list = ''
+    for f in os.listdir(app.config['UPLOAD_FOLDER']):
+        # Create link html
+        link = url_for("static", filename=f)
+        file_list = file_list + '<li><a href="%s">%s</a></li>' % (link, f)
+
+    # Format return HTML - allow file upload and list all available files
+    return_html = '''
+    <!doctype html>
+    <title>Upload File</title>
+    <h1>Upload File</h1>
+    <form method=post enctype=multipart/form-data>
+      <input type=file name=file><br>
+      <input type=submit value=Upload>
+    </form>
+    <hr>
+    <h1>Files</h1>
+    <ol>%s</ol>
+    ''' % file_list
+
+    return return_html
+
+
+@app.route('/')
+def yellow_name():
+    return f'Hello world!'
+
+
+
+@app.route("/v1/models")
+@app.route("/models")
+def models():
+    print("Models")
+    return helper.model
+
+
+
+if __name__ == '__main__':
+    config = {
+        'host': '0.0.0.0',
+        'port': 1337,
+        'debug': True,
+    }
+
+    app.run(**config)
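Since app.py mimics the OpenAI chat-completions API, including SSE streaming, it can be exercised with any plain HTTP client. A minimal sketch, assuming the server is running locally on its default port 1337; the bearer token value is arbitrary here, since the handler only strips the "Bearer " prefix:

```python
import json
import requests

# POST a streaming chat completion and print the deltas as they arrive
resp = requests.post(
    "http://localhost:1337/v1/chat/completions",
    headers={"Authorization": "Bearer test"},
    json={
        "model": "gpt-4-turbo",
        "messages": [{"role": "user", "content": "Hi!"}],
        "stream": True,
    },
    stream=True,
)
for raw in resp.iter_lines():
    if raw.startswith(b"data: "):
        chunk = json.loads(raw[len(b"data: "):])
        delta = chunk["choices"][0]["delta"]
        print(delta.get("content", ""), end="", flush=True)
```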
app_combination.py
ADDED
@@ -0,0 +1,132 @@
+from flask import Flask, url_for, redirect
+from flask import request as req
+from flask_cors import CORS
+import helpers.helper as helper
+from helpers.provider import *
+from utils.llms import chat,chatstream
+app = Flask(__name__)
+CORS(app)
+from utils.functions import allocate
+from werkzeug.utils import secure_filename
+import os
+
+
+
+app.config['UPLOAD_FOLDER'] = "static"
+
+@app.route("/v1/chat/completions", methods=['POST'])
+@app.route("/chat/completions", methods=['POST'])
+@app.route("/", methods=['POST'])
+def chat_completions2():
+
+    streaming = req.json.get('stream', False)
+    model = req.json.get('model', 'gpt-4-turbo')
+    messages = req.json.get('messages')
+    api_keys = req.headers.get('Authorization').replace('Bearer ', '')
+    tools = req.json.get('tools')
+    response_format = req.json.get('response_format')
+
+    allocate(messages,model,tools)
+
+
+    def stream_response(messages,model,tools):
+        for line in chatstream(messages,model,api_keys):
+            if "RESULT: " in line:
+                line=line.replace("RESULT: ","")
+                if tools !=None:
+                    yield f'data: {json.dumps(helper.stream_func(line,"tools"))}\n\n'
+                else:
+                    yield f'data: {json.dumps(helper.end())}\n\n'
+                break
+            yield 'data: %s\n\n' % json.dumps(helper.streamer(line), separators=(',', ':'))
+
+
+
+
+    if not streaming:
+        if tools!=None:
+
+            k=chat(messages,None,model)
+            return helper.func_output(k,"tools")
+
+        else:
+
+            print("NO STREAM")
+            print(model)
+
+            k=chat(messages,response_format,model)
+            return helper.output(k)
+
+    elif streaming:
+        return app.response_class(stream_response(messages,model,tools), mimetype='text/event-stream')
+
+
+
+
+
+
+@app.route('/upload', methods=['GET','POST'])
+def index():
+
+    # If a post method then handle file upload
+    if req.method == 'POST':
+
+        if 'file' not in req.files:
+            return redirect('/')
+
+        file = req.files['file']
+
+
+        if file:
+            filename = secure_filename(file.filename)
+            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
+
+            return filename
+
+
+    # Get Files in the directory and create list items to be displayed to the user
+    file_list = ''
+    for f in os.listdir(app.config['UPLOAD_FOLDER']):
+        # Create link html
+        link = url_for("static", filename=f)
+        file_list = file_list + '<li><a href="%s">%s</a></li>' % (link, f)
+
+    # Format return HTML - allow file upload and list all available files
+    return_html = '''
+    <!doctype html>
+    <title>Upload File</title>
+    <h1>Upload File</h1>
+    <form method=post enctype=multipart/form-data>
+      <input type=file name=file><br>
+      <input type=submit value=Upload>
+    </form>
+    <hr>
+    <h1>Files</h1>
+    <ol>%s</ol>
+    ''' % file_list
+
+    return return_html
+
+
+@app.route('/')
+def yellow_name():
+    return f'All good!'
+
+
+
+@app.route("/v1/models")
+@app.route("/models")
+def models():
+    print("Models")
+    return helper.model
+
+
+
+if __name__ == '__main__':
+    config = {
+        'host': '0.0.0.0',
+        'port': 1337,
+        'debug': True,
+    }
+
+    app.run(**config)
data.json
ADDED
@@ -0,0 +1 @@
+{}
extensions/__pycache__/anycreator.cpython-311.pyc
ADDED
Binary file (1.78 kB).
extensions/__pycache__/code_interpreter.cpython-311.pyc
ADDED
Binary file (8.59 kB).
extensions/__pycache__/codebot.cpython-311.pyc
ADDED
Binary file (25.2 kB).
extensions/__pycache__/extensions.cpython-311.pyc
ADDED
Binary file (6.97 kB).
extensions/anycreator.py
ADDED
@@ -0,0 +1,31 @@
+import random
+import urllib.request
+import requests
+import time
+import urllib.request
+data={}
+imgur=[]
+def getimage(query):
+
+    payload = {
+        "model": "absolutereality_v181.safetensors [3d9d4d2b]",
+        "prompt": str(query)
+    }
+
+    response = requests.post("https://api.prodia.com/v1/sd/generate", json=payload, headers={"accept": "application/json","content-type": "application/json","X-Prodia-Key": "da6053eb-c352-4374-a459-2a9a5eaaa64b"})
+    jobid=response.json()["job"]
+    while True:
+        response = requests.get(f"https://api.prodia.com/v1/job/{jobid}", headers={"accept": "application/json","X-Prodia-Key": "da6053eb-c352-4374-a459-2a9a5eaaa64b"})
+        if response.json()["status"]=="succeeded":
+            image=response.json()["imageUrl"]
+            break
+        time.sleep(0.5)
+
+    filename=f"static/image{random.randint(1,1000)}.png"
+
+    urllib.request.urlretrieve(image, filename)
+
+    return filename
+
+
+
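getimage submits a job to Prodia's text-to-image API, polls the job endpoint every half second until it reports succeeded, and downloads the result into static/. A usage sketch (the prompt is illustrative):

```python
# Generate an image and get back the local path it was saved to
path = getimage("a watercolor lighthouse at dusk")
print(path)  # e.g. static/image123.png
```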
function_support.py
ADDED
@@ -0,0 +1,71 @@
+
+from typing import Any, Dict, List, Optional
+from langchain_core.prompts import SystemMessagePromptTemplate
+import json
+
+DEFAULT_SYSTEM_TEMPLATE = """You have access to the following tools:
+
+{tools}
+
+If using tools , You must respond with a JSON object in a JSON codeblock inside think matching the following schema.
+
+
+```json
+[
+{{
+"tool": <name of the selected tool>,
+"tool_input": <parameters for the selected tool, matching the tool's JSON schema>
+}}
+]
+```
+
+"""  # noqa: E501
+
+DEFAULT_RESPONSE_FUNCTION = {
+    "name": "__conversational_response",
+    "description": (
+        "Respond conversationally if no other tools should be called for a given query."
+    ),
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "response": {
+                "type": "string",
+                "description": "Conversational response to the user.",
+            },
+        },
+        "required": ["response"],
+    },
+}
+def _function(**kwargs: Any,):
+    functions = kwargs.get("functions", [])
+    tools=kwargs.get("tools", [])
+
+
+    if "type" not in tools and "function" not in tools:
+        functions=tools
+        functions = [
+            fn for fn in functions
+        ]
+        if not functions:
+            raise ValueError(
+                'If "function_call" is specified, you must also pass a matching \
+function in "functions".'
+            )
+    elif "tools" in kwargs:
+        functions = [
+            fn["function"] for fn in tools
+        ]
+    # del kwargs["function_call"]
+    # elif ""
+    # elif not functions:
+    #     functions.append(DEFAULT_RESPONSE_FUNCTION)
+    system_message_prompt_template = SystemMessagePromptTemplate.from_template(
+        DEFAULT_SYSTEM_TEMPLATE
+    )
+    system_message = system_message_prompt_template.format(
+        tools=json.dumps(functions, indent=2)
+    )
+    if "functions" in kwargs:
+        del kwargs["functions"]
+    return system_message.content
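_function renders whatever tools/functions spec it is handed into the DEFAULT_SYSTEM_TEMPLATE prompt as pretty-printed JSON. A minimal sketch of a call, with an illustrative OpenAI-style tool spec:

```python
from function_support import _function

# An OpenAI-style tool definition (illustrative)
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

# Returns the system prompt text with the tool list embedded as JSON
print(_function(tools=tools))
```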
har_and_cookies/auth_LambdaChat.json
ADDED
@@ -0,0 +1 @@
+{"cookies": {"hf-chat": "167f7c78-2d32-4d5b-acec-0f77246c09e2"}}
har_and_cookies/blackbox.json
ADDED
@@ -0,0 +1 @@
+{"validated_value": "00f37b34-a166-4efb-bce5-1312d87f2f94"}
helpers/__pycache__/config.cpython-311.pyc
ADDED
Binary file (390 Bytes).
helpers/__pycache__/helper.cpython-311.pyc
ADDED
Binary file (6.53 kB).
helpers/__pycache__/models.cpython-311.pyc
ADDED
Binary file (1.92 kB).
helpers/__pycache__/prompts.cpython-311.pyc
ADDED
Binary file (21.9 kB).
helpers/__pycache__/provider.cpython-311.pyc
ADDED
Binary file (980 Bytes).
helpers/helper.py
ADDED
@@ -0,0 +1,178 @@
+from helpers.provider import *
+from helpers.models import model
+import re,ast
+
+def streamer(tok):
+    completion_timestamp = int(time.time())
+    completion_id = ''.join(random.choices(
+        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+    completion_tokens = num_tokens_from_string(tok)
+
+    completion_data = {
+        'id': f'chatcmpl-{completion_id}',
+        'object': 'chat.completion.chunk',
+        'created': completion_timestamp,
+        'model': 'gpt-4',
+        "usage": {
+            "prompt_tokens": 0,
+            "completion_tokens": completion_tokens,
+            "total_tokens": completion_tokens,
+        },
+        'choices': [
+            {
+                'delta': {
+                    'role':"assistant",
+                    'content':tok
+                },
+                'index': 0,
+                'finish_reason': None
+            }
+        ]
+    }
+    return completion_data
+
+def end():
+    completion_timestamp = int(time.time())
+    completion_id = ''.join(random.choices(
+        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+
+    end_completion_data = {
+        'id': f'chatcmpl-{completion_id}',
+        'object': 'chat.completion.chunk',
+        'created': completion_timestamp,
+        'model': model,
+        'provider': 'openai',
+        'choices': [
+            {
+                'index': 0,
+                'delta': {},
+                'finish_reason': 'stop',
+            }
+        ],
+    }
+    return end_completion_data
+
+
+def output(tok):
+    completion_timestamp = int(time.time())
+    completion_id = ''.join(random.choices(
+        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+    completion_tokens = 11
+
+    return {
+        'id': 'chatcmpl-%s' % completion_id,
+        'object': 'chat.completion',
+        'created': completion_timestamp,
+        'model': "gpt-4-0125-preview",
+        "usage": {
+            "prompt_tokens": 0,
+            "completion_tokens": completion_tokens,
+            "total_tokens": completion_tokens,
+        },
+        'choices': [{
+            'message': {
+                'role': 'assistant',
+                'content': tok
+            },
+            'finish_reason': 'stop',
+            'index': 0
+        }]
+    }
+
+def stream_func(tok,type_tool):
+    print("-"*500)
+    print(f"streaming {type_tool}")
+    completion_timestamp = int(time.time())
+    completion_id = ''.join(random.choices(
+        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+    completion_tokens = 11
+    tool_calls=[]
+    regex = r'```json\n(.*?)\n```'
+    matches = re.findall(regex, tok, re.DOTALL)
+    print(matches)
+
+
+    if matches !=[]:
+
+        info_blocks = ast.literal_eval(matches[0])
+
+        for info_block in info_blocks:
+            tok=tok.replace(f"```json\n{info_block}\n```",'')
+
+            tool_data=info_block
+            # to_add={"function":{"arguments":re.sub(r"(?<!\w)'(.*?)'(?!\w)", r'"\1"', str(tool_data["tool_input"])),"name":tool_data["tool"]},"id":f"call_3GjyYbPEskNsP67fkjyXj{random.randint(100,999)}","type":"function"}
+            to_add={"function":{"arguments":json.dumps(tool_data["tool_input"]),"name":tool_data["tool"]},"id":f"call_3GjyYbPEskNsP67fkjyXj{random.randint(100,999)}","type":"function"}
+
+            print(to_add)
+            tool_calls.append(to_add)
+
+    completion_data = {
+        'id': f'chatcmpl-{completion_id}',
+        'object': 'chat.completion.chunk',
+        'created': completion_timestamp,
+        'model': 'gpt-4',
+        "usage": {
+            "prompt_tokens": 0,
+            "completion_tokens": completion_tokens,
+            "total_tokens": completion_tokens,
+        },
+        'choices': [
+            {
+                'delta': {
+                    'role':"assistant",
+                    'content':"",
+                    "tool_calls":tool_calls,
+                },
+                'index': 0,
+                'finish_reason': "",
+            }
+        ]
+    }
+
+    return completion_data
+def func_output(tok,type_tool):
+    print("-"*500)
+    print(f"OUTPUTTING {type_tool}")
+    completion_timestamp = int(time.time())
+    completion_id = ''.join(random.choices(
+        'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789', k=28))
+    completion_tokens = 11
+    tool_calls=[]
+    regex = r'```json\n(.*?)\n```'
+    matches = re.findall(regex, tok, re.DOTALL)
+    print(matches)
+
+
+    if matches !=[]:
+
+        info_blocks = ast.literal_eval(matches[0])
+
+        for info_block in info_blocks:
+            tok=tok.replace(f"```json\n{info_block}\n```",'')
+
+            tool_data=info_block
+            # to_add={"function":{"arguments":re.sub(r"(?<!\w)'(.*?)'(?!\w)", r'"\1"', str(tool_data["tool_input"])),"name":tool_data["tool"]},"id":f"call_3GjyYbPEskNsP67fkjyXj{random.randint(100,999)}","type":"function"}
+            to_add={"function":{"arguments":json.dumps(tool_data["tool_input"]),"name":tool_data["tool"]},"id":f"call_3GjyYbPEskNsP67fkjyXj{random.randint(100,999)}","type":"function"}
+
+            print(to_add)
+            tool_calls.append(to_add)
+
+    return {
+        'id': 'chatcmpl-%s' % completion_id,
+        'object': 'chat.completion',
+        'created': completion_timestamp,
+        'model': "gpt-4.5-turbo",
+        "usage": {
+            "prompt_tokens": 0,
+            "completion_tokens": completion_tokens,
+            "total_tokens": completion_tokens,
+        },
+        'choices': [{
+            'message': {
+                'role': 'assistant',
+                'content': tok,
+                "tool_calls":tool_calls
+            },
+            'finish_reason': '',
+            'index': 0
+        }]
+    }
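stream_func and func_output both recover tool calls by regex-matching the first ```json fenced block in the model's reply and parsing it with ast.literal_eval, which is why the system template in function_support.py insists on exactly that fencing. A sketch of the round trip (the reply text is illustrative):

````python
from helpers.helper import stream_func

# A model reply carrying a tool call in the fenced-JSON convention
reply = '```json\n[{"tool": "get_weather", "tool_input": {"city": "Paris"}}]\n```'

chunk = stream_func(reply, "tools")
# The fenced block is converted into OpenAI-style tool_calls on the streamed delta
print(chunk["choices"][0]["delta"]["tool_calls"][0]["function"])
# -> {'arguments': '{"city": "Paris"}', 'name': 'get_weather'}
````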
helpers/models.py
ADDED
@@ -0,0 +1,225 @@
+model = {
+    "data": [
+
+        {
+            "id": "gemini-2.0-flash-thinking-exp-01-21",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 819792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "hidden": False,
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "gemini-2.5-pro-exp-03-25",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 819792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "hidden": False,
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "qwen-qwq-32b",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 81792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "QwQ-32B",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 81792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "gemini-2.0-flash-thinking-exp-01-21",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 819792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "deepseek-r1-distill-llama-70b",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 819792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "DeepSeekR1-togetherAI",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 819792,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "DeepSeekV3-togetherAI",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 812192,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "llama-3.3-70b-versatile",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 813392,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "gpt-4-0125-preview-web",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 8192,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "gpt-4-1106-vision-preview",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 8192,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+        {
+            "id": "gpt-4-0613-web",
+            "object": "model",
+            "owned_by": "reversed",
+            "tokens": 8192,
+            "fallbacks": [
+                "gpt-3.5-turbo-16k"
+            ],
+            "endpoints": [
+                "/api/chat/completions"
+            ],
+            "limits": [
+                "2/minute",
+                "300/day"
+            ],
+            "public": True,
+            "permission": []
+        },
+    ],
+    "object": "list"
+}
+
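The dict mirrors the response shape of OpenAI's /v1/models endpoint (served verbatim by the /models route in app.py), so clients can filter it the usual way. A quick sketch:

```python
from helpers.models import model

# Collect the ids of every model marked public
public_ids = [m["id"] for m in model["data"] if m.get("public")]
print(public_ids)
```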
helpers/provider.py
ADDED
@@ -0,0 +1,15 @@
+import json
+import requests
+import time
+import tiktoken
+import random
+
+TOKEN = "5182224145:AAEjkSlPqV-Q3rH8A9X8HfCDYYEQ44v_qy0"
+chat_id = "5075390513"
+
+def num_tokens_from_string(string: str, encoding_name: str = "cl100k_base") -> int:
+    """Returns the number of tokens in a text string."""
+    encoding = tiktoken.get_encoding(encoding_name)
+    num_tokens = len(encoding.encode(string))
+    return num_tokens
+
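num_tokens_from_string is what helpers/helper.py uses to fill the usage fields of each streamed chunk. Usage is straightforward:

```python
from helpers.provider import num_tokens_from_string

# Count tokens the same way streamer() does for its usage report
print(num_tokens_from_string("Hello, world!"))  # small integer count under cl100k_base
```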
requirements.txt
ADDED
@@ -0,0 +1,25 @@
+Flask
+flask_cors
+requests
+revchatgpt
+gunicorn>=20.1.0
+g4f>=0.4.9.6
+tiktoken
+PyExecJS
+pyimgur
+transformers
+werkzeug
+e2b==0.17.1
+openpyxl
+beautifulsoup4
+google-generativeai
+Pillow
+requests_futures
+langchain_core
+unofficial-claude-api
+opencv-python
+langchain_community
+tool_calling_llm
+langchain
+langchain-core
+litellm
static/hello_world.txt
ADDED
File without changes
tests/apppy.py
ADDED
@@ -0,0 +1,32 @@
+
+# from g4f.Provider import DeepInfraChat,Glider,Liaobots,Blackbox,ChatGptEs,LambdaChat,TypeGPT
+# DeepInfraChat.models = ["google/gemma-3-27b-it","deepseek-ai/DeepSeek-R1-Turbo","Qwen/QwQ-32B","deepseek-ai/DeepSeek-R1","deepseek-ai/DeepSeek-V3-0324","meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8","meta-llama/Llama-4-Scout-17B-16E-Instruct","microsoft/Phi-4-multimodal-instruct"]
+# deepinframodels=["meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8","microsoft/Phi-4-multimodal-instruct","google/gemma-3-27b-it","meta-llama/Llama-4-Scout-17B-16E-Instruct"]
+# REASONING_CORRESPONDANCE = {"DeepSeek-R1-Glider":Glider, "DeepSeekR1-LAMBDA":LambdaChat,"DeepSeekR1":DeepInfraChat,"deepseek-slow":TypeGPT}
+# REASONING_QWQ = {"qwq-32b":DeepInfraChat}
+# from g4f.client import Client
+# client = Client()
+
+# cunk=""
+
+# providers=REASONING_CORRESPONDANCE
+# model_name="deepseek-r1"
+
+# for provider in providers:
+#     try:
+#         response = client.chat.completions.create(
+#             provider=providers[provider],
+#             model=model_name,
+#             messages=[{"role": "user", "content": f"Hi!"}],
+#             stream=True
+
+#             # Add any other necessary parameters
+#         )
+#         for part in response:
+#             # print(part)
+#             cunk=cunk+(str(part.choices[0].delta.content) or "")
+#             print(str(part.choices[0].delta.content),end="")
+#         break
+#     except Exception as e:
+#         print(f"Error with {provider}: {e}")
+#         pass
tests/e2bdev.py
ADDED
@@ -0,0 +1,455 @@
+from mcp.server.fastmcp import FastMCP
+import time
+from litellm import completion
+import os
+import glob
+import http.client
+import json
+import openpyxl
+import shutil
+from google import genai
+import pexpect
+
+client = genai.Client(api_key="AIzaSyDtP05TyoIy9j0uPL7_wLEhgQEE75AZQSc")
+source_dir = "/app/uploads/temp"
+destination_dir = "/app/code_interpreter"
+files_list=[]
+downloaded_files=[]
+
+from openai import OpenAI
+clienty = OpenAI(api_key="xyz", base_url="https://akiko19191-backend.hf.space/")
+
+mcp = FastMCP("code_sandbox")
+data={}
+result=""
+import requests
+import os
+from bs4 import BeautifulSoup  # For parsing HTML
+
+Parent=pexpect.spawn('bash')
+
+
+
+def transfer_files():
+    try:
+        for item in os.listdir(source_dir):
+            item_path = os.path.join(source_dir, item)
+            if os.path.isdir(item_path):  # Check if it's a directory
+                for filename in os.listdir(item_path):
+                    source_file_path = os.path.join(item_path, filename)
+                    destination_file_path = os.path.join(destination_dir, filename)
+                    if not os.path.exists(destination_file_path):
+                        shutil.move(source_file_path, destination_file_path)
+    except:
+        pass
+def transfer_files2():
+    try:
+        for item in os.listdir("/app/uploads"):
+            if "temp" not in item:
+                item_path = os.path.join(source_dir, item)
+                if os.path.isdir(item_path):  # Check if it's a directory
+                    for filename in os.listdir(item_path):
+                        source_file_path = os.path.join(item_path, filename)
+                        destination_file_path = os.path.join(destination_dir, filename.split("__")[1])
+                        if not os.path.exists(destination_file_path):
+                            shutil.move(source_file_path, destination_file_path)
+    except:
+        pass
+def upload_file(file_path, upload_url):
+    """Uploads a file to the specified server endpoint."""
+
+    try:
+        # Check if the file exists
+        if not os.path.exists(file_path):
+            raise FileNotFoundError(f"File not found: {file_path}")
+
+        # Prepare the file for upload
+        with open(file_path, "rb") as file:
+            files = {"file": (os.path.basename(file_path), file)}  # Important: Provide filename
+
+            # Send the POST request
+            response = requests.post(upload_url, files=files)
+
+        # Check the response status code
+        response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx)
+
+        # Parse and print the response
+        if response.status_code == 200:
+            print(f"File uploaded successfully. Filename returned by server: {response.text}")
+            return response.text  # Return the filename returned by the server
+        else:
+            print(f"Upload failed. Status code: {response.status_code}, Response: {response.text}")
+            return None
+
+    except FileNotFoundError as e:
+        print(e)
+        return None  # or re-raise the exception if you want the program to halt
+    except requests.exceptions.RequestException as e:
+        print(f"Upload failed. Network error: {e}")
+        return None
+
+
+TOKEN = "5182224145:AAEjkSlPqV-Q3rH8A9X8HfCDYYEQ44v_qy0"
+chat_id = "5075390513"
+from requests_futures.sessions import FuturesSession
+session = FuturesSession()
+
+def run(cmd, timeout_sec,forever_cmd):
+    global Parent
+    if forever_cmd == 'true':
+        Parent.close()
+        Parent = pexpect.spawn("bash")
+        command="cd /app/code_interpreter/ && "+cmd
+
+        Parent.sendline(command)
+        Parent.readline().decode()
+        return str(Parent.readline().decode())
+    t=time.time()
+    child = pexpect.spawn("bash")
+    output=""
+    command="cd /app/code_interpreter/ && "+cmd
+
+    child.sendline('PROMPT_COMMAND="echo END"')
+    child.readline().decode()
+    child.readline().decode()
+
+    child.sendline(command)
+
+    while (not child.eof()) and (time.time()-t<timeout_sec):
+        x=child.readline().decode()
+        output=output+x
+        print(x)
+        if "END" in x:
+            output=output.replace("END","")
+            child.close()
+            break
+        if "true" in forever_cmd:
+            break
+    return output
+
+@mcp.tool()
+def analyse_audio(audiopath,query) -> dict:
+    """Ask another AI model about audios.The AI model can listen to the audio and give answers.Eg-query:Generate detailed minutes of meeting from the audio clip,audiopath='/app/code_interpreter/<audioname>'.Note:The audios are automatically present in the /app/code_interpreter directory."""
+    transfer_files2()
+    myfile = client.files.upload(file=audiopath)
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash',
+        contents=[query, myfile]
+    )
+    return {"Output":str(response.text)}
+
+@mcp.tool()
+def analyse_video(videopath,query) -> dict:
+    """Ask another AI model about videos.The AI model can see the videos and give answers.Eg-query:Create a very detailed transcript and summary of the video,videopath='/app/code_interpreter/<videoname>'Note:The videos are automatically present in the /app/code_interpreter directory."""
+    transfer_files2()
+    video_file = client.files.upload(file=videopath)
+
+    while video_file.state.name == "PROCESSING":
+        print('.', end='')
+        time.sleep(1)
+        video_file = client.files.get(name=video_file.name)
+
+    if video_file.state.name == "FAILED":
+        raise ValueError(video_file.state.name)
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash',
+        contents=[query, video_file]
+    )
+    return {"Output":str(response.text)}
+
+
+@mcp.tool()
+def analyse_images(imagepath,query) -> dict:
+    """Ask another AI model about images.The AI model can see the images and give answers.Eg-query:Who is the person in this image?,imagepath='/app/code_interpreter/<imagename>'.Note:The images are automatically present in the /app/code_interpreter directory."""
+    transfer_files2()
+    video_file = client.files.upload(file=imagepath)
+
+
+    response = client.models.generate_content(
+        model='gemini-2.0-flash',
+        contents=[query, video_file]
+    )
+    return {"Output":str(response.text)}
+
+
+# @mcp.tool()
+# def generate_images(imagepath,query) -> dict:
+#     """Ask another AI model to generate images based on the query and the image path.Set image path as an empty string , if you dont want to edit images , but rather generate images.Eg-query:Generate a cartoon version of this image,imagepath='/app/code_interpreter/<imagename>'.Note:The images are automatically present in the /app/code_interpreter directory."""
+#     transfer_files2()
+#     video_file = client.files.upload(file=imagepath)
+
+
+#     response = client.models.generate_content(
+#         model='gemini-2.0-flash',
+#         contents=[query, video_file]
+#     )
+#     return {"Output":str(response.text)}
+
+@mcp.tool()
+def create_code_files(filename: str, code) -> dict:
+    """Create code files by passing the filename as well as the entire code to write.The file is created by default in the /app/code_interpreter directory.Note:All user uploaded files that you might need to work upon are stored in the /app/code_interpreter directory."""
+    global destination_dir
+    transfer_files()
+    transfer_files2()
+    if not os.path.exists(os.path.join(destination_dir, filename)):
+
+        if isinstance(code, dict):
+            with open(os.path.join(destination_dir, filename), 'w', encoding='utf-8') as f:
+                json.dump(code, f, ensure_ascii=False, indent=4)
+        else:
+            f = open(os.path.join(destination_dir, filename), "w")
+            f.write(str(code))
+            f.close()
+        return {"info":"The referenced code files were created successfully."}
+
+    else:
+        if isinstance(code, dict):
+            with open(os.path.join(destination_dir, filename), 'w', encoding='utf-8') as f:
+                json.dump(code, f, ensure_ascii=False, indent=4)
+        else:
+            f = open(os.path.join(destination_dir, filename), "w")
+            f.write(str(code))
+            f.close()
+        return {"info":"The referenced code files were created successfully."}
+        # return {"info":"The referenced code files already exist. Please rename the file or delete the existing one."}
+
+
+@mcp.tool()
+def run_code(language:str,packages:str,filename: str, code: str,start_cmd:str,forever_cmd:str) -> dict:
+    """
+    Execute code in a controlled environment with package installation and file handling.
+    Args:
+        language:Programming language of the code (eg:"python", "nodejs", "bash","html",etc).
+        packages: Space-separated list of packages to install.(python packages are installed if language set to python and npm packages are installed if language set to nodejs).
+            Preinstalled python packages: gradio, XlsxWriter, openpyxl , mpxj , jpype1.
+            Preinstalled npm packages: express, ejs, chart.js.
+        filename:Name of the file to create (stored in /app/code_interpreter/).
+        code:Full code to write to the file.
+        start_cmd:Command to execute the file (e.g., "python /app/code_interpreter/app.py"
+            or "bash /app/code_interpreter/app.py").
+            Leave blank ('') if only file creation is needed / start_cmd not required.
+        forever_cmd:If 'true', the command will run indefinitely.Set to 'true', when running a website/server.Run all servers/website on port 1337. If 'false', the command will time out after 300 second and the result will be returned.
+    Notes:
+    - All user-uploaded files are in /app/code_interpreter/.
+    - After execution, embed a download link (or display images/gifs/videos directly in markdown format) in your response.
+    - bash/apk packages cannot be installed.
+    - When editing and subsequently re-executing the server with the forever_cmd='true' setting, the previous server instance will be automatically terminated, and the updated server will commence operation. This functionality negates the requirement for manual process termination commands such as pkill node.
+    - The opened ports can be externally accessed at https://suitable-liked-ibex.ngrok-free.app/ (ONLY if the website is running successfully)
+    - Do not use `plt.show()` in this headless environment. Save visualizations directly (e.g., `plt.savefig("happiness_img.png")` or export GIFs/videos).
+    """
+    global destination_dir
+    package_names = packages.strip()
+    if "python" in language:
+        command="pip install --break-system-packages "
+    elif "node" in language:
+        command="npm install "
+    else:
+        command="ls"
+    if packages != "" and packages != " ":
+        package_logs=run(
+            f"{command} {package_names}", timeout_sec=300,forever_cmd= 'false'
+        )
+        if "ERROR" in package_logs:
+            return {"package_installation_log":package_logs,"info":"Package installation failed. Please check the package names. Tip:Try using another package/method to accomplish the task."}
+    transfer_files2()
+    transfer_files()
+    f = open(os.path.join(destination_dir, filename), "w")
+    f.write(code)
+    f.close()
+    global files_list
+    if start_cmd != "" and start_cmd != " ":
+        stdot=run(start_cmd, 120,forever_cmd)
+    else:
+        stdot="File created successfully."
+    onlyfiles = glob.glob("/app/code_interpreter/*")
+    onlyfiles=list(set(onlyfiles)-set(files_list))
+    uploaded_filenames=[]
+    for files in onlyfiles:
+        try:
+            uploaded_filename = upload_file(files, "https://opengpt-4ik5.onrender.com/upload")
+            uploaded_filenames.append(f"https://opengpt-4ik5.onrender.com/static/{uploaded_filename}")
+        except:
+            pass
+    files_list=onlyfiles
+    return {"output":stdot,"Files_download_link":uploaded_filenames}
+
+
+@mcp.tool()
+def run_code_files(start_cmd:str,forever_cmd:str) -> dict:
+    """Executes a shell command to run code files from /app/code_interpreter.
+    Runs the given `start_cmd`. The execution behavior depends on `forever_cmd`.
+    Any server/website started should use port 1337.
+    Args:
+        start_cmd (str): The shell command to execute the code.
+            (e.g., ``python /app/code_interpreter/app.py`` or ``node /app/code_interpreter/server.js``).
+            Files must be in ``/app/code_interpreter``.
+        forever_cmd (str): Execution mode.
+            - ``'true'``: Runs indefinitely (for servers/websites).
+            - ``'false'``: Runs up to 300s, captures output.
+    Returns:
+        dict: A dictionary containing:
+            - ``'output'`` (str): Captured stdout (mainly when forever_cmd='false').
+            - ``'Files_download_link'`` (Any): Links/identifiers for downloadable files.
+    Notes:
+    - After execution, embed a download link (or display images/gifs/videos directly in markdown format) in your response.
+    - When editing and subsequently re-executing the server with the forever_cmd='true' setting, the previous server instance will be automatically terminated, and the updated server will commence operation. This functionality negates the requirement for manual process termination commands such as pkill node.
+    - The opened ports can be externally accessed at https://suitable-liked-ibex.ngrok-free.app/ (ONLY if the website is running successfully)
+    """
+    global files_list
+
+    stdot=run(start_cmd, 300,forever_cmd)
+    onlyfiles = glob.glob("/app/code_interpreter/*")
+    onlyfiles=list(set(onlyfiles)-set(files_list))
+    uploaded_filenames=[]
+    for files in onlyfiles:
+        try:
+            uploaded_filename = upload_file(files, "https://opengpt-4ik5.onrender.com/upload")
+            uploaded_filenames.append(f"https://opengpt-4ik5.onrender.com/static/{uploaded_filename}")
+        except:
+            pass
+    files_list=onlyfiles
+    return {"output":stdot,"Files_download_link":uploaded_filenames}
+
+
+@mcp.tool()
+def run_shell_command(cmd:str,forever_cmd:str) -> dict:
+    """Executes a shell command in a sandboxed Alpine Linux environment.
+    Runs the provided `cmd` string within a bash shell. Commands are executed
+    relative to the `/app/code_interpreter/` working directory by default.
+    The execution behavior (indefinite run vs. timeout) is controlled by
+    the `forever_cmd` parameter.
+    Important Environment Notes:
+    - The execution environment is **Alpine Linux**. Commands should be
+      compatible.
+    - `sudo` commands are restricted for security reasons.Hence commands which require elevated privileges like `apk add` CANNOT be executed.Instead try to use `pip install` or `npm install` commands.
+    - Standard bash features like `&&`, `||`, pipes (`|`), etc., are supported.
+    Args:
+        cmd (str): The shell command to execute.
+            Example: ``mkdir test_dir && ls -l``
+        forever_cmd (str): Determines the execution mode.
+            - ``'true'``: Runs the command indefinitely. Suitable
+              for starting servers or long-running processes.
+              Output capture might be limited.
+            - ``'false'``: Runs the command until completion or
+              a 300-second timeout, whichever comes first.
+              Captures standard output.
+    Returns:
+        dict: A dictionary containing the execution results:
+            - ``'output'`` (str): The captured standard output (stdout) and potentially
+              standard error (stderr) from the command.
+    """
+    transfer_files()
+    transfer_files2()
+    output=run(cmd, 300,forever_cmd)
+    return {"output":output}
+
+
+
+@mcp.tool()
+def install_python_packages(python_packages:str) -> dict:
+    """python_packages to install separated by space.eg-(python packages:numpy matplotlib).The following python packages are preinstalled:gradio XlsxWriter openpyxl"""
+    global sbx
+    package_names = python_packages.strip()
+    command="pip install"
+    if not package_names:
+        return
+
+    stdot=run(
+        f"{command} --break-system-packages {package_names}", timeout_sec=300, forever_cmd= 'false'
+    )
+
+    return {"stdout":stdot,"info":"Ran package installation command"}
+
@mcp.tool()
|
366 |
+
def get_youtube_transcript(videoid:str) -> dict:
|
367 |
+
"""Get the transcript of a youtube video by passing the video id.Eg videoid=ZacjOVVgoLY"""
|
368 |
+
conn = http.client.HTTPSConnection("youtube-transcript3.p.rapidapi.com")
|
369 |
+
headers = {
|
370 |
+
'x-rapidapi-key': "2a155d4498mshd52b7d6b7a2ff86p10cdd0jsn6252e0f2f529",
|
371 |
+
'x-rapidapi-host': "youtube-transcript3.p.rapidapi.com"
|
372 |
+
}
|
373 |
+
conn.request("GET",f"/api/transcript?videoId={videoid}", headers=headers)
|
374 |
+
|
375 |
+
res = conn.getresponse()
|
376 |
+
data = res.read()
|
377 |
+
return json.loads(data)
|
378 |
+
|
379 |
+
@mcp.tool()
|
380 |
+
def read_excel_file(filename) -> dict:
|
381 |
+
"""Reads the contents of an excel file.Returns a dict with key :value pair = cell location:cell content.Always run this command first , when working with excels.The excel file is automatically present in the /app/code_interpreter directory. """
|
382 |
+
global destination_dir
|
383 |
+
transfer_files2()
|
384 |
+
transfer_files()
|
385 |
+
|
386 |
+
workbook = openpyxl.load_workbook(os.path.join(destination_dir, filename))
|
387 |
+
|
388 |
+
# Create an empty dictionary to store the data
|
389 |
+
excel_data_dict = {}
|
390 |
+
|
391 |
+
# Iterate over all sheets
|
392 |
+
for sheet_name in workbook.sheetnames:
|
393 |
+
sheet = workbook[sheet_name]
|
394 |
+
# Iterate over all rows and columns
|
395 |
+
for row in sheet.iter_rows():
|
396 |
+
for cell in row:
|
397 |
+
# Get cell coordinate (e.g., 'A1') and value
|
398 |
+
cell_coordinate = cell.coordinate
|
399 |
+
cell_value = cell.value
|
400 |
+
if cell_value is not None:
|
401 |
+
excel_data_dict[cell_coordinate] = str(cell_value)
|
402 |
+
return excel_data_dict
|
403 |
+
@mcp.tool()
|
404 |
+
def scrape_websites(url_list:list,query:str) -> list:
|
405 |
+
"""Scrapes specific website content.query is the question you want to ask about the content of the website.e.g-query:Give .pptx links in the website,Summarise the content in very great detail,etc.Maximum 4 urls can be passed at a time."""
|
406 |
+
|
407 |
+
conn = http.client.HTTPSConnection("scrapeninja.p.rapidapi.com")
|
408 |
+
|
409 |
+
|
410 |
+
headers = {
|
411 |
+
'x-rapidapi-key': "2a155d4498mshd52b7d6b7a2ff86p10cdd0jsn6252e0f2f529",
|
412 |
+
'x-rapidapi-host': "scrapeninja.p.rapidapi.com",
|
413 |
+
'Content-Type': "application/json"
|
414 |
+
}
|
415 |
+
Output=""
|
416 |
+
links=""
|
417 |
+
content=""
|
418 |
+
for urls in url_list:
|
419 |
+
payload = {"url" :urls}
|
420 |
+
payload=json.dumps(payload)
|
421 |
+
conn.request("POST", "/scrape", payload, headers)
|
422 |
+
res = conn.getresponse()
|
423 |
+
data = res.read()
|
424 |
+
content=content+str(data.decode("utf-8"))
|
425 |
+
|
426 |
+
#Only thing llama 4 is good for.
|
427 |
+
|
428 |
+
response = clienty.chat.completions.create(
|
429 |
+
model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
|
430 |
+
messages=[
|
431 |
+
{"role": "user", "content": f"{query} [CONTENT]:{content}"}
|
432 |
+
],stream=True
|
433 |
+
)
|
434 |
+
for chunk in response:
|
435 |
+
Output = Output +str(chunk.choices[0].delta.content)
|
436 |
+
#--------------
|
437 |
+
response2 = clienty.chat.completions.create(
|
438 |
+
model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
|
439 |
+
messages=[
|
440 |
+
{"role": "user", "content": f"Give all relevant and different types of links in this content.The links may be relevant image links , file links , video links , website links , etc .You must give Minimum 30 links and maximum 50 links.[CONTENT]:{content}"}
|
441 |
+
],stream=True
|
442 |
+
)
|
443 |
+
for chunk in response2:
|
444 |
+
links = links +str(chunk.choices[0].delta.content)
|
445 |
+
return {"website_content":Output,"relevant_links":links}
|
446 |
+
|
447 |
+
|
448 |
+
|
449 |
+
|
450 |
+
if __name__ == "__main__":
|
451 |
+
# Initialize and run the server
|
452 |
+
Ngrok=pexpect.spawn('bash')
|
453 |
+
Ngrok.sendline("ngrok http --url=suitable-liked-ibex.ngrok-free.app 1337 --config /home/node/.config/ngrok/ngrok.yml")
|
454 |
+
Ngrok.readline().decode()
|
455 |
+
mcp.run(transport='stdio')
|
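Every tool above funnels newly created artifacts through the `upload_file` helper before returning download links. The helper itself is defined earlier in this file; as a rough sketch only, an equivalent could look like the following, where the multipart field name `file` and the plain-text filename response are assumptions, not details confirmed by this diff:

```python
# Hypothetical sketch of the upload helper the tools above rely on.
# Assumptions: the /upload endpoint accepts a multipart POST under the
# field name "file" and answers with the stored filename as plain text.
import requests

def upload_file_sketch(path: str, upload_url: str) -> str:
    with open(path, "rb") as fh:
        resp = requests.post(upload_url, files={"file": fh}, timeout=60)
    resp.raise_for_status()
    return resp.text.strip()
```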
tests/new.py
ADDED
@@ -0,0 +1,29 @@
from openai import OpenAI
from rubbish import x

client = OpenAI(base_url="http://192.168.1.7:1337/")

completion = client.chat.completions.create(
    model="deepseek-reasoner",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi!"},
    ],
    stream=True
)
for chunk in completion:
    # print(chunk)
    print(chunk.choices[0].delta.content, end="")
    # print("****************")

tests/prompts.txt
ADDED
@@ -0,0 +1,16 @@
After successfully completing a tool Call, run the next tool Call.
Eg - after the create code file tool Call, you must run the run code file tool Call.
Note: Do NOT run the same tool Call consecutively. I am telling you this because you often make the mistake of running the same tool Call with the same code again and again. Maybe you are forgetting that you have run the tool Call previously. Please don't forget this.
Important note: Always respond in natural language after a particular tool Call has been successfully completed. Then start initialising the next required tool Call.

Your purpose:
You have been especially designed to work with excel files. You have all the tools available to you to work with the excel files.
You must primarily use python and openpyxl to work with the excel files. Always save the files with a different name each time; this is important. Do not save the files with the same name that was given to you.
If you have any confusion about whether to use this cell or that cell for fetching the data for a formula, always clarify with the user first.
Always read the excel files before you start writing the code files.

Your main job is to work with Excel files using Python and the openpyxl library. You have all the tools needed to handle these files. Always save changes with a new filename—never overwrite the original file. Before writing any code, carefully read the Excel file to understand its structure. If you're unsure about which cells to use for formulas or data, ask the user for clarification first. Keep your approach simple and clear.
Important Info: Always run the next tool after completing one (e.g., create code → run code). Never repeat the same tool. Respond to the user after completion, giving file links.

Your main job is to work with Excel files using Python and the openpyxl library. You have all the tools needed to handle these files. Always save changes with a new filename—never overwrite the original file. Before writing any code, carefully read the Excel file to understand its structure. If you're unsure about which cells to use for formulas or data, ask the user for clarification first. Keep your approach simple and clear. Always try to insert excel formulas to accomplish the tasks rather than using python code to calculate the tasks. Eg: use the excel SUM formula to calculate SUM.
Important Info: Always run the next tool after completing one (e.g., create code → run code). Never run the same tool with the same content consecutively. Respond to the user after all task completion, giving file links.

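The formula-first rule in the last prompt draft is worth a concrete illustration. A minimal openpyxl sketch, with illustrative filenames and cell ranges, that writes an Excel SUM formula instead of computing the total in Python:

```python
# Minimal sketch: insert an Excel formula rather than a Python-computed value.
# The filenames and the B1:B10 range are illustrative.
import openpyxl

wb = openpyxl.load_workbook("sales.xlsx")
ws = wb.active
ws["B11"] = "=SUM(B1:B10)"            # Excel evaluates the formula on open
wb.save("sales_with_totals.xlsx")     # new filename, original left untouched
```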
tests/rubbish.py
ADDED
The diff for this file is too large to render.
tests/tests.py
ADDED
@@ -0,0 +1,34 @@
# from litellm import completion
# from openai import OpenAI

# client = OpenAI(
#     base_url="http://192.168.1.7:1337",
#     api_key="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",
# )

# completion = client.chat.completions.create(
#     model="google/gemini-2.0-flash-001",
#     messages=[
#         {"role": "user", "content": "What's the weather like in London?"}
#     ],
# )
# print(completion.choices[0].message)

from g4f.client import Client

client = Client()
response = client.images.generate(
    model="flux",
    prompt="a white siamese cat",
    response_format="url"
)

print(f"Generated image URL: {response.data[0].url}")

tests/usefull_funcs.py
ADDED
@@ -0,0 +1,19 @@
import tiktoken
import re


def split_text_into_chunks(text, max_tokens=100000, encoding_name="cl100k_base"):
    # Encode the text, slice the token list into max_tokens-sized windows,
    # and decode each window back into a string chunk.
    enc = tiktoken.get_encoding(encoding_name)
    tokens = enc.encode(text)
    chunks = []
    for i in range(0, len(tokens), max_tokens):
        chunk = tokens[i:i+max_tokens]
        chunks.append(enc.decode(chunk))
    return chunks


# print(completion.choices[0].message.content)


def extract_https_links(text):
    # Matches URLs: optional http(s) scheme, domain names, IPv4 and IPv6
    # hosts, an optional port, and an optional path.
    return re.findall(r'\b((?:https?://)?(?:(?:www\.)?(?:[\da-z\.-]+)\.(?:[a-z]{2,6})|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(?::[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(?::[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(?::[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(?::[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:(?:(?::[0-9a-fA-F]{1,4}){1,6})|:(?:(?::[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(?::[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(?:ffff(?::0{1,4}){0,1}:){0,1}(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])|(?:[0-9a-fA-F]{1,4}:){1,4}:(?:(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(?:25[0-5]|(?:2[0-4]|1{0,1}[0-9]){0,1}[0-9])))(?::[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])?(?:/[\w\.-]*)*/?)\b', text)

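A quick usage sketch for `split_text_into_chunks`; the oversized input below is fabricated for illustration:

```python
# Usage sketch: break an oversized prompt into <=100k-token chunks.
long_text = "lorem ipsum " * 200_000          # illustrative oversized input
chunks = split_text_into_chunks(long_text, max_tokens=100_000)
print(f"{len(chunks)} chunks, first begins: {chunks[0][:24]!r}")
```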
utils/__init__.py
ADDED
@@ -0,0 +1,47 @@
import tiktoken


def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
    """Returns the number of tokens used by a list of messages."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        print(
            "Warning: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301."
        )
        return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
    elif model == "gpt-4":
        print(
            "Warning: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314."
        )
        return num_tokens_from_messages(messages, model="gpt-4-0314")
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = (
            4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        )
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4-0314":  # was "gpt-4", unreachable after the recursion above
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        raise NotImplementedError(
            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


def num_tokens_from_string(string: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model("gpt-4-0314")
    num_tokens = len(encoding.encode(string))
    return num_tokens

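Usage is straightforward; a small sketch with a made-up conversation:

```python
# Usage sketch: estimate prompt size before sending (conversation is made up).
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise the attached spreadsheet."},
]
print(num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"))
print(num_tokens_from_string("Summarise the attached spreadsheet."))
```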
utils/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (2.62 kB)

utils/__pycache__/cyclic_buffer.cpython-311.pyc
ADDED
Binary file (2.08 kB)

utils/__pycache__/functions.cpython-311.pyc
ADDED
Binary file (2.01 kB)

utils/__pycache__/llms.cpython-311.pyc
ADDED
Binary file (10.2 kB)

utils/functions.py
ADDED
@@ -0,0 +1,50 @@
import re
from function_support import _function


def extract_links(text):
    url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    urls = re.findall(url_pattern, text)
    return urls


def allocate(messages, model, functs):
    # Models other than Gemini get plain-string content instead of content parts.
    if "gemini" not in model:
        for msg in messages:
            if isinstance(msg["content"], list):
                msg["content"] = msg["content"][0]["text"]

    for msg in messages:
        # Rewrite tool-result messages as user messages for models without
        # native tool-calling support.
        if "tool" in msg["role"]:
            msg["role"] = "user"
            msg["content"] = f"Tool {msg['name']} returned response: {msg['content']}. Now you must output the next tool Call or respond to user in natural language after the task has been completed. "
            del msg['name']
            del msg["tool_call_id"]
        # Rewrite assistant tool_calls as an inline ```json block.
        if "tool_calls" in msg:
            add = ""
            for tools in msg["tool_calls"]:
                add += f"""
```json
[
{{
"tool":"{tools["function"]["name"]}",
"tool_input":{tools["function"]["arguments"]}
}}
]
```"""  # += so that multiple tool calls are all kept
            msg["content"] = add
            del msg["tool_calls"]

    if functs is not None:
        function_call = _function(tools=functs)
        messages.insert(1, {"role": "system", "content": function_call})

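`allocate` rewrites OpenAI-style tool traffic into plain chat turns for providers without native tool-calling. A sketch of the transformation on a fabricated two-message history:

```python
# Illustrative only: the history and argument values below are fabricated.
history = [
    {"role": "assistant", "content": "", "tool_calls": [
        {"function": {"name": "run_shell_command",
                      "arguments": '{"cmd": "ls", "forever_cmd": "false"}'}},
    ]},
    {"role": "tool", "name": "run_shell_command",
     "tool_call_id": "call_1", "content": "app.py"},
]
allocate(history, model="DeepSeek-V3-0324", functs=None)
# history[0]["content"] is now a ```json block describing the tool call, and
# history[1] became a user turn: "Tool run_shell_command returned response: app.py. ..."
```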
utils/llms.py
ADDED
@@ -0,0 +1,263 @@
import os
import random
import json

from g4f.client import Client
from g4f.Provider import DeepInfraChat, Glider, LambdaChat, TypeGPT
from litellm import completion

gemini_api_keys = json.loads(os.environ.get("GEMINI_KEY_LIST"))
groq_api_keys = json.loads(os.environ.get("GROQ_API_KEYS"))
chutes_key = os.environ.get("CHUTES_API_KEY")
github_key = os.environ.get("GITHUB_API_KEY")

DeepInfraChat.models = ["google/gemma-3-27b-it", "deepseek-ai/DeepSeek-R1-Turbo", "Qwen/QwQ-32B", "deepseek-ai/DeepSeek-R1", "deepseek-ai/DeepSeek-V3-0324", "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", "meta-llama/Llama-4-Scout-17B-16E-Instruct", "microsoft/Phi-4-multimodal-instruct"]

deepinframodels = ["meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", "microsoft/Phi-4-multimodal-instruct", "google/gemma-3-27b-it", "meta-llama/Llama-4-Scout-17B-16E-Instruct"]
chutes_models = {"MAI-DS-R1-FP8": "microsoft/MAI-DS-R1-FP8", "DeepSeek-V3-0324": "deepseek-ai/DeepSeek-V3-0324", "deepseek-reasoner": "deepseek-ai/DeepSeek-R1", "GLM-4-32B-0414": "THUDM/GLM-4-32B-0414", "GLM-Z1-32B-0414": "THUDM/GLM-Z1-32B-0414"}
github_models = {"gpt4.1": "gpt-4.1", "gpt-4o": "gpt-4o", "o4-mini": "o4-mini"}

REASONING_CORRESPONDANCE = {"DeepSeek-R1-Glider": Glider, "DeepSeekR1-LAMBDA": LambdaChat, "DeepSeekR1": DeepInfraChat, "deepseek-slow": TypeGPT}
os.environ["GEMINI_API_KEY"] = random.choice(gemini_api_keys)

REASONING_QWQ = {"qwq-32b": DeepInfraChat}

CHAT_CORRESPONDANCE = {"DeepSeek-V3": DeepInfraChat}

client = Client()


def chat(messages, response_format, model="gpt-4"):
    # NOTE: the model argument is currently ignored; Gemini Flash is always used.
    if len(messages) == 1:
        messages[0]["role"] = "user"
    response = completion(
        model="gemini/gemini-2.0-flash",
        messages=messages,
        response_format=response_format
    )
    return str(response.choices[0].message.content)


def chatstream(messages, model, api_keys):
    print(f"-------{model}--------")
    global llmfree
    global llmdeepseek
    global llmgroq

    cunk = ""

    if model in deepinframodels:
        try:
            response = client.chat.completions.create(
                provider=DeepInfraChat,
                model=model,
                messages=messages,
                stream=True
            )
            for part in response:
                cunk = cunk + (part.choices[0].delta.content or "")
                # Stop streaming to the caller once a ```json tool block begins.
                if "```json" not in cunk:
                    yield (part.choices[0].delta.content or "")
        except Exception as e:
            pass
        yield ("RESULT: " + cunk)

    elif model == "DeepSeekR1-togetherAI":
        response = completion(model="together_ai/deepseek-ai/DeepSeek-R1", messages=messages, stream=True)
        cunk = ""
        for part in response:
            cunk = cunk + (part.choices[0].delta.content or "")
            if "```json" not in cunk:
                yield (part.choices[0].delta.content or "")
        yield ("RESULT: " + cunk)

    elif model == "DeepSeekV3-togetherAI":
        response = completion(model="together_ai/deepseek-ai/DeepSeek-V3", messages=messages, stream=True)
        cunk = ""
        for part in response:
            cunk = cunk + (part.choices[0].delta.content or "")
            if "```json" not in cunk:
                yield (part.choices[0].delta.content or "")
        yield ("RESULT: " + cunk)

    elif model == "groq/deepseek-r1-distill-llama-70b":
        os.environ["GROQ_API_KEY"] = random.choice(groq_api_keys)
        response = completion(model="groq/deepseek-r1-distill-llama-70b", messages=messages, stream=True)
        cunk = ""
        for part in response:
            cunk = cunk + (part.choices[0].delta.content or "")
            if "```json" not in cunk:
                yield (part.choices[0].delta.content or "")
        yield ("RESULT: " + cunk)

    elif model == "groq/qwq-32b":
        os.environ["GROQ_API_KEY"] = random.choice(groq_api_keys)
        response = completion(model="groq/qwen-qwq-32b", messages=messages, stream=True)
        cunk = ""
        for part in response:
            cunk = cunk + (part.choices[0].delta.content or "")
            if "```json" not in cunk:
                yield (part.choices[0].delta.content or "")
        yield ("RESULT: " + cunk)

    elif model == "llama-3.3-70b-versatile":
        response = completion(model="groq/llama-3.3-70b-versatile", messages=messages, stream=True)
        cunk = ""
        for part in response:
            cunk = cunk + (part.choices[0].delta.content or "")
            if "```json" not in cunk:
                yield (part.choices[0].delta.content or "")
        yield ("RESULT: " + cunk)

    elif model in chutes_models:
        response = completion(model=f"openai/{chutes_models[model]}", api_key=chutes_key, base_url="https://llm.chutes.ai/v1", messages=messages, stream=True)
        if model == "MAI-DS-R1-FP8" or model == "GLM-Z1-32B-0414":
            yield ("<think> \n")
        cunk = ""
        for part in response:
            x = part.choices[0].delta.content or ""
            cunk = cunk + x
            if "```json" not in cunk and x:
                yield (x)
                print(x, end="")
        yield ("RESULT: " + cunk)

    elif model in github_models:
        response = completion(model=f"github/{github_models[model]}", api_key=github_key, messages=messages, stream=True)
        cunk = ""
        for part in response:
            cunk = cunk + (part.choices[0].delta.content or "")
            if "```json" not in cunk:
                yield (part.choices[0].delta.content or "")
        yield ("RESULT: " + cunk)

    elif "gemini" in model:
        # Rotate through the Gemini keys until one succeeds.
        for key in gemini_api_keys:
            try:
                os.environ["GEMINI_API_KEY"] = key
                response = completion(model=f"gemini/{model}", messages=messages, stream=True)
                cunk = ""
                for part in response:
                    cunk = cunk + (part.choices[0].delta.content or "")
                    print(part.choices[0].delta.content or "", end="")
                    if "```json" not in cunk:
                        yield (part.choices[0].delta.content or "")
                break
            except:
                pass
        print("STOPPING")
        yield ("RESULT: " + cunk)

    elif model == "deepseek.r1" or model == "deepseek-chat":
        cunk = ""
        if "chat" in model:
            providers = CHAT_CORRESPONDANCE
            model_name = "deepseek-ai/DeepSeek-V3-0324"
        else:
            providers = REASONING_CORRESPONDANCE
            model_name = "deepseek-r1"

        # Try each fallback provider until one streams successfully.
        for provider in providers:
            try:
                response = client.chat.completions.create(
                    provider=providers[provider],
                    model=model_name,
                    messages=messages,
                    stream=True
                )
                for part in response:
                    text = part.choices[0].delta.content or ""
                    cunk = cunk + text
                    if "```json" not in cunk and text:
                        yield (text)
                break
            except Exception as e:
                print(e)
                pass
        print("STOPPING")
        yield ("RESULT: " + cunk)

    elif model == "qwq-32b":
        yield ("<think>")
        cunk = ""
        providers = REASONING_QWQ
        for provider in providers:
            try:
                response = client.chat.completions.create(
                    provider=providers[provider],
                    model="Qwen/QwQ-32B",
                    messages=messages,
                    stream=True
                )
                for part in response:
                    cunk = cunk + (part.choices[0].delta.content or "")
                    if "```json" not in cunk:
                        yield (part.choices[0].delta.content or "")
                break
            except Exception as e:
                pass
        yield ("RESULT: " + cunk)

    elif "DeepSeek" in model and "dev" in model:
        cunk = ""
        if "V3" in model:
            providers = CHAT_CORRESPONDANCE
        else:
            providers = REASONING_CORRESPONDANCE
        for provider in providers:
            try:
                response = client.chat.completions.create(
                    provider=providers[provider],
                    model="deepseek-r1",
                    messages=messages,
                    stream=True
                )
                for part in response:
                    cunk = cunk + (part.choices[0].delta.content or "")
                break
            except Exception as e:
                pass
        print("STOPPING")
        yield ("RESULT: " + cunk)
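`chatstream` yields display-ready chunks and, at the end, a single sentinel string prefixed with `RESULT: ` that carries the full accumulated reply, including any withheld ```json tool block. A consumer sketch, assuming `messages` is an OpenAI-style history (and that ordinary chunks never start with the sentinel prefix):

```python
# Consumption sketch for chatstream(); `messages` is assumed to be an
# OpenAI-style message list. The final "RESULT: " sentinel carries the
# full reply, including any ```json tool block that was not streamed.
full_reply = ""
for chunk in chatstream(messages, model="deepseek-chat", api_keys=None):
    if chunk.startswith("RESULT: "):
        full_reply = chunk[len("RESULT: "):]
    else:
        print(chunk, end="", flush=True)
```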