Update app.py
app.py CHANGED
@@ -11,45 +11,11 @@ from openai import OpenAI, AsyncOpenAI
 
 clients = {}
 token = os.getenv('API_KEY')
-#try:
-#clients['32B-Pro (beta)'] = [
-#    OpenAI(api_key=token, base_url=os.getenv('RUADAPT_PRO_PATH')),
-#    requests.get(os.getenv('RUADAPT_PRO_PATH') + '/models', headers={"Authorization": f"Bearer {token}"}).json()['data'][0]['id']
-#]
 
 clients['32B-QWQ'] = [
-    OpenAI(api_key=token, base_url=os.getenv('
-
+    OpenAI(api_key=token, base_url=os.getenv('RUADAPT_UNIVERSAL_URL')),
+    'RefalMachine/RuadaptQwen2.5-32B-QWQ-Beta'
 ]
-#clients['32B-FUSE-O1 (preview)'] = [
-#    OpenAI(api_key=token, base_url=os.getenv('RUADAPT_FUSE_PATH')),
-#    requests.get(os.getenv('RUADAPT_FUSE_PATH') + '/models', headers={"Authorization": f"Bearer {token}"}).json()['data'][0]['id']
-#]
-
-#clients['7B-Lite (beta)'] = [
-#    OpenAI(api_key=token, base_url=os.getenv('RUADAPT_LITE_PATH')),
-#    requests.get(os.getenv('RUADAPT_LITE_PATH') + '/models', headers={"Authorization": f"Bearer {token}"}).json()['data'][0]['id']
-#]
-#except:
-#    pass
-
-try:
-    clients['32B QWQ (experimental, without any additional tuning after LEP!)'] = [
-        OpenAI(api_key=token,
-               base_url=os.getenv('MODEL_NAME_OR_PATH_QWQ')),
-        requests.get(os.getenv('MODEL_NAME_OR_PATH_QWQ') + '/models', headers={"Authorization": f"Bearer {token}"}).json()['data'][0]['id']]
-except:
-    pass
-
-try:
-    clients['7B (work in progress)'] = [OpenAI(api_key=os.getenv('API_KEY'), base_url=os.getenv('MODEL_NAME_OR_PATH_7B')), requests.get(os.getenv('MODEL_NAME_OR_PATH_7B') + '/models').json()['data'][0]['id']]
-except:
-    pass
-
-try:
-    clients['3B'] = [OpenAI(api_key=os.getenv('API_KEY'), base_url=os.getenv('MODEL_NAME_OR_PATH_3B')), requests.get(os.getenv('MODEL_NAME_OR_PATH_3B') + '/models').json()['data'][0]['id']]
-except:
-    pass
 
 def respond(
     message,
@@ -107,8 +73,8 @@ def respond(
 """
 For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 """
-options = [
-options = options[1
+options = ['32B-QWQ']
+options = options[:1]
 system_old = "You are a helpful and harmless assistant. You should think step-by-step. First, reason (the user does not see your reasoning), then give your final answer."
 system_new = "Ты Руадапт - полезный и дружелюбный интеллектуальный ассистент для помощи пользователям в их вопросах."
 system_new2 = "Ты — Руадапт, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им."