Update PCAgent/api.py
PCAgent/api.py  +33 -16
@@ -35,6 +35,23 @@ def inference_chat(chat, model, api_url, token):
     for role, content in chat:
         messages.append({"role": role, "content": content})
 
+    payload = json.dumps({
+        "model": "gpt-4o",
+        "messages": messages
+    })
+
+    headers = {
+        'Accept': 'application/json',
+        'Authorization': f'Bearer {token}',
+        'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
+        'Content-Type': 'application/json'
+    }
+
+    response = requests.request("POST", api_url, headers=headers, data=payload)
+    data = response.json()
+    print(data)
+    return data
+
     # client = OpenAI(
     #     # If the environment variable is not configured, replace the following line with your Bailian API key: api_key="sk-xxx",
     #     api_key=token,
@@ -59,23 +76,23 @@ def inference_chat(chat, model, api_url, token):
     #     else:
     #         break
 
-    dashscope.base_http_api_url = api_url
-    num_try = 5
-    for _ in range(num_try):
-        try:
-            response = MultiModalConversation.call(api_key=token, model=model, messages=messages)
-            response = response['output']['choices'][0]['message']['content'][0]["text"]
-        except Exception as e:
-            print(f"Network Error: {e}")
-            try:
-                print(response)
-            except:
-                print("Request Failed")
-            time.sleep(2)
-        else:
-            break
+    # dashscope.base_http_api_url = api_url
+    # num_try = 5
+    # for _ in range(num_try):
+    #     try:
+    #         response = MultiModalConversation.call(api_key=token, model=model, messages=messages)
+    #         response = response['output']['choices'][0]['message']['content'][0]["text"]
+    #     except Exception as e:
+    #         print(f"Network Error: {e}")
+    #         try:
+    #             print(response)
+    #         except:
+    #             print("Request Failed")
+    #         time.sleep(2)
+    #     else:
+    #         break
 
-    return response
+    # return response
     # return json.loads(completion.model_dump_json())['choices'][0]['message']['content']
 
     # headers = {
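For reference, a minimal sketch of how the rewritten `inference_chat` could be exercised against an OpenAI-compatible chat-completions endpoint. The endpoint URL, API key, prompt text, and the content-part structure inside each message are illustrative assumptions, not values taken from this diff; only the `inference_chat(chat, model, api_url, token)` signature and the fact that it now returns the parsed JSON body follow from the change above.

```python
from PCAgent.api import inference_chat

# Hypothetical endpoint and key; substitute your own OpenAI-compatible values.
api_url = "https://api.openai.com/v1/chat/completions"
token = "sk-..."

# The content-part layout below is an assumption about how callers build
# messages; inference_chat itself only requires (role, content) tuples.
chat = [
    ("system", [{"type": "text", "text": "You are a helpful assistant."}]),
    ("user", [{"type": "text", "text": "Summarize the current screen."}]),
]

data = inference_chat(chat, "gpt-4o", api_url, token)

# inference_chat now returns the whole JSON body; for an OpenAI-compatible
# response the reply text sits at choices[0].message.content.
reply = data["choices"][0]["message"]["content"]
print(reply)
```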