Upload 9 files
- __pycache__/app.cpython-312.pyc +0 -0
- app.py +68 -3
- models/.DS_Store +0 -0
- models/together/__pycache__/main.cpython-312.pyc +0 -0
- models/together/main.py +79 -0
- models/vercel/__pycache__/main.cpython-312.pyc +0 -0
- models/vercel/main.py +210 -0
- requirements.txt +8 -1
- test.py +35 -0
__pycache__/app.cpython-312.pyc
ADDED
Binary file (2.96 kB)

app.py
CHANGED
@@ -1,7 +1,72 @@
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
+from fastapi.responses import StreamingResponse
+from fastapi.middleware.cors import CORSMiddleware
+from models.together.main import TogetherAPI
+from models.vercel.main import XaiAPI, GroqAPI
+
 
 app = FastAPI()
 
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],       # allows all origins
+    allow_credentials=True,
+    allow_methods=["*"],       # allows all methods
+    allow_headers=["*"],       # allows all headers
+)
+
 @app.get("/")
-def
-    return {"
+async def root():
+    return {"message": "Server Running Successfully"}
+
+@app.post("/api/v1/generate")
+async def generate(request: Request):
+    data = await request.json()
+    # .get() so that missing keys fall through to the validation below
+    # instead of raising KeyError.
+    messages = data.get('messages')
+    model = data.get('model')
+
+    if not messages or not model:
+        return {"error": "Invalid request. 'messages' and 'model' are required."}
+
+    try:
+        query = {
+            'model': model,
+            'max_tokens': None,
+            'temperature': 0.7,
+            'top_p': 0.7,
+            'top_k': 50,
+            'repetition_penalty': 1,
+            'stream_tokens': True,
+            'stop': ['<|eot_id|>', '<|eom_id|>'],
+            'messages': messages,
+            'stream': True,
+        }
+
+        together_models = TogetherAPI().get_model_list()
+        xai_models = XaiAPI().get_model_list()
+        groq_models = GroqAPI().get_model_list()
+
+        # Route the request to whichever backend hosts the requested model.
+        if model in together_models:
+            streamModel = TogetherAPI()
+        elif model in xai_models:
+            streamModel = XaiAPI()
+        elif model in groq_models:
+            streamModel = GroqAPI()
+        else:
+            return {"error": f"Model '{model}' is not supported."}
+
+        response = streamModel.generate(query)
+
+        return StreamingResponse(response, media_type="text/event-stream")
+
+    except Exception as e:
+        return {"error": f"An error occurred: {str(e)}"}
+
+@app.get("/api/v1/models")
+async def get_models():
+    try:
+        streamModel = TogetherAPI()
+        models = streamModel.get_model_list()
+        return {"models": models}
+    except Exception as e:
+        return {"error": f"An error occurred: {str(e)}"}

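Note: for local testing, the updated app can be served with uvicorn. A minimal sketch, not part of this commit (the file name run_dev.py is hypothetical; it assumes the module is importable as app):

# run_dev.py - hypothetical helper, not part of this commit.
# Serves the FastAPI app locally so /api/v1/generate can be exercised.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="127.0.0.1", port=8000)
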
models/.DS_Store
ADDED
Binary file (6.15 kB)

models/together/__pycache__/main.cpython-312.pyc
ADDED
Binary file (5.8 kB)

models/together/main.py
ADDED
@@ -0,0 +1,79 @@
import httpx
import asyncio


class TogetherAPI:

    # Session cookies captured from the Together AI playground.
    cookies = {
        'intercom-id-evnv2y8k': 'fea4d452-f9be-42e0-93e3-1e47a3836362',
        'intercom-device-id-evnv2y8k': '2bb3e469-0159-4b6b-a33e-1aea4b51ccb1',
        '__stripe_mid': 'e0f7c1ba-56c6-44d4-ba1d-cf4611453eb43cf922',
        'state-csrf': '6f2o8nqgee2dfqdmhaxipe',
        'together_auth_cookie': '%7B%22expires%22%3A%222026-04-09T15%3A14%3A08.985Z%22%2C%22session%22%3A%220eae08c6fd1b79a22476a317d440a2104d74cd3ba333e40771b5ce50a90784297eb82eff36263debca2ee0658abe3e43cab97f87794421111d4bdec56b43dd2595ee22a165c123ba3d0f807759555b5f6d3f51b7c248e7cefcdf0f0b897f62b25b2a569e2cb89633032f15dca9818f39ed49f3ac2d7e0bc3d24517c62c78b1e4%22%7D',
        '__stripe_sid': '979e00a2-06ed-45be-9a95-88d7e7580f625ccce4',
        'intercom-session-evnv2y8k': 'TzZzSzBNRG8xdHJtTVprMm1zUXFob0M2ekhFV3VmeDZFcW5UVldlYmFYc3RsRjFmdWJidjU1ZXVSZzNOSW9QTE82OUx6anlvMWVncmlTd2ZvOERDUXN4OUdoSEM5ZzRnQmh4d2o5S3JKeDA9LS00S3JOclNpNzU0VkVBaTNRNWhSMm93PT0=--2719775e99e920753d35527a45a6731bac5e8f8f',
        'AMP_7112ee0414': 'JTdCJTIyZGV2aWNlSWQlMjIlM0ElMjJmY2ZmNjE3Ny00Yzg0LTRlOTItYTFhMC1kM2Y1ZjllOTFkYTglMjIlMkMlMjJ1c2VySWQlMjIlM0ElMjI2N2I1ZDkwNDNkZTIyN2Q0OGIzMWEwZTMlMjIlMkMlMjJzZXNzaW9uSWQlMjIlM0ExNzQ0MjExNjQyMjEwJTJDJTIyb3B0T3V0JTIyJTNBZmFsc2UlMkMlMjJsYXN0RXZlbnRUaW1lJTIyJTNBMTc0NDIxMTc1ODAwOSUyQyUyMmxhc3RFdmVudElkJTIyJTNBMjMyJTJDJTIycGFnZUNvdW50ZXIlMjIlM0E1JTdE',
    }

    headers = {
        'accept': 'application/json',
        'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
        'authorization': 'Bearer 4d900964e385651ea685af6f6cd5573a17b421f50657f73f903525177915a7e2',
        'content-type': 'application/json',
        'priority': 'u=1, i',
        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'x-stainless-arch': 'unknown',
        'x-stainless-lang': 'js',
        'x-stainless-os': 'Unknown',
        'x-stainless-package-version': '0.11.1',
        'x-stainless-retry-count': '0',
        'x-stainless-runtime': 'browser:chrome',
        'x-stainless-runtime-version': '135.0.0',
        'referer': 'https://api.together.ai/playground/v2/chat/meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
    }

    def __init__(self):
        self.base_url = "https://api.together.ai/inference"

    def get_model_list(self):
        models = [
            'meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8',
            'meta-llama/Llama-4-Scout-17B-16E-Instruct',
            'deepseek-ai/DeepSeek-R1',
            'deepseek-ai/DeepSeek-V3',
            'Qwen/Qwen2.5-VL-72B-Instruct',
            'google/gemma-2-27b-it',
        ]
        return models

    async def generate(self, json_data: dict):
        # Stream the inference response, retrying on rate limits (HTTP 429).
        max_retries = 5
        for attempt in range(max_retries):
            async with httpx.AsyncClient(timeout=None) as client:
                try:
                    request_ctx = client.stream(
                        "POST",
                        self.base_url,
                        cookies=TogetherAPI.cookies,
                        headers=TogetherAPI.headers,
                        json=json_data
                    )

                    async with request_ctx as response:
                        if response.status_code == 200:
                            # Relay the upstream SSE lines to the caller as-is.
                            async for line in response.aiter_lines():
                                if line:
                                    yield f"{line}\n"
                            return
                        elif response.status_code == 429:
                            if attempt < max_retries - 1:
                                await asyncio.sleep(0.5)
                                continue
                            yield "data: [Rate limited, max retries]\n\n"
                            return
                        else:
                            yield f"data: [Unexpected status code: {response.status_code}]\n\n"
                            return
                except Exception as e:
                    yield f"data: [Connection error: {str(e)}]\n\n"
                    return

        yield "data: [Max retries reached]\n\n"

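Note: since generate is an async generator, it can also be consumed outside FastAPI. A minimal sketch, not part of this commit, assuming the hard-coded playground credentials above are still valid (the query mirrors the one app.py builds):

# consume_together.py - hypothetical snippet, not part of this commit.
import asyncio
from models.together.main import TogetherAPI

async def main():
    query = {
        "model": "deepseek-ai/DeepSeek-V3",
        "messages": [{"role": "user", "content": "Hello"}],
        "stream_tokens": True,
        "stream": True,
    }
    # Print each relayed SSE line as it arrives.
    async for chunk in TogetherAPI().generate(query):
        print(chunk, end="")

asyncio.run(main())
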
models/vercel/__pycache__/main.cpython-312.pyc
ADDED
Binary file (9.64 kB)

models/vercel/main.py
ADDED
@@ -0,0 +1,210 @@
import httpx
import random
import json
import time


class XaiAPI:

    headers = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
        'content-type': 'application/json',
        'origin': 'https://ai-sdk-starter-xai.vercel.app',
        'referer': 'https://ai-sdk-starter-xai.vercel.app/',
        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36'
    }

    def __init__(self):
        self.base_url = "https://ai-sdk-starter-xai.vercel.app/api/chat"

    def get_model_list(self):
        models = ["grok-3-mini", "grok-2-1212", "grok-3", "grok-3-fast", "grok-3-mini-fast"]
        return models

    @staticmethod
    def convert(messages):
        # Convert OpenAI-style messages into the parts-based format the
        # Vercel AI SDK starter endpoints expect.
        converted = []
        for message in messages:
            role = message.get("role", "user")
            content = message.get("content", "")

            if isinstance(content, list):
                parts = content
                text_content = "\n".join([p.get("text", "") for p in content if p.get("type") == "text"])
            else:
                text_content = str(content)
                parts = [{"type": "text", "text": text_content}]
            if role == "assistant":
                parts.insert(0, {"type": "step-start"})

            converted.append({
                "role": role,
                "content": text_content,
                "parts": parts
            })
        return converted

    async def generate(self, json_data: dict):
        messages = XaiAPI.convert(json_data["messages"])

        request_data = {
            "id": "".join(random.choices("0123456789abcdef", k=16)),
            "messages": messages,
            "selectedModel": json_data.get("model", "grok-2-1212"),
        }

        chunk_id = "chipling-xai-" + "".join(random.choices("0123456789abcdef", k=32))
        created = int(time.time())  # Unix timestamp for the chunks' "created" field
        total_tokens = 0

        try:
            async with httpx.AsyncClient(timeout=None) as client:
                async with client.stream(
                    "POST",
                    self.base_url,
                    headers=XaiAPI.headers,
                    json=request_data
                ) as request_ctx:
                    if request_ctx.status_code == 200:
                        async for line in request_ctx.aiter_lines():
                            if line:
                                if line.startswith('0:'):
                                    # '0:' lines carry text deltas; strip the JSON
                                    # quoting and unescape before re-wrapping as an
                                    # OpenAI-style chunk.
                                    text = line[2:].strip()
                                    if text.startswith('"') and text.endswith('"'):
                                        text = text[1:-1]
                                    text = text.replace('\\n', '\n').replace('\\', '')

                                    response = {
                                        "id": chunk_id,
                                        "object": "chat.completion.chunk",
                                        "created": created,
                                        "model": json_data.get("model", "grok-2-1212"),
                                        "choices": [{
                                            "index": 0,
                                            "text": text,
                                            "logprobs": None,
                                            "finish_reason": None
                                        }],
                                        "usage": None
                                    }
                                    yield f"data: {json.dumps(response)}\n\n"
                                    total_tokens += 1
                                elif line.startswith('d:'):
                                    # 'd:' marks the end of the stream: emit a final
                                    # usage chunk, then the SSE terminator.
                                    final = {
                                        "id": chunk_id,
                                        "object": "chat.completion.chunk",
                                        "created": created,
                                        "model": json_data.get("model", "grok-2-1212"),
                                        "choices": [],
                                        "usage": {
                                            "prompt_tokens": len(messages),
                                            "completion_tokens": total_tokens,
                                            "total_tokens": len(messages) + total_tokens
                                        }
                                    }
                                    yield f"data: {json.dumps(final)}\n\n"
                                    yield "data: [DONE]\n\n"
                                    return
                    else:
                        yield f"data: [Unexpected status code: {request_ctx.status_code}]\n\n"
        except Exception as e:
            yield f"data: [Connection error: {str(e)}]\n\n"


class GroqAPI:

    headers = {
        'accept': '*/*',
        'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
        'content-type': 'application/json',
        'origin': 'https://ai-sdk-starter-groq.vercel.app',
        'priority': 'u=1, i',
        'referer': 'https://ai-sdk-starter-groq.vercel.app/',
        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
    }

    def __init__(self):
        self.base_url = "https://ai-sdk-starter-groq.vercel.app/api/chat"

    def get_model_list(self):
        models = ['meta-llama/llama-4-scout-17b-16e-instruct', 'llama-3.1-8b-instant', 'llama-3.3-70b-versatile', 'deepseek-r1-distill-llama-70b']
        return models

    async def generate(self, json_data: dict):
        # The Groq starter endpoint speaks the same wire format, so XaiAPI's
        # message converter is reused here.
        messages = XaiAPI.convert(json_data["messages"])

        request_data = {
            "id": "".join(random.choices("0123456789abcdef", k=16)),
            "messages": messages,
            "selectedModel": json_data.get("model", "deepseek-r1-distill-llama-70b"),
        }

        chunk_id = "chipling-groq-" + "".join(random.choices("0123456789abcdef", k=32))
        created = int(time.time())  # Unix timestamp for the chunks' "created" field
        total_tokens = 0

        try:
            async with httpx.AsyncClient(timeout=None) as client:
                async with client.stream(
                    "POST",
                    self.base_url,
                    headers=GroqAPI.headers,
                    json=request_data
                ) as request_ctx:
                    if request_ctx.status_code == 200:
                        async for line in request_ctx.aiter_lines():
                            if line:
                                if line.startswith('0:'):
                                    # '0:' lines carry text deltas; strip the JSON
                                    # quoting and unescape before re-wrapping as an
                                    # OpenAI-style chunk.
                                    text = line[2:].strip()
                                    if text.startswith('"') and text.endswith('"'):
                                        text = text[1:-1]
                                    text = text.replace('\\n', '\n').replace('\\', '')

                                    response = {
                                        "id": chunk_id,
                                        "object": "chat.completion.chunk",
                                        "created": created,
                                        "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
                                        "choices": [{
                                            "index": 0,
                                            "text": text,
                                            "logprobs": None,
                                            "finish_reason": None
                                        }],
                                        "usage": None
                                    }
                                    yield f"data: {json.dumps(response)}\n\n"
                                    total_tokens += 1
                                elif line.startswith('d:'):
                                    # 'd:' marks the end of the stream: emit a final
                                    # usage chunk, then the SSE terminator.
                                    final = {
                                        "id": chunk_id,
                                        "object": "chat.completion.chunk",
                                        "created": created,
                                        "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
                                        "choices": [],
                                        "usage": {
                                            "prompt_tokens": len(messages),
                                            "completion_tokens": total_tokens,
                                            "total_tokens": len(messages) + total_tokens
                                        }
                                    }
                                    yield f"data: {json.dumps(final)}\n\n"
                                    yield "data: [DONE]\n\n"
                                    return
                    else:
                        yield f"data: [Unexpected status code: {request_ctx.status_code}]\n\n"
        except Exception as e:
            yield f"data: [Connection error: {str(e)}]\n\n"

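Note: the '0:'/'d:' prefixes handled above follow the Vercel AI SDK's data-stream framing, where each '0:' payload is a JSON-encoded string. A small illustrative sketch (the sample lines are hypothetical, not captured output) showing the decoding; json.loads handles the quoting and escapes in one step, which is more robust than the manual replace calls used above:

# Hypothetical sample lines in the Vercel AI SDK data-stream framing.
import json

sample_lines = [
    '0:"Hello"',                      # text delta
    '0:" world\\n"',                  # text delta with an escaped newline
    'd:{"finishReason":"stop"}',      # done marker with metadata
]

text = ""
for line in sample_lines:
    prefix, _, payload = line.partition(":")
    if prefix == "0":
        text += json.loads(payload)   # decodes the JSON string payload
    elif prefix == "d":
        print("finish reason:", json.loads(payload).get("finishReason"))

print(repr(text))  # -> 'Hello world\n'
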
requirements.txt
CHANGED
@@ -1,2 +1,9 @@
-fastapi
+fastapi==0.115.12
+httpx==0.28.1
+pydantic==2.11.3
+Requests==2.32.3
 uvicorn[standard]
+asyncio
+jinja2
+aiofiles
+curl_cffi

test.py
ADDED
@@ -0,0 +1,35 @@
import requests
import json

messages = [
    {"role": "user", "content": "helo"},
    {"role": "assistant", "content": "Hello! How can I assist you today?"},
    {"role": "user", "content": "who are you and give me a brief description of who created you"}
]

model = "grok-3-mini"

url = "http://127.0.0.1:8000/api/v1/generate"

payload = {
    "messages": messages,
    "model": model
}

# Stream the SSE response and print the text deltas as they arrive.
response = requests.post(url, json=payload, stream=True)

if response.status_code == 200:
    for line in response.iter_lines():
        if line:
            decoded_line = line.decode('utf-8')
            if decoded_line.startswith('data: [DONE]'):
                break
            elif decoded_line.startswith('data: '):
                try:
                    json_data = json.loads(decoded_line[6:])
                    if json_data["choices"] and "text" in json_data["choices"][0]:
                        print(json_data["choices"][0]["text"], end='')
                except json.JSONDecodeError:
                    continue
else:
    print(f"Request failed with status code {response.status_code}")

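Note: with the server from app.py running locally (for example via uvicorn app:app --port 8000), running python test.py should print the streamed completion token by token.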