Maouu committed on
Commit 184feca · 1 Parent(s): 691b1e3

Added a new AI model provider for Grok

Files changed (4):
  1. .DS_Store +0 -0
  2. __pycache__/app.cpython-312.pyc +0 -0
  3. app.py +102 -1
  4. logs.json +17 -1
.DS_Store CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
 
__pycache__/app.cpython-312.pyc CHANGED
Binary files a/__pycache__/app.cpython-312.pyc and b/__pycache__/app.cpython-312.pyc differ
 
app.py CHANGED
@@ -127,6 +127,31 @@ def convert_to_groq_schema(messages: List[Dict[str, Any]]) -> List[Dict[str, str]]:
     return converted
 
 
+def convert_to_xai_schema(messages: List[Dict[str, Any]]) -> List[Dict[str, str]]:
+    converted = []
+    for message in messages:
+        role = message.get("role", "user")
+        content = message.get("content", "")
+
+        if isinstance(content, list):
+            # Handle content that's already in parts format
+            parts = content
+            text_content = "\n".join([p.get("text", "") for p in content if p.get("type") == "text"])
+        else:
+            # Create parts format for text content
+            text_content = str(content)
+            parts = [{"type": "text", "text": text_content}]
+            if role == "assistant":
+                parts.insert(0, {"type": "step-start"})
+
+        converted.append({
+            "role": role,
+            "content": text_content,
+            "parts": parts
+        })
+    return converted
+
+
 async def groqgenerate(json_data: Dict[str, Any]):
     try:
         messages = convert_to_groq_schema(json_data["messages"])
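
The new converter appears to mirror the message shape used by the Vercel AI SDK demo app this commit talks to: each message carries both a flat content string and a parts list, and assistant turns get a leading step-start part. As a quick illustrative check (sample messages invented for demonstration; the output assumes the step-start branch applies to plain-string content, as reconstructed above):

    convert_to_xai_schema([
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
    ])
    # [{'role': 'user', 'content': 'Hi',
    #   'parts': [{'type': 'text', 'text': 'Hi'}]},
    #  {'role': 'assistant', 'content': 'Hello!',
    #   'parts': [{'type': 'step-start'}, {'type': 'text', 'text': 'Hello!'}]}]
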
@@ -185,6 +210,82 @@ async def groqgenerate(json_data: Dict[str, Any]):
         generate(json_data)
 
 
+async def vercelXaigenerate(json_data: Dict[str, Any]):
+    headers = {
+        'accept': '*/*',
+        'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
+        'content-type': 'application/json',
+        'origin': 'https://ai-sdk-starter-xai.vercel.app',
+        'referer': 'https://ai-sdk-starter-xai.vercel.app/',
+        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36'
+    }
+
+    # Convert messages to XAI format
+    messages = convert_to_xai_schema(json_data["messages"])
+
+    request_data = {
+        "id": "".join(random.choices("0123456789abcdef", k=16)),
+        "messages": messages,
+        "selectedModel": "grok-2-1212"
+    }
+
+    chunk_id = "xai-" + "".join(random.choices("0123456789abcdef", k=32))
+    created = int(asyncio.get_event_loop().time())
+    total_tokens = 0
+
+    try:
+        async with httpx.AsyncClient(timeout=None) as client:
+            async with client.stream(
+                "POST",
+                "https://ai-sdk-starter-xai.vercel.app/api/chat",
+                headers=headers,
+                json=request_data
+            ) as request_ctx:
+                if request_ctx.status_code == 200:
+                    async for line in request_ctx.aiter_lines():
+                        if line:
+                            if line.startswith('0:'):
+                                text = line[2:]
+                                response = {
+                                    "id": chunk_id,
+                                    "object": "chat.completion.chunk",
+                                    "created": created,
+                                    "model": json_data.get("model", "grok-2-1212"),
+                                    "choices": [{
+                                        "index": 0,
+                                        "text": text,
+                                        "logprobs": None,
+                                        "finish_reason": None
+                                    }],
+                                    "usage": None
+                                }
+                                yield f"data: {json.dumps(response)}\n\n"
+                                total_tokens += 1
+                            elif line.startswith('d:'):
+                                final = {
+                                    "id": chunk_id,
+                                    "object": "chat.completion.chunk",
+                                    "created": created,
+                                    "model": json_data.get("model", "grok-2-1212"),
+                                    "choices": [],
+                                    "usage": {
+                                        "prompt_tokens": len(messages),
+                                        "completion_tokens": total_tokens,
+                                        "total_tokens": len(messages) + total_tokens
+                                    }
+                                }
+                                yield f"data: {json.dumps(final)}\n\n"
+                                yield "data: [DONE]\n\n"
+                                return
+                else:
+                    yield f"data: [Unexpected status code: {request_ctx.status_code}]\n\n"
+    except Exception as e:
+        yield f"data: [Connection error: {str(e)}]\n\n"
+
+
 @app.get("/")
 async def index():
     return {"status": "ok", "message": "Welcome to the Chipling API!", "version": "1.0", "routes": ["/chat", "/generate-modules", "/generate-topics"]}
@@ -218,7 +319,7 @@ async def chat(request: ChatRequest):
         'stream': True,
     }
 
-    selected_generator = random.choice([groqgenerate, generate])
+    selected_generator = random.choice([vercelXaigenerate, generate, groqgenerate])
     log_request("/chat", selected_generator.__name__)
     return StreamingResponse(selected_generator(json_data), media_type='text/event-stream')
 
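
With this change, /chat fans requests out uniformly across the three providers, since random.choice weighs each generator equally. If unequal weighting were ever wanted, for example while the Grok path is still bedding in, random.choices with a weights argument would be the drop-in variant; the snippet below is hypothetical and not part of this commit:

    # Hypothetical weighted selection -- not in this commit.
    selected_generator = random.choices(
        [vercelXaigenerate, generate, groqgenerate],
        weights=[1, 2, 2],  # favour the two established providers
        k=1,
    )[0]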
 
 
logs.json CHANGED
@@ -1 +1,17 @@
-[]
+[
+  {
+    "timestamp": "2025-04-12T11:48:38.183002",
+    "endpoint": "/chat",
+    "query": "vercelXaigenerate"
+  },
+  {
+    "timestamp": "2025-04-12T11:49:55.976382",
+    "endpoint": "/chat",
+    "query": "vercelXaigenerate"
+  },
+  {
+    "timestamp": "2025-04-12T11:51:54.228166",
+    "endpoint": "/chat",
+    "query": "vercelXaigenerate"
+  }
+]