Maouu committed
Commit 3f69a5f · 1 Parent(s): 199518c

added routes

Files changed (1)
  1. app.py +93 -8
app.py CHANGED
@@ -163,7 +163,7 @@ async def groqgenerate(json_data: Dict[str, Any]):
     # Create streaming response
     stream = client.chat.completions.create(
         messages=messages,
-        model="meta-llama/llama-4-scout-17b-16e-instruct",
+        model=json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct"),
         temperature=json_data.get("temperature", 0.7),
         max_completion_tokens=json_data.get("max_tokens", 1024),
         top_p=json_data.get("top_p", 1),
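This hunk makes groqgenerate honor a per-request model instead of a hardcoded one, falling back to the Scout model only when the caller omits the field. A minimal sketch of the lookup semantics (the request bodies are illustrative, not from the repo):

json_data = {"messages": [{"role": "user", "content": "hi"}]}
json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct")
# -> "meta-llama/llama-4-scout-17b-16e-instruct" (key absent, default used)

json_data["model"] = "llama-3.3-70b-versatile"
json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct")
# -> "llama-3.3-70b-versatile" (the caller's choice wins)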
 
@@ -181,7 +181,7 @@ async def groqgenerate(json_data: Dict[str, Any]):
         "id": chunk_id,
         "object": "chat.completion.chunk",
         "created": created,
-        "model": json_data.get("model", "llama-3.3-70b-versatile"),
+        "model": json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct"),
         "choices": [{
             "index": 0,
             "text": content,
 
@@ -197,7 +197,7 @@ async def groqgenerate(json_data: Dict[str, Any]):
         "id": chunk_id,
         "object": "chat.completion.chunk",
         "created": created,
-        "model": json_data.get("model", "llama-3.3-70b-versatile"),
+        "model": json_data.get("model", "meta-llama/llama-4-scout-17b-16e-instruct"),
         "choices": [],
         "usage": {
             "prompt_tokens": len(messages),
 
@@ -230,9 +230,11 @@ async def vercelXaigenerate(json_data: Dict[str, Any]):
     request_data = {
         "id": "".join(random.choices("0123456789abcdef", k=16)),
         "messages": messages,
-        "selectedModel": "grok-2-1212"
+        "selectedModel": json_data.get("model", "grok-2-1212"),
     }
 
+    print(request_data)
+
     chunk_id = "xai-" + "".join(random.choices("0123456789abcdef", k=32))
     created = int(asyncio.get_event_loop().time())
     total_tokens = 0
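The xAI path now forwards the caller's model as selectedModel. For a request carrying {"model": "grok-3-mini"}, the payload built here would look roughly like this (the id is freshly random per request):

request_data = {
    "id": "a1b2c3d4e5f60718",                         # 16 random hex chars
    "messages": [{"role": "user", "content": "hi"}],
    "selectedModel": "grok-3-mini",                   # "grok-2-1212" if "model" is omitted
}

The added print(request_data) writes this payload to stdout on every call, which is handy while debugging but worth demoting to real logging before production.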
 
@@ -314,7 +316,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
     request_data = {
         "id": "".join(random.choices("0123456789abcdef", k=16)),
         "messages": messages,
-        "selectedModel": "deepseek-r1-distill-llama-70b"
+        "selectedModel": json_data.get("model", "deepseek-r1-distill-llama-70b"),
     }
 
     chunk_id = "vercel-groq-" + "".join(random.choices("0123456789abcdef", k=32))
 
@@ -344,7 +346,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
         "id": chunk_id,
         "object": "chat.completion.chunk",
         "created": created,
-        "model": json_data.get("model", "grok-2-1212"),
+        "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
         "choices": [{
             "index": 0,
             "text": text,
 
@@ -360,7 +362,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
         "id": chunk_id,
         "object": "chat.completion.chunk",
         "created": created,
-        "model": json_data.get("model", "llama-8b"),
+        "model": json_data.get("model", "deepseek-r1-distill-llama-70b"),
         "choices": [],
         "usage": {
             "prompt_tokens": len(messages),
 
@@ -379,7 +381,7 @@ async def vercelGroqgenerate(json_data: Dict[str, Any]):
 
 @app.get("/")
 async def index():
-    return {"status": "ok", "message": "Welcome to the Chipling API!", "version": "1.5", "routes": ["/chat", "/generate-modules", "/generate-topics"]}
+    return {"status": "ok", "message": "Welcome to the Chipling API!", "version": "2.0", "routes": ["/chat", "/generate-modules", "/generate-topics", "/v1/generate", "/v1/generate-images", "/chipsearch", "/scrape-md"]}
 
 @app.post("/chat")
 async def chat(request: ChatRequest):
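The index route now advertises every public endpoint and bumps the version. A quick sanity check once the app is running (base URL assumed, not part of the repo):

import httpx

info = httpx.get("http://localhost:8000/").json()
assert info["version"] == "2.0"
assert "/v1/generate" in info["routes"]
assert "/v1/generate-images" in info["routes"]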
 
@@ -437,3 +439,86 @@ async def scrape_md(request: Request):
     data = scrape_to_markdown(url)
 
     return {"markdown": data}
+
+@app.post("/v1/generate")
+async def api_generate(request: Request):
+    data = await request.json()
+    messages = data["messages"]
+    model = data["model"]
+    if not messages:
+        return {"error": "messages is required"}
+    elif not model:
+        return {"error": "Model is required"}
+
+    try:
+        json_data = {
+            'model': model,
+            'max_tokens': None,
+            'temperature': 0.7,
+            'top_p': 0.7,
+            'top_k': 50,
+            'repetition_penalty': 1,
+            'stream_tokens': True,
+            'stop': ['<|eot_id|>', '<|eom_id|>'],
+            'messages': messages,
+            'stream': True,
+        }
+
+        xaimodels = ["grok-3-mini", "grok-2-1212", "grok-3", "grok-3-fast", "grok-3-mini-fast"]
+
+        if model in xaimodels:
+            return StreamingResponse(vercelXaigenerate(json_data), media_type='text/event-stream')
+        else:
+            try:
+                return StreamingResponse(vercelGroqgenerate(json_data), media_type='text/event-stream')
+            except Exception as e:
+                try:
+                    return StreamingResponse(generate(json_data), media_type='text/event-stream')
+                except Exception as e:
+                    return StreamingResponse(groqgenerate(json_data), media_type='text/event-stream')
+    except Exception as e:
+        return {"error": str(e)}
+
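The new /v1/generate route validates the body, sends Grok models to the xAI backend, and tries the Vercel Groq path for everything else. A minimal client sketch (local URL and payload are illustrative):

import httpx

payload = {
    "model": "grok-3-mini",  # anything outside xaimodels takes the Groq path
    "messages": [{"role": "user", "content": "Explain SSE in one sentence."}],
}
with httpx.stream("POST", "http://localhost:8000/v1/generate", json=payload, timeout=None) as r:
    for line in r.iter_lines():
        if line:
            print(line)

Two caveats: data["messages"] raises KeyError before the not-checks can run, so data.get("messages") would let the validation actually fire; and the nested try/except blocks only guard construction of the StreamingResponse. Calling vercelGroqgenerate(json_data) merely creates the async generator, none of whose body runs until the response is iterated, so upstream failures surface mid-stream and the generate/groqgenerate fallbacks will rarely, if ever, trigger.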
+@app.post("/v1/generate-images")
+async def generate_images(request: Request):
+    data = await request.json()
+    prompt = data.get("prompt")
+    provider = data.get("provider")
+    modelId = data.get("modelId")
+
+    if not prompt:
+        return {"error": "Prompt is required"}
+    if not provider:
+        return {"error": "Provider is required"}
+    if not modelId:
+        return {"error": "Model ID is required"}
+
+    headers = {
+        'accept': '*/*',
+        'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
+        'content-type': 'application/json',
+        'origin': 'https://fal-image-generator.vercel.app',
+        'priority': 'u=1, i',
+        'referer': 'https://fal-image-generator.vercel.app/',
+        'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
+        'sec-ch-ua-mobile': '?0',
+        'sec-ch-ua-platform': '"macOS"',
+        'sec-fetch-dest': 'empty',
+        'sec-fetch-mode': 'cors',
+        'sec-fetch-site': 'same-origin',
+        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
+    }
+
+    json_data = {
+        'prompt': prompt,
+        'provider': 'fal',
+        'modelId': 'fal-ai/fast-sdxl',
+    }
+
+    async with httpx.AsyncClient() as client:
+        response = await client.post(
+            'https://fal-image-generator.vercel.app/api/generate-images',
+            headers=headers,
+            json=json_data
+        )
+        return response.json()
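The /v1/generate-images route proxies to the fal image generator behind the Vercel demo app. Note that provider and modelId are validated but the outbound json_data hardcodes 'fal' and 'fal-ai/fast-sdxl', so the caller's values are currently ignored. A usage sketch (local URL is illustrative):

import httpx

resp = httpx.post(
    "http://localhost:8000/v1/generate-images",
    json={"prompt": "a watercolor fox", "provider": "fal", "modelId": "fal-ai/fast-sdxl"},
    timeout=60,
)
print(resp.json())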