|
from fastapi import FastAPI, Request |
|
from fastapi.responses import StreamingResponse |
|
from fastapi.middleware.cors import CORSMiddleware |
|
from models.together.main import TogetherAPI |
|
from models.vercel.main import XaiAPI, GroqAPI |
|
|
|
|
|
# Application instance served by the ASGI server (e.g. uvicorn).
app = FastAPI()


# Allow cross-origin requests from any origin. NOTE(review): "*" origins
# combined with allow_credentials=True is a permissive CORS setup —
# acceptable for an open API, but confirm this is intentional.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
|
@app.get("/")
async def root():
    """Health-check endpoint: confirms the server is up and responding."""
    status_payload = {"message": "Server Running Successfully"}
    return status_payload
|
|
|
@app.post("/api/v1/generate")
async def generate(request: Request):
    """Stream a chat completion from whichever provider hosts the model.

    Expects a JSON body with:
      - 'messages': list of chat messages to send to the model
      - 'model': provider model identifier

    Returns a text/event-stream StreamingResponse on success, or a JSON
    object with an 'error' key on failure.
    """
    # Guard the parse: a malformed body previously raised outside the
    # try block and produced an unhandled 500.
    try:
        data = await request.json()
    except Exception:
        return {"error": "Invalid request. 'messages' and 'model' are required."}

    # Use .get() so missing keys yield None instead of raising KeyError
    # before the validation below has a chance to run (the original
    # data['messages'] made the validation branch unreachable for
    # missing keys).
    messages = data.get('messages')
    model = data.get('model')

    if not messages or not model:
        return {"error": "Invalid request. 'messages' and 'model' are required."}

    try:
        # Generation parameters shared by all providers; 'stream' and
        # 'stream_tokens' request token-by-token streaming output.
        query = {
            'model': model,
            'max_tokens': None,
            'temperature': 0.7,
            'top_p': 0.7,
            'top_k': 50,
            'repetition_penalty': 1,
            'stream_tokens': True,
            'stop': ['<|eot_id|>', '<|eom_id|>'],
            'messages': messages,
            'stream': True,
        }

        # Route the request to whichever provider serves this model.
        together_models = TogetherAPI().get_model_list()
        xai_models = XaiAPI().get_model_list()
        groq_models = GroqAPI().get_model_list()

        if model in together_models:
            streamModel = TogetherAPI()
        elif model in xai_models:
            streamModel = XaiAPI()
        elif model in groq_models:
            streamModel = GroqAPI()
        else:
            return {"error": f"Model '{model}' is not supported."}

        response = streamModel.generate(query)

        return StreamingResponse(response, media_type="text/event-stream")

    except Exception as e:
        # Top-level boundary: surface any provider failure as a JSON
        # error payload rather than an unhandled 500.
        return {"error": f"An error occurred: {str(e)}"}
|
|
|
@app.get("/api/v1/models")
async def get_models():
    """Return the list of model identifiers exposed by the Together provider.

    On failure, responds with a JSON object carrying an 'error' key.
    """
    try:
        provider = TogetherAPI()
        available = provider.get_model_list()
        return {"models": available}
    except Exception as e:
        return {"error": f"An error occurred: {str(e)}"}