# NOTE: removed scraped-page artifacts (file size, commit hashes, line-number
# gutter) that were accidentally pasted above the imports and broke parsing.
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from models.together.main import TogetherAPI
from models.vercel.main import XaiAPI, GroqAPI
# Single FastAPI application instance served by the ASGI server (uvicorn etc.).
app = FastAPI()
# Open the API to browser clients on any origin.
# NOTE(review): allow_credentials=True combined with allow_origins=["*"] is
# contradictory under the CORS spec — browsers refuse credentialed requests
# against a wildcard Access-Control-Allow-Origin. Confirm whether credentials
# are actually needed; if so, list explicit origins instead of "*".
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allows all methods
    allow_headers=["*"],  # Allows all headers
)
@app.get("/")
async def root():
return {"message": "Server Running Successfully"}
@app.post("/api/v1/generate")
async def generate(request: Request):
data = await request.json()
messages = data['messages']
model = data['model']
if not messages or not model:
return {"error": "Invalid request. 'messages' and 'model' are required."}
try:
query = {
'model': model,
'max_tokens': None,
'temperature': 0.7,
'top_p': 0.7,
'top_k': 50,
'repetition_penalty': 1,
'stream_tokens': True,
'stop': ['<|eot_id|>', '<|eom_id|>'],
'messages': messages,
'stream': True,
}
together_models = TogetherAPI().get_model_list()
xai_models = XaiAPI().get_model_list()
groq_models = GroqAPI().get_model_list()
if model in together_models:
streamModel = TogetherAPI()
elif model in xai_models:
streamModel = XaiAPI()
elif model in groq_models:
streamModel = GroqAPI()
else:
return {"error": f"Model '{model}' is not supported."}
response = streamModel.generate(query)
return StreamingResponse(response, media_type="text/event-stream")
except Exception as e:
return {"error": f"An error occurred: {str(e)}"}
@app.get("/api/v1/models")
async def get_models():
try:
streamModel = TogetherAPI()
models = streamModel.get_model_list()
return {"models": models}
except Exception as e:
return {"error": f"An error occurred: {str(e)}"} |