abdullahalioo committed on
Commit
fa004ce
·
verified ·
1 Parent(s): 523f3b3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -7
app.py CHANGED
@@ -6,17 +6,16 @@ from openai import AsyncOpenAI
6
 
7
  app = FastAPI()
8
 
9
- # Define a request model for the prompt and optional model name
10
  class GenerateRequest(BaseModel):
11
  prompt: str
12
- model: str
13
 
14
  async def generate_ai_response(prompt: str, model: str):
15
- # Configuration for unofficial GitHub AI endpoint
16
  token = os.getenv("GITHUB_TOKEN")
17
  if not token:
18
  raise HTTPException(status_code=500, detail="GitHub token not configured")
19
-
 
20
  endpoint = "https://models.github.ai/inference"
21
 
22
  client = AsyncOpenAI(base_url=endpoint, api_key=token)
@@ -27,7 +26,7 @@ async def generate_ai_response(prompt: str, model: str):
27
  {"role": "system", "content": "You are a helpful assistant."},
28
  {"role": "user", "content": prompt}
29
  ],
30
- model=model,
31
  temperature=1.0,
32
  top_p=1.0,
33
  stream=True
@@ -45,11 +44,11 @@ async def generate_ai_response(prompt: str, model: str):
45
  async def generate_response(request: GenerateRequest):
46
  if not request.prompt:
47
  raise HTTPException(status_code=400, detail="Prompt cannot be empty")
48
-
49
  return StreamingResponse(
50
  generate_ai_response(request.prompt, request.model),
51
  media_type="text/event-stream"
52
  )
53
 
54
  def get_app():
55
- return app
 
6
 
7
  app = FastAPI()
8
 
 
9
  class GenerateRequest(BaseModel):
10
  prompt: str
11
+ model: str # e.g., "deepseek/DeepSeek-V3-0324"
12
 
13
  async def generate_ai_response(prompt: str, model: str):
 
14
  token = os.getenv("GITHUB_TOKEN")
15
  if not token:
16
  raise HTTPException(status_code=500, detail="GitHub token not configured")
17
+
18
+ # You can also make this endpoint dynamic if needed
19
  endpoint = "https://models.github.ai/inference"
20
 
21
  client = AsyncOpenAI(base_url=endpoint, api_key=token)
 
26
  {"role": "system", "content": "You are a helpful assistant."},
27
  {"role": "user", "content": prompt}
28
  ],
29
+ model=model, # dynamically set model from user input
30
  temperature=1.0,
31
  top_p=1.0,
32
  stream=True
 
44
  async def generate_response(request: GenerateRequest):
45
  if not request.prompt:
46
  raise HTTPException(status_code=400, detail="Prompt cannot be empty")
47
+
48
  return StreamingResponse(
49
  generate_ai_response(request.prompt, request.model),
50
  media_type="text/event-stream"
51
  )
52
 
53
  def get_app():
54
+ return app