abdullahalioo committed
Commit 93c4b1f · verified · 1 Parent(s): 471a43e

Update app.py

Files changed (1)
  1. app.py +10 -22
app.py CHANGED
@@ -2,30 +2,22 @@ import os
 from fastapi import FastAPI, HTTPException
 from fastapi.responses import StreamingResponse
 from openai import AsyncOpenAI
+import asyncio
 
 app = FastAPI()
 
-async def generate_ai_response(prompt: str):
-    # Configuration for unofficial GitHub AI endpoint
-    global token
-    token = os.getenv("GITHUB_TOKEN")
-    if not token:
-        raise HTTPException(status_code=500, detail="GitHub token not configured")
-
-    endpoint = "https://models.github.ai/inference"
-    model = "openai/gpt-4.1-mini"  # Unofficial model name
-
-    client = AsyncOpenAI(base_url=endpoint, api_key=token)
+# Initialize OpenAI client
+client = AsyncOpenAI(api_key=os.getenv("GITHUB_TOKEN"))
 
+async def generate_ai_response(prompt: str):
     try:
         stream = await client.chat.completions.create(
+            model="gpt-3.5-turbo",  # Using 3.5-turbo for better compatibility
             messages=[
                 {"role": "system", "content": "You are a helpful assistant."},
                 {"role": "user", "content": prompt}
             ],
-            model=model,
-            temperature=1.0,
-            top_p=1.0,
+            temperature=0.7,
             stream=True
         )
 
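Note that the first hunk ends mid-function: the lines that actually consume the stream (old 32-42 / new 24-34) fall outside the diff context. The sketch below is a hypothetical reconstruction of that omitted generator body, assuming it yields token deltas for StreamingResponse to forward; the async-for loop and the error handling are assumptions, not the committed code.

# Hypothetical sketch of the omitted body of generate_ai_response.
# The client, model, and create() call match the hunk above; the
# consumption loop and error handling are assumptions.
import os
from openai import AsyncOpenAI

client = AsyncOpenAI(api_key=os.getenv("GITHUB_TOKEN"))

async def generate_ai_response(prompt: str):
    try:
        stream = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": prompt},
            ],
            temperature=0.7,
            stream=True,
        )
        # Yield each content delta as it arrives so that FastAPI's
        # StreamingResponse can forward tokens incrementally.
        async for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                yield delta
    except Exception as exc:
        # Report errors in-band; the HTTP stream is already open.
        yield f"Error: {exc}"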
@@ -43,14 +35,10 @@ async def generate_response(prompt: str):
         raise HTTPException(status_code=400, detail="Prompt cannot be empty")
 
     return StreamingResponse(
-        response_with_token(),
-        media_type="text/event-stream",
+        generate_ai_response(prompt),
+        media_type="text/event-stream"
     )
 
+# For Hugging Face Spaces
 def get_app():
     return app
-
-@app.get("/gettoken")
-async def get_token():
-    global token
-    return {"token": token}
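The "For Hugging Face Spaces" comment and the get_app() factory suggest an external server imports the app. One plausible launch script, assuming uvicorn and the usual Spaces port 7860 (neither is shown in the diff):

# Hypothetical launcher; the diff shows only the get_app() hook, so
# the serving command, host, and port are assumptions (7860 is the
# usual Hugging Face Spaces port).
import uvicorn

from app import get_app

if __name__ == "__main__":
    uvicorn.run(get_app(), host="0.0.0.0", port=7860)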
 
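A quick way to exercise the streaming endpoint once the app is running: the /generate path and the prompt query parameter below are assumptions, since the diff does not show the route decorator above generate_response.

# Hypothetical client; the /generate path and the prompt query
# parameter are assumptions, since the route decorator is not in
# the diff.
import asyncio
import httpx

async def main() -> None:
    async with httpx.AsyncClient(timeout=None) as http:
        async with http.stream(
            "GET", "http://localhost:7860/generate", params={"prompt": "Hello"}
        ) as resp:
            # Print tokens as they arrive instead of waiting for the
            # full body, matching the server-side streaming behavior.
            async for text in resp.aiter_text():
                print(text, end="", flush=True)

asyncio.run(main())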