abdullahalioo committed on
Commit
6025f1c
·
verified ·
1 Parent(s): 93c4b1f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -7
app.py CHANGED
@@ -2,22 +2,29 @@ import os
2
  from fastapi import FastAPI, HTTPException
3
  from fastapi.responses import StreamingResponse
4
  from openai import AsyncOpenAI
5
- import asyncio
6
 
7
  app = FastAPI()
8
 
9
- # Initialize OpenAI client
10
- client = AsyncOpenAI(api_key=os.getenv("GITHUB_TOKEN"))
11
-
12
  async def generate_ai_response(prompt: str):
 
 
 
 
 
 
 
 
 
 
13
  try:
14
  stream = await client.chat.completions.create(
15
- model="gpt-3.5-turbo", # Using 3.5-turbo for better compatibility
16
  messages=[
17
  {"role": "system", "content": "You are a helpful assistant."},
18
  {"role": "user", "content": prompt}
19
  ],
20
- temperature=0.7,
 
 
21
  stream=True
22
  )
23
 
@@ -39,6 +46,5 @@ async def generate_response(prompt: str):
39
  media_type="text/event-stream"
40
  )
41
 
42
- # For Hugging Face Spaces
43
  def get_app():
44
  return app
 
2
  from fastapi import FastAPI, HTTPException
3
  from fastapi.responses import StreamingResponse
4
  from openai import AsyncOpenAI
 
5
 
6
  app = FastAPI()
7
 
 
 
 
8
  async def generate_ai_response(prompt: str):
9
+ # Configuration for unofficial GitHub AI endpoint
10
+ token = os.getenv("GITHUB_TOKEN")
11
+ if not token:
12
+ raise HTTPException(status_code=500, detail="GitHub token not configured")
13
+
14
+ endpoint = "https://models.github.ai/inference"
15
+ model = "openai/gpt-4.1-mini" # Unofficial model name
16
+
17
+ client = AsyncOpenAI(base_url=endpoint, api_key=token)
18
+
19
  try:
20
  stream = await client.chat.completions.create(
 
21
  messages=[
22
  {"role": "system", "content": "You are a helpful assistant."},
23
  {"role": "user", "content": prompt}
24
  ],
25
+ model=model,
26
+ temperature=1.0,
27
+ top_p=1.0,
28
  stream=True
29
  )
30
 
 
46
  media_type="text/event-stream"
47
  )
48
 
 
49
  def get_app():
50
  return app