Update app.py
app.py CHANGED
@@ -12,22 +12,41 @@ logger = logging.getLogger(__name__)
 
 app = FastAPI()
 
+# Define valid models (replace with actual models supported by your AI server)
+VALID_MODELS = ["default-model", "another-model"]  # Update this list
+
 class GenerateRequest(BaseModel):
     prompt: str
+    publisher: Optional[str] = None  # Allow publisher in the body if needed
 
-async def generate_ai_response(prompt: str, model: str):
-    logger.debug(f"Received prompt: {prompt}, model: {model}")
+async def generate_ai_response(prompt: str, model: str, publisher: Optional[str]):
+    logger.debug(f"Received prompt: {prompt}, model: {model}, publisher: {publisher}")
+
+    # Configuration for AI endpoint
     token = os.getenv("GITHUB_TOKEN")
     endpoint = os.getenv("AI_SERVER_URL", "https://models.github.ai/inference")
+    default_publisher = os.getenv("DEFAULT_PUBLISHER", "abdullahalioo")  # Fallback publisher
 
     if not token:
         logger.error("GitHub token not configured")
         raise HTTPException(status_code=500, detail="GitHub token not configured")
 
-
+    # Use provided publisher or fallback to environment variable
+    final_publisher = publisher or default_publisher
+    if not final_publisher:
+        logger.error("Publisher is required")
+        raise HTTPException(status_code=400, detail="Publisher is required")
+
+    # Validate model
+    if model not in VALID_MODELS:
+        logger.error(f"Invalid model: {model}. Valid models: {VALID_MODELS}")
+        raise HTTPException(status_code=400, detail=f"Invalid model. Valid models: {VALID_MODELS}")
+
+    logger.debug(f"Using endpoint: {endpoint}, publisher: {final_publisher}")
     client = AsyncOpenAI(base_url=endpoint, api_key=token)
 
     try:
+        # Include publisher in the request payload (modify as needed based on AI server requirements)
         stream = await client.chat.completions.create(
             messages=[
                 {"role": "system", "content": "You are a helpful assistant named Orion, created by Abdullah Ali"},
@@ -36,7 +55,8 @@ async def generate_ai_response(prompt: str, model: str):
             model=model,
             temperature=1.0,
             top_p=1.0,
-            stream=True
+            stream=True,
+            extra_body={"publisher": final_publisher}  # Add publisher to extra_body
         )
 
         async for chunk in stream:
@@ -52,10 +72,22 @@ async def generate_ai_response(prompt: str, model: str):
 async def generate_response(
     model: str = Query("default-model", description="The AI model to use"),
     prompt: Optional[str] = Query(None, description="The input text prompt for the AI"),
+    publisher: Optional[str] = Query(None, description="Publisher identifier (optional, defaults to DEFAULT_PUBLISHER env var)"),
     request: Optional[GenerateRequest] = None
 ):
-
+    """
+    Generate a streaming AI response based on the provided prompt, model, and publisher.
+
+    - **model**: The AI model to use (e.g., default-model)
+    - **prompt**: The input text prompt for the AI (query param or body)
+    - **publisher**: The publisher identifier (optional, defaults to DEFAULT_PUBLISHER env var)
+    """
+    logger.debug(f"Request received - model: {model}, prompt: {prompt}, publisher: {publisher}, body: {request}")
+
+    # Determine prompt source: query parameter or request body
     final_prompt = prompt if prompt is not None else (request.prompt if request is not None else None)
+    # Determine publisher source: query parameter or request body
+    final_publisher = publisher if publisher is not None else (request.publisher if request is not None else None)
 
     if not final_prompt or not final_prompt.strip():
         logger.error("Prompt cannot be empty")
@@ -66,7 +98,7 @@ async def generate_response(
         raise HTTPException(status_code=400, detail="Model cannot be empty")
 
     return StreamingResponse(
-        generate_ai_response(final_prompt, model),
+        generate_ai_response(final_prompt, model, final_publisher),
         media_type="text/event-stream"
     )
 
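With this change the publisher can be passed either as a query parameter or in the JSON body (the query parameter takes precedence when both are present). Below is a minimal client sketch, assuming the handler is mounted at POST /generate and the Space listens on port 7860; neither the route decorator nor the HTTP method is visible in this diff, so both are assumptions to adjust.

# Minimal client sketch for the updated endpoint. Assumptions (not shown in
# the diff): the handler is reachable at POST /generate on localhost:7860.
import asyncio

import httpx

async def main() -> None:
    async with httpx.AsyncClient(timeout=None) as client:
        # Variant 1: prompt and publisher as query parameters.
        async with client.stream(
            "POST",
            "http://localhost:7860/generate",
            params={"model": "default-model", "prompt": "Hello, Orion", "publisher": "abdullahalioo"},
        ) as response:
            async for chunk in response.aiter_text():
                print(chunk, end="", flush=True)

        # Variant 2: prompt and publisher in the JSON body (GenerateRequest),
        # model still as a query parameter.
        async with client.stream(
            "POST",
            "http://localhost:7860/generate",
            params={"model": "default-model"},
            json={"prompt": "Hello, Orion", "publisher": "abdullahalioo"},
        ) as response:
            async for chunk in response.aiter_text():
                print(chunk, end="", flush=True)

asyncio.run(main())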
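A note on the extra_body argument: in the openai Python SDK (v1.x), extra_body merges additional keys into the JSON payload of the outgoing request, so the inference server sees a top-level publisher field alongside the standard chat-completion fields. Whether the GitHub Models endpoint accepts such a field is an assumption of this change; the sketch below only illustrates the resulting payload shape.

# Illustrative request body produced by the create(...) call above
# (hypothetical shape; exact serialization depends on the SDK version).
payload = {
    "messages": [{"role": "system", "content": "..."}],
    "model": "default-model",
    "temperature": 1.0,
    "top_p": 1.0,
    "stream": True,
    "publisher": "abdullahalioo",  # injected via extra_body
}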