Update app.py
app.py CHANGED
@@ -10,10 +10,19 @@ from typing import Optional
 logging.basicConfig(level=logging.DEBUG)
 logger = logging.getLogger(__name__)

-app = FastAPI(
+app = FastAPI(
+    title="Orion AI API",
+    description="API for streaming AI responses with model selection and publisher via URL",
+    version="1.0.0"
+)

-# Define valid models (replace with actual models supported by
-VALID_MODELS = [
+# Define valid models (replace with actual models supported by https://models.github.ai/inference)
+VALID_MODELS = [
+    "deepseek/DeepSeek-V3-0324",  # Added based on your request
+    "gpt-3.5-turbo",  # Common model (placeholder)
+    "llama-3",  # Common model (placeholder)
+    "mistral-7b"  # Common model (placeholder)
+]

 class GenerateRequest(BaseModel):
     prompt: str
@@ -46,7 +55,7 @@ async def generate_ai_response(prompt: str, model: str, publisher: Optional[str]
     client = AsyncOpenAI(base_url=endpoint, api_key=token)

     try:
-        # Include publisher in the request payload
+        # Include publisher in the request payload
         stream = await client.chat.completions.create(
             messages=[
                 {"role": "system", "content": "You are a helpful assistant named Orion, created by Abdullah Ali"},
@@ -70,7 +79,7 @@ async def generate_ai_response(prompt: str, model: str, publisher: Optional[str]

 @app.post("/generate", summary="Generate AI response", response_description="Streaming AI response")
 async def generate_response(
-    model: str = Query("
+    model: str = Query("deepseek/DeepSeek-V3-0324", description="The AI model to use"),
     prompt: Optional[str] = Query(None, description="The input text prompt for the AI"),
     publisher: Optional[str] = Query(None, description="Publisher identifier (optional, defaults to DEFAULT_PUBLISHER env var)"),
     request: Optional[GenerateRequest] = None
@@ -78,7 +87,7 @@ async def generate_response(
     """
     Generate a streaming AI response based on the provided prompt, model, and publisher.

-    - **model**: The AI model to use (e.g.,
+    - **model**: The AI model to use (e.g., deepseek/DeepSeek-V3-0324)
     - **prompt**: The input text prompt for the AI (query param or body)
     - **publisher**: The publisher identifier (optional, defaults to DEFAULT_PUBLISHER env var)
     """
@@ -102,5 +111,12 @@ async def generate_response(
         media_type="text/event-stream"
     )

+@app.get("/models", summary="List available models")
+async def list_models():
+    """
+    List all available models supported by the AI server.
+    """
+    return {"models": VALID_MODELS}
+
 def get_app():
     return app
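
After this change, /generate defaults to the deepseek/DeepSeek-V3-0324 model and still streams its output with media_type="text/event-stream". A minimal client sketch for exercising the endpoint; the base URL is an assumption for local testing, not something defined in this diff:

import requests

BASE_URL = "http://localhost:8000"  # assumed local dev address, not part of the diff

# POST /generate with the new default model; the response arrives as a
# Server-Sent Events stream, so it is read line by line.
with requests.post(
    f"{BASE_URL}/generate",
    params={"model": "deepseek/DeepSeek-V3-0324", "prompt": "Hello, Orion!"},
    stream=True,
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if line:
            print(line)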
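
The new GET /models route simply returns VALID_MODELS, so clients can discover what to pass as the model query parameter instead of hard-coding it. A quick check under the same local-URL assumption:

import requests

BASE_URL = "http://localhost:8000"  # assumed local dev address, not part of the diff

resp = requests.get(f"{BASE_URL}/models")
resp.raise_for_status()
print(resp.json())  # expected shape: {"models": ["deepseek/DeepSeek-V3-0324", ...]}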