Update app.py
app.py CHANGED
@@ -16,13 +16,6 @@ app = FastAPI(
     version="1.0.0"
 )
 
-# Define valid models (replace with actual models supported by https://models.github.ai/inference)
-VALID_MODELS = [
-    "DeepSeek-V3-0324",  # Try without deepseek/ prefix
-    "gpt-3.5-turbo",  # Placeholder
-    "llama-3",  # Placeholder
-    "mistral-7b"  # Placeholder
-]
 
 class GenerateRequest(BaseModel):
     prompt: str
@@ -40,18 +33,7 @@ async def generate_ai_response(prompt: str, model: str, publisher: Optional[str]
         logger.error("GitHub token not configured")
         raise HTTPException(status_code=500, detail="GitHub token not configured")
 
-
-    final_publisher = publisher or default_publisher
-    if not final_publisher:
-        logger.error("Publisher is required")
-        raise HTTPException(status_code=400, detail="Publisher is required")
-
-    # Validate model
-    if model not in VALID_MODELS:
-        logger.error(f"Invalid model: {model}. Valid models: {VALID_MODELS}")
-        raise HTTPException(status_code=400, detail=f"Invalid model. Valid models: {VALID_MODELS}")
-
-    logger.debug(f"Using endpoint: {endpoint}, publisher: {final_publisher}")
+
     client = AsyncOpenAI(
         base_url=endpoint,
         api_key=token,
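
For context, here is a minimal sketch of what the affected part of app.py looks like after this commit. Only the lines visible in the diff context are certain; the endpoint value, token lookup, FastAPI kwargs, and the chat-completions call are assumptions added to make the sketch self-contained and runnable.

# Sketch of app.py after this commit (assumed surrounding code, see note above).
import os
import logging
from typing import Optional

from fastapi import FastAPI, HTTPException
from openai import AsyncOpenAI
from pydantic import BaseModel

logger = logging.getLogger(__name__)

app = FastAPI(
    version="1.0.0"  # other FastAPI(...) kwargs from the original are omitted here
)

class GenerateRequest(BaseModel):
    prompt: str

async def generate_ai_response(prompt: str, model: str, publisher: Optional[str] = None):
    endpoint = "https://models.github.ai/inference"  # endpoint mentioned in the removed comment
    token = os.getenv("GITHUB_TOKEN")  # assumed env var name; not shown in the diff
    if not token:
        logger.error("GitHub token not configured")
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    # The VALID_MODELS list and the publisher/model checks were removed in this
    # commit, so the model string is now passed straight to the inference API.
    client = AsyncOpenAI(
        base_url=endpoint,
        api_key=token,
    )
    response = await client.chat.completions.create(  # assumed call; not shown in the diff
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content

With the client-side whitelist gone, an unsupported model name is no longer rejected with a 400 up front; any such error now surfaces from the upstream inference endpoint instead.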