Auronsays committed
Commit ce727ee · verified · 1 Parent(s): 95e4375

Update app.py

Files changed (1): app.py +32 -2
app.py CHANGED
@@ -1,7 +1,7 @@
 import json
 import os
 import fastapi
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse, HTMLResponse
 from fastapi.middleware.cors import CORSMiddleware
 from openai import AsyncOpenAI
 import uvicorn
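
Note: the hunks below add calls to logging.info and logging.basicConfig, but nothing in this commit adds import logging. Unless app.py already imports it outside the context shown here, the module will raise NameError at startup. A minimal sketch of the import block with the fix applied, assuming the import really is missing:

```python
import json
import logging  # assumed missing; required by the logging calls this commit adds
import os

import fastapi
from fastapi.responses import StreamingResponse, HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
from openai import AsyncOpenAI
import uvicorn
```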
@@ -45,11 +45,36 @@ class ChatCompletionRequest(BaseModel):
     model: str
     temperature: Optional[float] = 0.7
     max_tokens: Optional[int] = None
-    stream: Optional[bool] = True  # Default to True for ElevenLabs
+    stream: Optional[bool] = True
     user_id: Optional[str] = None
     extra_headers: Optional[Dict[str, str]] = None
     extra_body: Optional[Dict[str, Any]] = None
 
+# Root path handler
+@app.get("/", response_class=HTMLResponse)
+async def root():
+    return """
+    <html>
+        <head>
+            <title>OpenRouter Proxy Server</title>
+        </head>
+        <body>
+            <h1>OpenRouter Proxy Server</h1>
+            <p>This is a proxy server for OpenRouter API. Available endpoints:</p>
+            <ul>
+                <li>/v1/chat/completions - Chat completions endpoint</li>
+                <li>/v1/models - List available models</li>
+                <li>/health - Health check</li>
+            </ul>
+        </body>
+    </html>
+    """
+
+# Robots.txt handler
+@app.get("/robots.txt")
+async def robots():
+    return "User-agent: *\nDisallow: /"
+
 @app.post("/v1/chat/completions")
 async def create_chat_completion(request: ChatCompletionRequest) -> StreamingResponse:
     try:
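
Note: FastAPI's default response class is JSONResponse, so the robots handler above actually serves application/json with the string quoted and the newline escaped, not the text/plain that crawlers expect. A suggested variant (not what this commit ships) with an explicit response class:

```python
from fastapi.responses import PlainTextResponse

@app.get("/robots.txt", response_class=PlainTextResponse)
async def robots() -> str:
    # Served as text/plain; the default JSONResponse would wrap the
    # body in quotes and escape the newline.
    return "User-agent: *\nDisallow: /"
```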
@@ -71,6 +96,8 @@ async def create_chat_completion(request: ChatCompletionRequest) -> StreamingResponse:
         if not oai_request["model"].startswith("openai/") and "/" not in oai_request["model"]:
             oai_request["model"] = "nousresearch/hermes-3-llama-3.1-405b"
 
+        logging.info(f"Sending request to OpenRouter with model: {oai_request['model']}")
+
         # Create the chat completion
         chat_completion_coroutine = await oai_client.chat.completions.create(**oai_request)
 
@@ -122,5 +149,8 @@ async def list_models():
         ]
     }
 
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=7860)
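
Note: the logging.basicConfig call lands near the bottom of the file, after the route definitions. That still works, since module-level code runs at import time, before uvicorn starts serving, so the logging.info call in the request handler is emitted. The conventional placement is near the top, as in this sketch:

```python
import logging

# Configure logging once, before the app and its routes are defined;
# runtime behavior is the same as the commit's bottom-of-file placement.
logging.basicConfig(level=logging.INFO)
```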
 
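
A quick way to sanity-check the new routes is FastAPI's TestClient (assumes app.py exposes its FastAPI instance as app, and that httpx, which TestClient requires, is installed):

```python
from fastapi.testclient import TestClient

from app import app  # assumption: app.py exposes its FastAPI instance as `app`

client = TestClient(app)

# New root landing page should return the HTML added in this commit
resp = client.get("/")
assert resp.status_code == 200
assert "OpenRouter Proxy Server" in resp.text

# New robots.txt route responds (JSON-encoded unless PlainTextResponse is used)
resp = client.get("/robots.txt")
assert resp.status_code == 200
```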