Niansuh committed · verified
Commit 0d812a5 · 1 Parent(s): 4e59b46

Update main.py

Files changed (1):
  1. main.py +32 -116
main.py CHANGED
@@ -1,14 +1,13 @@
 from __future__ import annotations
-
 import re
 import random
 import string
-import uuid # Already added
-from datetime import datetime # Add this line
 from aiohttp import ClientSession
 from fastapi import FastAPI, HTTPException
 from pydantic import BaseModel
-from typing import List, Dict, Any, Optional
+from typing import List, Dict, Any, Optional, Union, AsyncGenerator
+import json  # used by the streaming endpoint below
+import asyncio
 
 # Mock implementations for ImageResponse and to_data_uri
 class ImageResponse:
@@ -17,7 +16,6 @@ class ImageResponse:
         self.alt = alt
 
 def to_data_uri(image: Any) -> str:
-    # Placeholder for actual image encoding
     return "data:image/png;base64,..."  # Replace with actual base64 data
 
 class AsyncGeneratorProvider:
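
Note: the `to_data_uri` mock above returns a hard-coded placeholder. A working replacement might look like the sketch below; it assumes `image` arrives as raw bytes and that PNG is the right MIME type (both are assumptions, not part of this commit):

    import base64
    from typing import Any

    def to_data_uri(image: Any) -> str:
        # Assumes `image` is raw bytes; a fuller version would detect the MIME type
        encoded = base64.b64encode(image).decode("ascii")
        return f"data:image/png;base64,{encoded}"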
@@ -29,10 +27,6 @@ class ProviderModelMixin:
 class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://www.blackbox.ai"
     api_endpoint = "https://www.blackbox.ai/api/chat"
-    working = True
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
 
     default_model = 'blackbox'
     models = [
@@ -47,39 +41,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'claude-sonnet-3.5',
     ]
 
-    agentMode = {
-        'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
-    }
-
-    trendingAgentMode = {
-        "blackbox": {},
-        "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
-        "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
-        'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
-        'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
-    }
-
-    userSelectedModel = {
-        "gpt-4o": "gpt-4o",
-        "gemini-pro": "gemini-pro",
-        'claude-sonnet-3.5': "claude-sonnet-3.5",
-    }
-
-    model_aliases = {
-        "gemini-flash": "gemini-1.5-flash",
-        "flux": "ImageGenerationLV45LJp",
-    }
-
     @classmethod
     def get_model(cls, model: str) -> str:
         if model in cls.models:
             return model
-        elif model in cls.userSelectedModel:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
+        return cls.default_model
 
     @classmethod
     async def create_async_generator(
@@ -89,89 +55,39 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: Optional[str] = None,
         image: Optional[Any] = None,
         image_name: Optional[str] = None,
+        stream: bool = False,
         **kwargs
-    ) -> Any:
+    ) -> AsyncGenerator[Union[str, Dict[str, Any]], None]:
         model = cls.get_model(model)
 
         headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "cache-control": "no-cache",
-            "content-type": "application/json",
-            "origin": cls.url,
-            "pragma": "no-cache",
-            "referer": f"{cls.url}/",
-            "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
+            "Content-Type": "application/json",
+            "User-Agent": "Mozilla/5.0"
         }
 
-        if model in cls.userSelectedModel:
-            prefix = f"@{cls.userSelectedModel[model]}"
-            if not messages[0]['content'].startswith(prefix):
-                messages[0]['content'] = f"{prefix} {messages[0]['content']}"
-
         async with ClientSession(headers=headers) as session:
             if image is not None:
                 messages[-1]["data"] = {
                     "fileText": image_name,
                     "imageBase64": to_data_uri(image)
                 }
-
-            random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
 
             data = {
+                "model": model,
                 "messages": messages,
-                "id": random_id,
-                "previewToken": None,
-                "userId": None,
-                "codeModelMode": True,
-                "agentMode": {},
-                "trendingAgentMode": {},
-                "userSelectedModel": None,
-                "userSystemPrompt": None,
-                "isMicMode": False,
-                "maxTokens": 1024,
-                "playgroundTopP": 0.9,
-                "playgroundTemperature": 0.5,
-                "isChromeExt": False,
-                "githubToken": None,
-                "clickedAnswer2": False,
-                "clickedAnswer3": False,
-                "clickedForceWebSearch": False,
-                "visitFromDelta": False,
-                "mobileClient": False,
-                "webSearchMode": False,
+                "max_tokens": 1024,
+                "temperature": 0.7,
+                "stream": stream
             }
 
-            if model in cls.agentMode:
-                data["agentMode"] = cls.agentMode[model]
-            elif model in cls.trendingAgentMode:
-                data["trendingAgentMode"] = cls.trendingAgentMode[model]
-            elif model in cls.userSelectedModel:
-                data["userSelectedModel"] = cls.userSelectedModel[model]
-
             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                 response.raise_for_status()
-                if model == 'ImageGenerationLV45LJp':
-                    response_text = await response.text()
-                    url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
-                    if url_match:
-                        image_url = url_match.group(0)
-                        yield ImageResponse(image_url, alt=messages[-1]['content'])
-                    else:
-                        raise Exception("Image URL not found in the response")
-                else:
+                if stream:
                     async for chunk in response.content.iter_any():
-                        if chunk:
-                            decoded_chunk = chunk.decode()
-                            decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                            if decoded_chunk.strip():
-                                yield decoded_chunk
+                        yield chunk.decode()
+                else:
+                    # An async generator cannot `return` a value; yield the parsed body instead
+                    yield await response.json()
 
 # FastAPI app setup
 app = FastAPI()
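
Note on the non-streaming branch above: a Python async generator may not `return` a value (that is a SyntaxError), so the parsed JSON body is yielded instead and the caller collects it. A minimal illustration of the pattern:

    # async def bad():
    #     yield "chunk"
    #     return {"done": True}  # SyntaxError: 'return' with value in async generator

    async def good():
        yield "chunk"
        yield {"done": True}  # yield the final value; the caller keeps the last item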
@@ -183,8 +99,9 @@ class Message(BaseModel):
 class ChatRequest(BaseModel):
     model: str
     messages: List[Message]
-
-from fastapi.responses import Response
+    stream: Optional[bool] = False  # Add stream option
+
+from fastapi.responses import StreamingResponse  # needed for the streaming branch below
 
 @app.post("/v1/chat/completions")
 async def chat_completions(request: ChatRequest):
@@ -192,19 +109,24 @@
 
     async_generator = Blackbox.create_async_generator(
         model=request.model,
-        messages=messages
+        messages=messages,
+        stream=request.stream  # Pass the stream flag
     )
 
-    response_content = ""
-    async for chunk in async_generator:
-        if isinstance(chunk, str):
-            response_content += chunk
-        else:
-            response_content += chunk.content
-
-    # Clean up the response to get only the plain text content
-    start = response_content.find('"content": "') + len('"content": "')
-    end = response_content.find('"', start)
-    clean_content = response_content[start:end].replace('\\n', '\n')  # Handle newline characters
-
-    return Response(content=clean_content.strip(), media_type="text/plain")  # Return plain text
+    if request.stream:
+        async def event_stream():
+            async for chunk in async_generator:
+                # Each chunk is a decoded string; wrap it in one JSON envelope per line
+                yield json.dumps({"choices": [{"text": chunk.strip()}]}) + "\n"
+
+        return StreamingResponse(event_stream(), media_type="application/json")
+
+    # Handle non-streaming response: the generator yields a single parsed JSON dict
+    response = None
+    async for item in async_generator:
+        response = item
+    if isinstance(response, dict) and response.get("choices"):
+        clean_content = response["choices"][0]["message"]["content"]
+        return {"choices": [{"text": clean_content.strip()}]}
+
+    raise HTTPException(status_code=500, detail="No valid response received.")
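
For reference, a usage sketch for the updated endpoint. The host, port, and message payload here are illustrative assumptions, and `Message` is assumed to carry `role` and `content` fields:

    import asyncio
    import json

    from aiohttp import ClientSession

    async def main() -> None:
        payload = {
            "model": "blackbox",
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": True,  # set to False for a single JSON reply
        }
        async with ClientSession() as session:
            async with session.post("http://localhost:8000/v1/chat/completions", json=payload) as resp:
                resp.raise_for_status()
                if payload["stream"]:
                    # The endpoint emits one JSON envelope per line
                    async for raw_line in resp.content:
                        line = raw_line.decode().strip()
                        if line:
                            print(json.loads(line)["choices"][0]["text"])
                else:
                    body = await resp.json()
                    print(body["choices"][0]["text"])

    asyncio.run(main())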