Niansuh committed on
Commit 110efbd · verified · 1 Parent(s): a36f60f

Update main.py

Files changed (1)
  main.py +28 -160
main.py CHANGED
@@ -1,38 +1,26 @@
- from __future__ import annotations
-
  import re
  import random
  import string
  from aiohttp import ClientSession
- from fastapi import FastAPI, HTTPException
- from pydantic import BaseModel
- from typing import List, Dict, Any, Optional

- # Mock implementations for ImageResponse and to_data_uri
- class ImageResponse:
-     def __init__(self, url: str, alt: str):
-         self.url = url
-         self.alt = alt
-
- def to_data_uri(image: Any) -> str:
-     # Placeholder for actual image encoding
-     return "data:image/png;base64,..."  # Replace with actual base64 data
-
- class AsyncGeneratorProvider:
-     pass
-
- class ProviderModelMixin:
-     pass

- class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
      url = "https://www.blackbox.ai"
      api_endpoint = "https://www.blackbox.ai/api/chat"
-     working = True
-     supports_stream = True
-     supports_system_message = True
-     supports_message_history = True

-     default_model = 'blackbox'
      models = [
          'blackbox',
          'gemini-1.5-flash',
@@ -45,170 +33,50 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
          'claude-sonnet-3.5',
      ]

-     agentMode = {
-         'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
-     }
-
-     trendingAgentMode = {
-         "blackbox": {},
-         "gemini-1.5-flash": {'mode': True, 'id': 'Gemini'},
-         "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
-         'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
-         'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405b"},
-     }
-
-     userSelectedModel = {
-         "gpt-4o": "gpt-4o",
-         "gemini-pro": "gemini-pro",
-         'claude-sonnet-3.5': "claude-sonnet-3.5",
-     }
-
-     model_aliases = {
-         "gemini-flash": "gemini-1.5-flash",
-         "flux": "ImageGenerationLV45LJp",
-     }
-
      @classmethod
      def get_model(cls, model: str) -> str:
-         if model in cls.models:
-             return model
-         elif model in cls.userSelectedModel:
-             return model
-         elif model in cls.model_aliases:
-             return cls.model_aliases[model]
-         else:
-             return cls.default_model

      @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: List[Dict[str, str]],
-         proxy: Optional[str] = None,
-         image: Optional[Any] = None,
-         image_name: Optional[str] = None,
-         **kwargs
-     ) -> Any:
          model = cls.get_model(model)
-
          headers = {
              "accept": "*/*",
-             "accept-language": "en-US,en;q=0.9",
-             "cache-control": "no-cache",
              "content-type": "application/json",
-             "origin": cls.url,
-             "pragma": "no-cache",
-             "referer": f"{cls.url}/",
-             "sec-ch-ua": '"Not;A=Brand";v="24", "Chromium";v="128"',
-             "sec-ch-ua-mobile": "?0",
-             "sec-ch-ua-platform": '"Linux"',
-             "sec-fetch-dest": "empty",
-             "sec-fetch-mode": "cors",
-             "sec-fetch-site": "same-origin",
-             "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
          }

-         if model in cls.userSelectedModel:
-             prefix = f"@{cls.userSelectedModel[model]}"
-             if not messages[0]['content'].startswith(prefix):
-                 messages[0]['content'] = f"{prefix} {messages[0]['content']}"
-
          async with ClientSession(headers=headers) as session:
-             if image is not None:
-                 messages[-1]["data"] = {
-                     "fileText": image_name,
-                     "imageBase64": to_data_uri(image)
-                 }
-
              random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
-
              data = {
                  "messages": messages,
                  "id": random_id,
-                 "previewToken": None,
-                 "userId": None,
-                 "codeModelMode": True,
-                 "agentMode": {},
-                 "trendingAgentMode": {},
-                 "userSelectedModel": None,
-                 "userSystemPrompt": None,
-                 "isMicMode": False,
                  "maxTokens": 1024,
-                 "playgroundTopP": 0.9,
-                 "playgroundTemperature": 0.5,
-                 "isChromeExt": False,
-                 "githubToken": None,
-                 "clickedAnswer2": False,
-                 "clickedAnswer3": False,
-                 "clickedForceWebSearch": False,
-                 "visitFromDelta": False,
-                 "mobileClient": False,
-                 "webSearchMode": False,
              }

-             if model in cls.agentMode:
-                 data["agentMode"] = cls.agentMode[model]
-             elif model in cls.trendingAgentMode:
-                 data["trendingAgentMode"] = cls.trendingAgentMode[model]
-             elif model in cls.userSelectedModel:
-                 data["userSelectedModel"] = cls.userSelectedModel[model]
-
-             async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                  response.raise_for_status()
-                 if model == 'ImageGenerationLV45LJp':
-                     response_text = await response.text()
-                     url_match = re.search(r'https://storage\.googleapis\.com/[^\s\)]+', response_text)
-                     if url_match:
-                         image_url = url_match.group(0)
-                         yield ImageResponse(image_url, alt=messages[-1]['content'])
-                     else:
-                         raise Exception("Image URL not found in the response")
-                 else:
-                     async for chunk in response.content.iter_any():
-                         if chunk:
-                             decoded_chunk = chunk.decode()
-                             decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
-                             if decoded_chunk.strip():
-                                 yield decoded_chunk

- # FastAPI app setup
  app = FastAPI()

- class Message(BaseModel):
-     role: str
-     content: str
-
- class ChatRequest(BaseModel):
-     model: str
-     messages: List[Message]
-
  @app.post("/v1/chat/completions")
  async def chat_completions(request: ChatRequest):
      messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

-     # Use an async generator to get the response
      async_generator = Blackbox.create_async_generator(
          model=request.model,
          messages=messages
      )

-     response_content = ""
-     async for chunk in async_generator:
-         response_content += chunk if isinstance(chunk, str) else chunk.content  # Concatenate response

-     return {
-         "id": "chatcmpl-1234",  # Example ID, generate as needed
-         "object": "chat.completion",
-         "created": 1690000000,  # Replace with actual timestamp
-         "model": request.model,
-         "choices": [
-             {
-                 "message": {
-                     "role": "assistant",
-                     "content": response_content
-                 },
-                 "finish_reason": "stop",
-                 "index": 0
-             }
-         ]
-     }
 
+ from fastapi import FastAPI
+ from fastapi.responses import StreamingResponse
+ from pydantic import BaseModel
+ import json
  import re
  import random
  import string
  from aiohttp import ClientSession

+ # Pydantic models for request
+ class Message(BaseModel):
+     role: str
+     content: str

+ class ChatRequest(BaseModel):
+     model: str
+     messages: list[Message]

+ # Blackbox class
+ class Blackbox:
      url = "https://www.blackbox.ai"
      api_endpoint = "https://www.blackbox.ai/api/chat"

      models = [
          'blackbox',
          'gemini-1.5-flash',
          'claude-sonnet-3.5',
      ]

      @classmethod
      def get_model(cls, model: str) -> str:
+         return model if model in cls.models else 'blackbox'

      @classmethod
+     async def create_async_generator(cls, model: str, messages: list):
          model = cls.get_model(model)
+
          headers = {
              "accept": "*/*",
              "content-type": "application/json",
+             "user-agent": "Mozilla/5.0"
          }

          async with ClientSession(headers=headers) as session:
              random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=7))
              data = {
                  "messages": messages,
                  "id": random_id,
                  "maxTokens": 1024,
              }

+             async with session.post(cls.api_endpoint, json=data) as response:
                  response.raise_for_status()
+                 async for chunk in response.content.iter_any():
+                     if chunk:
+                         decoded_chunk = chunk.decode()
+                         # Strip Blackbox version markers such as $@$v=...$@$ from the stream
+                         decoded_chunk = re.sub(r'\$@\$v=[^$]+\$@\$', '', decoded_chunk)
+                         if decoded_chunk.strip():
+                             yield decoded_chunk

+ # FastAPI app
  app = FastAPI()

  @app.post("/v1/chat/completions")
  async def chat_completions(request: ChatRequest):
      messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

      async_generator = Blackbox.create_async_generator(
          model=request.model,
          messages=messages
      )

+     async def event_stream():
+         async for chunk in async_generator:
+             yield f"data: {json.dumps({'choices': [{'message': {'role': 'assistant', 'content': chunk}}]})}\n\n"

+     return StreamingResponse(event_stream(), media_type="text/event-stream")
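
After this change, /v1/chat/completions streams server-sent events rather than returning one JSON body. Below is a minimal consumer sketch, assuming the app is served locally with `uvicorn main:app --port 8000`; the host, port, and example prompt are illustrative and not part of the commit.

# Client sketch for the streaming endpoint above (illustrative only).
# Assumes the server is running locally, e.g. `uvicorn main:app --port 8000`.
import asyncio
import json

from aiohttp import ClientSession

async def main():
    payload = {
        "model": "blackbox",
        "messages": [{"role": "user", "content": "Hello!"}],
    }
    async with ClientSession() as session:
        async with session.post(
            "http://localhost:8000/v1/chat/completions", json=payload
        ) as response:
            response.raise_for_status()
            buffer = ""
            async for raw in response.content.iter_any():
                buffer += raw.decode()
                # SSE events are framed by a blank line ("\n\n")
                while "\n\n" in buffer:
                    event_text, buffer = buffer.split("\n\n", 1)
                    if event_text.startswith("data: "):
                        event = json.loads(event_text[len("data: "):])
                        print(event["choices"][0]["message"]["content"],
                              end="", flush=True)

asyncio.run(main())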