Niansuh committed on
Commit 5f8de6f · verified · 1 Parent(s): 3dbe7a0

Update api/utils.py

Files changed (1):
  1. api/utils.py +21 -228
api/utils.py CHANGED
@@ -1,229 +1,22 @@
-# api/utils.py
-
-from datetime import datetime
-import json
-from typing import AsyncGenerator, Union
-
-import aiohttp
 from fastapi import HTTPException
-from api.config import GIZAI_API_ENDPOINT, GIZAI_BASE_URL
-from api.models import ChatRequest, ImageResponseModel, ChatCompletionResponse
-from api.logger import setup_logger
-
-logger = setup_logger(__name__)
-
-class GizAI:
-    # Chat models
-    default_model = 'chat-gemini-flash'
-    chat_models = [
-        default_model,
-        'chat-gemini-pro',
-        'chat-gpt4m',
-        'chat-gpt4',
-        'claude-sonnet',
-        'claude-haiku',
-        'llama-3-70b',
-        'llama-3-8b',
-        'mistral-large',
-        'chat-o1-mini'
-    ]
-
-    # Image models
-    image_models = [
-        'flux1',
-        'sdxl',
-        'sd',
-        'sd35',
-    ]
-
-    models = [*chat_models, *image_models]
-
-    model_aliases = {
-        # Chat model aliases
-        "gemini-flash": "chat-gemini-flash",
-        "gemini-pro": "chat-gemini-pro",
-        "gpt-4o-mini": "chat-gpt4m",
-        "gpt-4o": "chat-gpt4",
-        "claude-3.5-sonnet": "claude-sonnet",
-        "claude-3-haiku": "claude-haiku",
-        "llama-3.1-70b": "llama-3-70b",
-        "llama-3.1-8b": "llama-3-8b",
-        "o1-mini": "chat-o1-mini",
-        # Image model aliases
-        "sd-1.5": "sd",
-        "sd-3.5": "sd35",
-        "flux-schnell": "flux1",
-    }
-
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
-    @classmethod
-    def is_image_model(cls, model: str) -> bool:
-        return model in cls.image_models
-
-async def process_gizai_stream_response(request: ChatRequest, model: str) -> AsyncGenerator[str, None]:
-    async with aiohttp.ClientSession() as session:
-        # Set up headers
-        headers = {
-            'Accept': 'application/json, text/plain, */*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://app.giz.ai',
-            'Pragma': 'no-cache',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"'
-        }
-
-        if GizAI.is_image_model(model):
-            # Image generation logic (streaming might not make sense here)
-            raise HTTPException(status_code=400, detail="Image generation does not support streaming.")
-        else:
-            # Chat completion logic
-            messages_formatted = [
-                {
-                    "type": "human",
-                    "content": msg.content if isinstance(msg.content, str) else msg.content[0].get("text", "")
-                } for msg in request.messages
-            ]
-            data = {
-                "model": model,
-                "input": {
-                    "messages": messages_formatted,
-                    "mode": "plan"
-                },
-                "noStream": False  # Enable streaming
-            }
-            try:
-                async with session.post(
-                    GIZAI_API_ENDPOINT,
-                    headers=headers,
-                    json=data
-                ) as response:
-                    response.raise_for_status()
-                    async for line in response.content:
-                        if line:
-                            decoded_line = line.decode('utf-8').strip()
-                            if decoded_line.startswith("data:"):
-                                content = decoded_line.replace("data: ", "")
-                                yield f"data: {content}\n\n"
-                    # Indicate the end of the stream
-                    yield "data: [DONE]\n\n"
-            except aiohttp.ClientResponseError as e:
-                logger.error(f"HTTP error occurred: {e.status} - {e.message}")
-                raise HTTPException(status_code=e.status, detail=str(e))
-            except Exception as e:
-                logger.error(f"Unexpected error: {str(e)}")
-                raise HTTPException(status_code=500, detail=str(e))
-
-async def process_gizai_non_stream_response(request: ChatRequest, model: str) -> Union[ImageResponseModel, ChatCompletionResponse]:
-    async with aiohttp.ClientSession() as session:
-        # Set up headers
-        headers = {
-            'Accept': 'application/json, text/plain, */*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'Cache-Control': 'no-cache',
-            'Connection': 'keep-alive',
-            'Content-Type': 'application/json',
-            'Origin': 'https://app.giz.ai',
-            'Pragma': 'no-cache',
-            'Sec-Fetch-Dest': 'empty',
-            'Sec-Fetch-Mode': 'cors',
-            'Sec-Fetch-Site': 'same-origin',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Linux"'
-        }
-
-        if GizAI.is_image_model(model):
-            # Image generation logic
-            prompt = request.messages[-1].content if isinstance(request.messages[-1].content, str) else request.messages[-1].content[0].get("text", "")
-            data = {
-                "model": model,
-                "input": {
-                    "width": "1024",
-                    "height": "1024",
-                    "steps": 4,
-                    "output_format": "webp",
-                    "batch_size": 1,
-                    "mode": "plan",
-                    "prompt": prompt
-                }
-            }
-            try:
-                async with session.post(
-                    GIZAI_API_ENDPOINT,
-                    headers=headers,
-                    json=data
-                ) as response:
-                    response.raise_for_status()
-                    response_data = await response.json()
-                    if response_data.get('status') == 'completed' and response_data.get('output'):
-                        images = response_data['output']
-                        return ImageResponseModel(images=images, alt="Generated Image")
-                    else:
-                        raise HTTPException(status_code=500, detail="Image generation failed.")
-            except aiohttp.ClientResponseError as e:
-                logger.error(f"HTTP error occurred: {e.status} - {e.message}")
-                raise HTTPException(status_code=e.status, detail=str(e))
-            except Exception as e:
-                logger.error(f"Unexpected error: {str(e)}")
-                raise HTTPException(status_code=500, detail=str(e))
-        else:
-            # Chat completion logic
-            messages_formatted = [
-                {
-                    "type": "human",
-                    "content": msg.content if isinstance(msg.content, str) else msg.content[0].get("text", "")
-                } for msg in request.messages
-            ]
-            data = {
-                "model": model,
-                "input": {
-                    "messages": messages_formatted,
-                    "mode": "plan"
-                },
-                "noStream": True  # Disable streaming
-            }
-            try:
-                async with session.post(
-                    GIZAI_API_ENDPOINT,
-                    headers=headers,
-                    json=data
-                ) as response:
-                    response.raise_for_status()
-                    result = await response.json()
-                    return ChatCompletionResponse(
-                        id=f"chatcmpl-{uuid.uuid4()}",  # NOTE: `uuid` is never imported in this file
-                        object="chat.completion",
-                        created=int(datetime.now().timestamp()),
-                        model=model,
-                        choices=[
-                            {
-                                "index": 0,
-                                "message": {"role": "assistant", "content": result.get('output', '')},
-                                "finish_reason": "stop",
-                            }
-                        ],
-                        usage=None,
-                    )
-            except aiohttp.ClientResponseError as e:
-                logger.error(f"HTTP error occurred: {e.status} - {e.message}")
-                raise HTTPException(status_code=e.status, detail=str(e))
-            except Exception as e:
-                logger.error(f"Unexpected error: {str(e)}")
-                raise HTTPException(status_code=500, detail=str(e))
+from api.config import MODEL_PROVIDER_MAPPING
+from api.models import ChatRequest
+from api.provider import blackboxai, gizai
+
+async def process_streaming_response(request: ChatRequest):
+    provider_name = MODEL_PROVIDER_MAPPING.get(request.model)
+    if provider_name == 'blackboxai':
+        return await blackboxai.process_streaming_response(request)
+    elif provider_name == 'gizai':
+        return await gizai.process_streaming_response(request)
+    else:
+        raise HTTPException(status_code=400, detail=f"Model {request.model} is not supported for streaming.")
+
+async def process_non_streaming_response(request: ChatRequest):
+    provider_name = MODEL_PROVIDER_MAPPING.get(request.model)
+    if provider_name == 'blackboxai':
+        return await blackboxai.process_non_streaming_response(request)
+    elif provider_name == 'gizai':
+        return await gizai.process_non_streaming_response(request)
+    else:
+        raise HTTPException(status_code=400, detail=f"Model {request.model} is not supported.")
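
After this change, api/utils.py is a thin dispatcher: the GizAI-specific logic removed above presumably moves into api/provider/gizai.py, and routing is keyed off MODEL_PROVIDER_MAPPING from api.config. That mapping is not shown in this commit; a minimal sketch of the shape the dispatcher assumes, with GizAI model names reused from the removed GizAI class and the blackboxai entry as a purely illustrative placeholder:

# api/config.py -- hypothetical sketch, not part of this commit
# Maps each requestable model name to the provider module that serves it.
MODEL_PROVIDER_MAPPING = {
    # GizAI chat and image models (names from the removed GizAI class)
    "chat-gemini-flash": "gizai",
    "chat-gpt4": "gizai",
    "claude-sonnet": "gizai",
    "flux1": "gizai",
    # BlackboxAI models (placeholder name, assumed)
    "blackboxai-model": "blackboxai",
}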
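
And a hypothetical caller, to show how the two entry points divide the work. The route path, the stream flag on ChatRequest, and the assumption that the streaming dispatcher resolves to an async iterator of SSE chunks are all illustrative, not taken from this commit:

# main.py -- hypothetical usage sketch, not part of this commit
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from api.models import ChatRequest
from api.utils import process_non_streaming_response, process_streaming_response

app = FastAPI()

@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    # Assumes ChatRequest carries an OpenAI-style `stream` flag.
    if getattr(request, "stream", False):
        # Assumes the dispatcher resolves to the provider's SSE async iterator.
        stream = await process_streaming_response(request)
        return StreamingResponse(stream, media_type="text/event-stream")
    return await process_non_streaming_response(request)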