Niansuh committed on
Commit
8c3ee02
·
verified ·
1 Parent(s): 3973519

Create provider/gizai.py

Browse files
Files changed (1) hide show
  1. api/provider/gizai.py +153 -0
api/provider/gizai.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # api/provider/gizai.py
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from aiohttp import ClientSession
7
+
8
+ from ..typing import AsyncResult, Messages
9
+ from ..image import ImageResponse
10
+ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
11
+ from .helper import format_prompt
12
+
13
class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
    """Async provider for the GizAI inference API.

    Supports both chat completion models (streamed as a single text chunk)
    and image-generation models (yielded as ``ImageResponse`` objects).
    """

    url = "https://app.giz.ai/assistant/"
    api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
    working = True

    supports_system_message = True
    supports_message_history = True

    # Chat models (internal API model ids)
    default_model = 'chat-gemini-flash'
    chat_models = [
        default_model,
        'chat-gemini-pro',
        'chat-gpt4m',
        'chat-gpt4',
        'claude-sonnet',
        'claude-haiku',
        'llama-3-70b',
        'llama-3-8b',
        'mistral-large',
        'chat-o1-mini'
    ]

    # Image models (internal API model ids)
    image_models = [
        'flux1',
        'sdxl',
        'sd',
        'sd35',
    ]

    models = [*chat_models, *image_models]

    # Maps public-facing alias names to the internal API model ids above.
    model_aliases = {
        # Chat model aliases
        "gemini-flash": "chat-gemini-flash",
        "gemini-pro": "chat-gemini-pro",
        "gpt-4o-mini": "chat-gpt4m",
        "gpt-4o": "chat-gpt4",
        "claude-3.5-sonnet": "claude-sonnet",
        "claude-3-haiku": "claude-haiku",
        "llama-3.1-70b": "llama-3-70b",
        "llama-3.1-8b": "llama-3-8b",
        "o1-mini": "chat-o1-mini",
        # Image model aliases
        "sd-1.5": "sd",
        "sd-3.5": "sd35",
        "flux-schnell": "flux1",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to an internal API model id.

        Exact ids pass through unchanged; known aliases are mapped;
        anything else falls back to ``default_model``.
        """
        if model in cls.models:
            return model
        # dict.get with a default replaces the original if/elif/else chain.
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    def is_image_model(cls, model: str) -> bool:
        """Return True if *model* (an internal id) is an image-generation model."""
        return model in cls.image_models

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield results from the GizAI API for chat or image models.

        Args:
            model: Requested model name or alias; resolved via ``get_model``.
            messages: Conversation history; for image models only the last
                message's ``content`` is used as the prompt.
            proxy: Optional proxy URL forwarded to aiohttp.
            **kwargs: Ignored; accepted for provider-interface compatibility.

        Yields:
            ``ImageResponse`` objects (image models) or the completion text
            string (chat models).

        Raises:
            aiohttp.ClientResponseError: On a non-2xx HTTP response.
            RuntimeError: When image generation does not complete successfully
                (previously this case silently produced an empty generator).
        """
        model = cls.get_model(model)

        # Browser-like headers; the API appears to expect same-origin requests.
        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://app.giz.ai',
            'Pragma': 'no-cache',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"'
        }

        async with ClientSession() as session:
            if cls.is_image_model(model):
                # Image generation: only the final message supplies the prompt.
                prompt = messages[-1]["content"]
                data = {
                    "model": model,
                    "input": {
                        # Width/height are strings in the original payload —
                        # presumably what the API expects; kept verbatim.
                        "width": "1024",
                        "height": "1024",
                        "steps": 4,
                        "output_format": "webp",
                        "batch_size": 1,
                        "mode": "plan",
                        "prompt": prompt
                    }
                }
                async with session.post(
                    cls.api_endpoint,
                    headers=headers,
                    data=json.dumps(data),
                    proxy=proxy
                ) as response:
                    response.raise_for_status()
                    response_data = await response.json()
                    if response_data.get('status') == 'completed' and response_data.get('output'):
                        for url in response_data['output']:
                            yield ImageResponse(images=url, alt="Generated Image")
                    else:
                        # Fix: the original silently yielded nothing here,
                        # leaving callers with an empty generator and no error.
                        raise RuntimeError(f"GizAI image generation failed: {response_data}")
            else:
                # Chat completion: the full history is flattened into one
                # human-role message via format_prompt.
                data = {
                    "model": model,
                    "input": {
                        "messages": [
                            {
                                "type": "human",
                                "content": format_prompt(messages)
                            }
                        ],
                        "mode": "plan"
                    },
                    "noStream": True
                }
                async with session.post(
                    cls.api_endpoint,
                    headers=headers,
                    data=json.dumps(data),
                    proxy=proxy
                ) as response:
                    response.raise_for_status()
                    result = await response.json()
                    # Missing 'output' degrades to an empty string rather than a KeyError.
                    yield result.get('output', '')