masteroko committed · verified
Commit d424e5a · parent: 413f2a0

Update app.py

Files changed (1):
  1. app.py +149 -75
app.py CHANGED
@@ -7,8 +7,7 @@ import os
 from io import BytesIO
 import html
 import re
-import PIL
-from PIL import Image  # Make sure PIL is imported for working with images
+
 
 
 class Prodia:
@@ -17,63 +16,70 @@ class Prodia:
         self.headers = {
             "X-Prodia-Key": api_key
         }
-
+
     def generate(self, params):
-        return self._post(f"{self.base}/sd/generate", params).json()
-
+        response = self._post(f"{self.base}/sd/generate", params)
+        return response.json()
+
     def transform(self, params):
-        return self._post(f"{self.base}/sd/transform", params).json()
-
+        response = self._post(f"{self.base}/sd/transform", params)
+        return response.json()
+
     def controlnet(self, params):
-        return self._post(f"{self.base}/sd/controlnet", params).json()
-
+        response = self._post(f"{self.base}/sd/controlnet", params)
+        return response.json()
+
     def get_job(self, job_id):
-        return self._get(f"{self.base}/job/{job_id}").json()
+        response = self._get(f"{self.base}/job/{job_id}")
+        return response.json()
 
     def wait(self, job):
         job_result = job
+
         while job_result['status'] not in ['succeeded', 'failed']:
             time.sleep(0.25)
             job_result = self.get_job(job['job'])
+
         return job_result
 
     def list_models(self):
-        return self._get(f"{self.base}/sd/models").json()
+        response = self._get(f"{self.base}/sd/models")
+        return response.json()
 
     def list_samplers(self):
-        return self._get(f"{self.base}/sd/samplers").json()
+        response = self._get(f"{self.base}/sd/samplers")
+        return response.json()
 
-    # Change: added retries on network failures
     def _post(self, url, params):
-        headers = {**self.headers, "Content-Type": "application/json"}
-        for _ in range(3):
-            try:
-                response = requests.post(url, headers=headers, data=json.dumps(params))
-                response.raise_for_status()
-                return response
-            except requests.exceptions.RequestException as e:
-                print(f"Request failed: {e}, retrying...")
-                time.sleep(1)
-        raise Exception("Failed after 3 attempts")
-
-    # Change: added retries on network failures
+        headers = {
+            **self.headers,
+            "Content-Type": "application/json"
+        }
+        response = requests.post(url, headers=headers, data=json.dumps(params))
+
+        if response.status_code != 200:
+            raise Exception(f"Bad Prodia Response: {response.status_code}")
+
+        return response
+
     def _get(self, url):
-        for _ in range(3):
-            try:
-                response = requests.get(url, headers=self.headers)
-                response.raise_for_status()
-                return response
-            except requests.exceptions.RequestException as e:
-                print(f"Request failed: {e}, retrying...")
-                time.sleep(1)
-        raise Exception("Failed after 3 attempts")
-
-
-def image_to_base64(image, format="PNG"):
+        response = requests.get(url, headers=self.headers)
+
+        if response.status_code != 200:
+            raise Exception(f"Bad Prodia Response: {response.status_code}")
+
+        return response
+
+
+def image_to_base64(image):
+    # Convert the image to bytes
     buffered = BytesIO()
-    image.save(buffered, format=format)
-    img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
-    return img_str
+    image.save(buffered, format="PNG")  # You can change format to PNG if needed
+
+    # Encode the bytes to base64
+    img_str = base64.b64encode(buffered.getvalue())
+
+    return img_str.decode('utf-8')  # Convert bytes to string
 
 
 def remove_id_and_ext(text):
@@ -86,44 +92,68 @@ def remove_id_and_ext(text):
     return text
 
 
-# Change: optimized the get_data function
 def get_data(text):
+    results = {}
     patterns = {
         'prompt': r'(.*)',
         'negative_prompt': r'Negative prompt: (.*)',
         'steps': r'Steps: (\d+),',
         'seed': r'Seed: (\d+),',
-        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
+        'sampler': r'Sampler:\s*([^\s,]+(?:\s+[^\s,]+)*)',
         'model': r'Model:\s*([^\s,]+)',
         'cfg_scale': r'CFG scale:\s*([\d\.]+)',
         'size': r'Size:\s*([0-9]+x[0-9]+)'
-    }
-    results = {key: re.search(pattern, text).group(1) if re.search(pattern, text) else None for key, pattern in patterns.items()}
-    if results['size']:
-        results['w'], results['h'] = map(int, results['size'].split("x"))
+    }
+    for key in ['prompt', 'negative_prompt', 'steps', 'seed', 'sampler', 'model', 'cfg_scale', 'size']:
+        match = re.search(patterns[key], text)
+        if match:
+            results[key] = match.group(1)
+        else:
+            results[key] = None
+    if results['size'] is not None:
+        w, h = results['size'].split("x")
+        results['w'] = w
+        results['h'] = h
     else:
-        results['w'], results['h'] = None, None
+        results['w'] = None
+        results['h'] = None
     return results
 
 
-# Change: optimized the send_to_txt2img function
 def send_to_txt2img(image):
+
     result = {tabs: gr.update(selected="t2i")}
+
     try:
         text = image.info['parameters']
         data = get_data(text)
-        fields = ['prompt', 'negative_prompt', 'steps', 'seed', 'cfg_scale', 'w', 'h', 'sampler', 'model']
-        for field in fields:
-            result[field] = gr.update(value=data[field]) if data[field] is not None else gr.update()
+        result[prompt] = gr.update(value=data['prompt'])
+        result[negative_prompt] = gr.update(value=data['negative_prompt']) if data['negative_prompt'] is not None else gr.update()
+        result[steps] = gr.update(value=int(data['steps'])) if data['steps'] is not None else gr.update()
+        result[seed] = gr.update(value=int(data['seed'])) if data['seed'] is not None else gr.update()
+        result[cfg_scale] = gr.update(value=float(data['cfg_scale'])) if data['cfg_scale'] is not None else gr.update()
+        result[width] = gr.update(value=int(data['w'])) if data['w'] is not None else gr.update()
+        result[height] = gr.update(value=int(data['h'])) if data['h'] is not None else gr.update()
+        result[sampler] = gr.update(value=data['sampler']) if data['sampler'] is not None else gr.update()
+        if model in model_names:
+            result[model] = gr.update(value=model_names[model])
+        else:
+            result[model] = gr.update()
         return result
+
     except Exception as e:
         print(e)
+
         return result
 
 
 prodia_client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))
 model_list = prodia_client.list_models()
-model_names = {remove_id_and_ext(model_name): model_name for model_name in model_list}
+model_names = {}
+
+for model_name in model_list:
+    name_without_ext = remove_id_and_ext(model_name)
+    model_names[name_without_ext] = model_name
 
 
 def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed):
@@ -138,7 +168,9 @@ def txt2img(prompt, negative_prompt, model, steps, sampler, cfg_scale, width, he
         "height": height,
         "seed": seed
     })
+
     job = prodia_client.wait(result)
+
     return job["imageUrl"]
 
 
@@ -156,7 +188,9 @@ def img2img(input_image, denoising, prompt, negative_prompt, model, steps, sampl
         "height": height,
         "seed": seed
     })
+
     job = prodia_client.wait(result)
+
     return job["imageUrl"]
 
 
@@ -169,7 +203,7 @@ css = """
 with gr.Blocks(css=css) as demo:
     with gr.Row():
         with gr.Column(scale=6):
-            model = gr.Dropdown(interactive=True, value="childrensStories_v1ToonAnime.safetensors [2ec7b88b]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
+            model = gr.Dropdown(interactive=True,value="childrensStories_v1ToonAnime.safetensors [2ec7b88b]", show_label=True, label="Stable Diffusion Checkpoint", choices=prodia_client.list_models())
 
         with gr.Column(scale=1):
             gr.Markdown(elem_id="powered-by-prodia", value="AUTOMATIC1111 Stable Diffusion Web UI переделано masteroko.<br>Powered by [Prodia](https://prodia.com).<br>For more features and faster generation times check out our [API Docs](https://docs.prodia.com/reference/getting-started-guide).")
@@ -208,46 +242,86 @@ with gr.Blocks(css=css) as demo:
                 with gr.Column(scale=2):
                     image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")
 
-            # Change: added a limit on concurrent requests
-            text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed], outputs=image_output, concurrency_limit=1024)
+            text_button.click(txt2img, inputs=[prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height,
+                                               seed], outputs=image_output, concurrency_limit=1024)
 
         with gr.Tab("img2img", id='i2i'):
             with gr.Row():
                 with gr.Column(scale=6, min_width=600):
-                    image = gr.Image(show_label=False)
-                    prompt = gr.Textbox(placeholder="Prompt", show_label=False, lines=3)
-                    negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3)
+                    i2i_prompt = gr.Textbox("тута переделай фотку ", placeholder="Prompt", show_label=False, lines=3)
+                    i2i_negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=False, lines=3, value="3d, cartoon, anime, (deformed eyes, nose, ears, nose), bad anatomy, ugly")
                 with gr.Column():
-                    text_button = gr.Button("Трансформировать", variant='primary', elem_id="generate")
+                    i2i_text_button = gr.Button("Сгенерировать", variant='primary', elem_id="generate")
 
             with gr.Row():
                 with gr.Column(scale=3):
                     with gr.Tab("Генерация"):
+                        i2i_image_input = gr.Image(type="pil")
+
                         with gr.Row():
                             with gr.Column(scale=1):
-                                sampler = gr.Dropdown(value="DPM++ 2M SDE Exponential", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
+                                i2i_sampler = gr.Dropdown(value="DPM++ 3M SDE Exponential", show_label=True, label="Sampling Method", choices=prodia_client.list_samplers())
 
                             with gr.Column(scale=1):
-                                steps = gr.Slider(label="количество обработок", minimum=1, maximum=100, value=20, step=1)
-
+                                i2i_steps = gr.Slider(label="количество обработок", minimum=1, maximum=100, value=20, step=1)
+
                         with gr.Row():
                             with gr.Column(scale=1):
-                                width = gr.Slider(label="Ширина", maximum=1024, value=512, step=8)
-                                height = gr.Slider(label="Высота", maximum=1024, value=512, step=8)
+                                i2i_width = gr.Slider(label="Ширина", maximum=1024, value=512, step=8)
+                                i2i_height = gr.Slider(label="Высота", maximum=1024, value=512, step=8)
 
                             with gr.Column(scale=1):
-                                batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
-                                batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
+                                i2i_batch_size = gr.Slider(label="Batch Size", maximum=1, value=1)
+                                i2i_batch_count = gr.Slider(label="Batch Count", maximum=1, value=1)
 
-                        cfg_scale = gr.Slider(label="CFG Scale(степень фантазии ии)", minimum=1, maximum=20, value=7, step=1)
-                        seed = gr.Number(label="Семя рандома", value=-1)
-                        denoising = gr.Slider(label="Denoising", value=0.5)
+                        i2i_cfg_scale = gr.Slider(label="CFG Scale(степень фантазии ии)", minimum=1, maximum=20, value=7, step=1)
+                        i2i_denoising = gr.Slider(label="сила изменения фото", minimum=0, maximum=5, value=0.7, step=0.05)
+                        i2i_seed = gr.Number(label="Семя рандома ", value=-1)
 
                 with gr.Column(scale=2):
-                    image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")
+                    i2i_image_output = gr.Image(value="https://images.prodia.xyz/8ede1a7c-c0ee-4ded-987d-6ffed35fc477.png")
 
-            # Change: added a limit on concurrent requests
-            text_button.click(img2img, inputs=[image, denoising, prompt, negative_prompt, model, steps, sampler, cfg_scale, width, height, seed], outputs=image_output, concurrency_limit=1024)
-
-# Launch the Gradio interface
-demo.launch()
+            i2i_text_button.click(img2img, inputs=[i2i_image_input, i2i_denoising, i2i_prompt, i2i_negative_prompt,
+                                                   model, i2i_steps, i2i_sampler, i2i_cfg_scale, i2i_width, i2i_height,
+                                                   i2i_seed], outputs=i2i_image_output, concurrency_limit=1024)
+
+        with gr.Tab("PNG Info"):
+            def plaintext_to_html(text, classname=None):
+                content = "<br>\n".join(html.escape(x) for x in text.split('\n'))
+
+                return f"<p class='{classname}'>{content}</p>" if classname else f"<p>{content}</p>"
+
+
+            def get_exif_data(image):
+                items = image.info
+
+                info = ''
+                for key, text in items.items():
+                    info += f"""
+                    <div>
+                    <p><b>{plaintext_to_html(str(key))}</b></p>
+                    <p>{plaintext_to_html(str(text))}</p>
+                    </div>
+                    """.strip()+"\n"
+
+                if len(info) == 0:
+                    message = "Nothing found in the image."
+                    info = f"<div><p>{message}<p></div>"
+
+                return info
+
+            with gr.Row():
+                with gr.Column():
+                    image_input = gr.Image(type="pil")
+
+                with gr.Column():
+                    exif_output = gr.HTML(label="EXIF Data")
+                    send_to_txt2img_btn = gr.Button("копировать в данные в txt2img")
+
+            image_input.upload(get_exif_data, inputs=[image_input], outputs=exif_output)
+            send_to_txt2img_btn.click(send_to_txt2img, inputs=[image_input], outputs=[tabs, prompt, negative_prompt,
+                                                                                      steps, seed, model, sampler,
+                                                                                      width, height, cfg_scale],
+                                      concurrency_limit=1024)
+
+demo.queue(max_size=80, api_open=False).launch(max_threads=8192, show_api=False)
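For context, here is a minimal usage sketch of the Prodia client as refactored in this commit, exercised outside the Gradio UI. It is not part of the commit: it assumes the `Prodia` class from app.py above is already in scope, that `PRODIA_API_KEY` is set, and the prompt, parameter values, and output filename are purely illustrative.

```python
import os

import requests

# Assumes the Prodia class defined in app.py above is available in this scope.
client = Prodia(api_key=os.getenv("PRODIA_API_KEY"))

# Pick any model and sampler reported by the API (the same calls the dropdowns use).
model = client.list_models()[0]
sampler = client.list_samplers()[0]

# Start a txt2img job, then poll until Prodia reports 'succeeded' or 'failed',
# mirroring what txt2img() does before reading job["imageUrl"].
job = client.generate({
    "prompt": "a watercolor landscape",  # illustrative values only
    "negative_prompt": "",
    "model": model,
    "sampler": sampler,
    "steps": 20,
    "cfg_scale": 7,
    "width": 512,
    "height": 512,
    "seed": -1,
})
result = client.wait(job)

if result["status"] == "succeeded":
    # Download the finished image from the URL Prodia returns.
    with open("output.png", "wb") as f:
        f.write(requests.get(result["imageUrl"]).content)
```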