wusize committed on
Commit
1efd005
·
verified ·
1 Parent(s): 047453c

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -37
app.py CHANGED
@@ -164,20 +164,47 @@ css = '''
164
  '''
165
  with gr.Blocks(css=css) as demo:
166
  gr.Markdown("# Harmon 1.5B")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
167
  with gr.Tab("Multimodal Understanding"):
168
  gr.Markdown(value="## Multimodal Understanding")
169
  image_input = gr.Image()
170
  with gr.Column():
171
  question_input = gr.Textbox(label="Question")
172
-
173
  understanding_button = gr.Button("Chat")
174
  understanding_output = gr.Textbox(label="Response")
175
-
176
  with gr.Accordion("Advanced options", open=False):
177
- und_seed_input = gr.Number(label="Seed", precision=0, value=42)
178
- top_p = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.05, label="top_p")
179
- temperature = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.05, label="temperature")
180
-
181
  examples_inpainting = gr.Examples(
182
  label="Multimodal Understanding examples",
183
  examples=[
@@ -192,44 +219,18 @@ with gr.Blocks(css=css) as demo:
192
  ],
193
  inputs=[question_input, image_input],
194
  )
195
-
196
- with gr.Tab("Text-to-Image Generation"):
197
- gr.Markdown(value="## Text-to-Image Generation")
198
 
199
- prompt_input = gr.Textbox(label="Prompt.")
200
-
201
- generation_button = gr.Button("Generate Images")
202
-
203
- image_output = gr.Gallery(label="Generated Images", columns=4, rows=1)
204
 
205
- with gr.Accordion("Advanced options", open=False):
206
- with gr.Row():
207
- cfg_weight_input = gr.Slider(minimum=1, maximum=10, value=3, step=0.5, label="CFG Weight")
208
- t2i_temperature = gr.Slider(minimum=0, maximum=1, value=1.0, step=0.05, label="temperature")
209
- seed_input = gr.Number(label="Seed (Optional)", precision=0, value=1234)
210
-
211
- examples_t2i = gr.Examples(
212
- label="Text to image generation examples.",
213
- examples=[
214
- "a dog on the left and a cat on the right.",
215
- "a photo of a pink stop sign.",
216
- "Paper artwork, layered paper, colorful Chinese dragon surrounded by clouds.",
217
- "A golden retriever lying peacefully on a wooden porch, with autumn leaves scattered around.",
218
- ],
219
- inputs=prompt_input,
220
- )
221
-
222
  understanding_button.click(
223
  multimodal_understanding,
224
  inputs=[image_input, question_input, und_seed_input, top_p, temperature],
225
  outputs=understanding_output
226
  )
227
-
228
- generation_button.click(
229
- fn=generate_image,
230
- inputs=[prompt_input, seed_input, cfg_weight_input, t2i_temperature],
231
- outputs=image_output
232
- )
233
 
234
  demo.launch(share=True)
235
 
 
164
  '''
165
  with gr.Blocks(css=css) as demo:
166
  gr.Markdown("# Harmon 1.5B")
167
+
168
+ with gr.Tab("Text-to-Image Generation"):
169
+ gr.Markdown(value="## Text-to-Image Generation")
170
+
171
+ prompt_input = gr.Textbox(label="Prompt.")
172
+
173
+ generation_button = gr.Button("Generate Images")
174
+
175
+ image_output = gr.Gallery(label="Generated Images", columns=4, rows=1)
176
+
177
+ with gr.Accordion("Advanced options", open=False):
178
+ with gr.Row():
179
+ cfg_weight_input = gr.Slider(minimum=1, maximum=10, value=5, step=0.5, label="CFG Weight")
180
+ t2i_temperature = gr.Slider(minimum=0, maximum=10, value=1.0, step=0.05, label="temperature")
181
+ seed_input = gr.Number(label="Seed (Optional)", precision=0, value=1234)
182
+
183
+ examples_t2i = gr.Examples(
184
+ label="Text to image generation examples.",
185
+ examples=[
186
+ "a dog on the left and a cat on the right.",
187
+ "a photo of a pink stop sign.",
188
+ "Paper artwork, layered paper, colorful Chinese dragon surrounded by clouds.",
189
+ "a golden retriever lying peacefully on a wooden porch, with autumn leaves scattered around.",
190
+ ],
191
+ inputs=prompt_input,
192
+ )
193
+
194
  with gr.Tab("Multimodal Understanding"):
195
  gr.Markdown(value="## Multimodal Understanding")
196
  image_input = gr.Image()
197
  with gr.Column():
198
  question_input = gr.Textbox(label="Question")
199
+
200
  understanding_button = gr.Button("Chat")
201
  understanding_output = gr.Textbox(label="Response")
202
+
203
  with gr.Accordion("Advanced options", open=False):
204
+ und_seed_input = gr.Number(label="Seed", precision=0, value=42)
205
+ top_p = gr.Slider(minimum=0, maximum=1, value=0.95, step=0.05, label="top_p")
206
+ temperature = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.05, label="temperature")
207
+
208
  examples_inpainting = gr.Examples(
209
  label="Multimodal Understanding examples",
210
  examples=[
 
219
  ],
220
  inputs=[question_input, image_input],
221
  )
 
 
 
222
 
223
+ generation_button.click(
224
+ fn=generate_image,
225
+ inputs=[prompt_input, seed_input, cfg_weight_input, t2i_temperature],
226
+ outputs=image_output
227
+ )
228
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
  understanding_button.click(
230
  multimodal_understanding,
231
  inputs=[image_input, question_input, und_seed_input, top_p, temperature],
232
  outputs=understanding_output
233
  )
 
 
 
 
 
 
234
 
235
  demo.launch(share=True)
236