cwhuh committed on
Commit
e841ca6
·
1 Parent(s): 57a34a9

add : structured output

Browse files
.gradio/cached_examples/19/Result/36f4c25c9f9b3f438281/image.webp ADDED
.gradio/cached_examples/19/Result/b62d7595ca8cb25f5456/image.webp ADDED
.gradio/cached_examples/19/indices.csv ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ 1
2
+ 0
.gradio/cached_examples/19/log.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ Result,Seed,timestamp
2
+ "{""path"": "".gradio/cached_examples/19/Result/b62d7595ca8cb25f5456/image.webp"", ""url"": ""/gradio_api/file=/tmp/gradio/6414decced94e8faa0bebd4d105bf6fbcbfb496d221122a4cf1b1a449de31f87/image.webp"", ""size"": null, ""orig_name"": ""image.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",42,2025-03-10 06:01:40.924059
3
+ "{""path"": "".gradio/cached_examples/19/Result/36f4c25c9f9b3f438281/image.webp"", ""url"": ""/gradio_api/file=/tmp/gradio/04be7c4b38d246744b99e6ac36b7ff571528369078988e11fbf5f63b9dee6d68/image.webp"", ""size"": null, ""orig_name"": ""image.webp"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}",42,2025-03-10 06:07:53.994344
__pycache__/live_preview_helpers.cpython-310.pyc CHANGED
Binary files a/__pycache__/live_preview_helpers.cpython-310.pyc and b/__pycache__/live_preview_helpers.cpython-310.pyc differ
 
__pycache__/llm_wrapper.cpython-310.pyc CHANGED
Binary files a/__pycache__/llm_wrapper.cpython-310.pyc and b/__pycache__/llm_wrapper.cpython-310.pyc differ
 
app.py CHANGED
@@ -46,13 +46,14 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
46
  seed = random.randint(0, MAX_SEED)
47
  generator = torch.Generator().manual_seed(seed)
48
 
49
- # refined_prompt = run_gemini(
50
- # target_prompt=prompt,
51
- # prompt_in_path="prompt.json",
52
- # )
 
53
 
54
  for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
55
- prompt=prompt,
56
  guidance_scale=guidance_scale,
57
  num_inference_steps=num_inference_steps,
58
  width=width,
 
46
  seed = random.randint(0, MAX_SEED)
47
  generator = torch.Generator().manual_seed(seed)
48
 
49
+ refined_prompt = run_gemini(
50
+ target_prompt=prompt,
51
+ prompt_in_path="prompt.json",
52
+ output_structure=RefinedPrompt,
53
+ )
54
 
55
  for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
56
+ prompt=refined_prompt.prompt,
57
  guidance_scale=guidance_scale,
58
  num_inference_steps=num_inference_steps,
59
  width=width,
llm_wrapper.py CHANGED
@@ -60,6 +60,7 @@ def encode_image(image_source):
60
  def run_gemini(
61
  target_prompt: str,
62
  prompt_in_path: str,
 
63
  img_in_data: str = None,
64
  model: str = "gemini-2.0-flash",
65
  ) -> str:
@@ -88,6 +89,11 @@ def run_gemini(
88
  chat_completion = client.models.generate_content(
89
  model=model,
90
  contents=input_content,
 
 
 
 
 
91
  )
92
 
93
  chat_output = chat_completion.parsed
 
60
  def run_gemini(
61
  target_prompt: str,
62
  prompt_in_path: str,
63
+ output_structure,
64
  img_in_data: str = None,
65
  model: str = "gemini-2.0-flash",
66
  ) -> str:
 
89
  chat_completion = client.models.generate_content(
90
  model=model,
91
  contents=input_content,
92
+ config={
93
+ "system_instruction": system_prompt,
94
+ "response_mime_type": "application/json",
95
+ "response_schema": output_structure
96
+ }
97
  )
98
 
99
  chat_output = chat_completion.parsed