cwhuh committed
Commit 63fb7f7 · 1 Parent(s): 586b09a

fix : pydantic error

__pycache__/live_preview_helpers.cpython-310.pyc CHANGED
Binary files a/__pycache__/live_preview_helpers.cpython-310.pyc and b/__pycache__/live_preview_helpers.cpython-310.pyc differ
 
__pycache__/llm_wrapper.cpython-310.pyc ADDED
Binary file (3.08 kB)
 
app.py CHANGED
@@ -15,11 +15,6 @@ import subprocess
 subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 
 
-from pydantic import BaseModel
-
-class RefinedPrompt(BaseModel):
-    prompt: str
-
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -49,11 +44,10 @@ def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidan
     refined_prompt = run_gemini(
         target_prompt=prompt,
         prompt_in_path="prompt.json",
-        output_structure=RefinedPrompt,
     )
 
     for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
-        prompt=refined_prompt.prompt,
+        prompt=refined_prompt,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         width=width,

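For context, a minimal sketch of the structured-output pattern this commit removes, assuming the google-genai SDK that llm_wrapper.py wraps; the client setup and example prompt here are illustrative, not taken from the repo. Passing a pydantic model as response_schema makes the SDK parse the JSON reply into a RefinedPrompt instance, which is why the caller previously read refined_prompt.prompt:

from google import genai
from pydantic import BaseModel

class RefinedPrompt(BaseModel):
    prompt: str

client = genai.Client()  # reads GOOGLE_API_KEY from the environment

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents="Refine this image prompt: a cat floating in space",
    config={
        "response_mime_type": "application/json",
        "response_schema": RefinedPrompt,  # pydantic model supplies the JSON schema
    },
)

refined = response.parsed  # a RefinedPrompt instance, or None if parsing failed
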
llm_wrapper.py CHANGED
@@ -60,7 +60,6 @@ def encode_image(image_source):
 def run_gemini(
     target_prompt: str,
     prompt_in_path: str,
-    output_structure,
     img_in_data: str = None,
     model: str = "gemini-2.0-flash",
 ) -> str:
@@ -89,11 +88,6 @@ def run_gemini(
     chat_completion = client.models.generate_content(
         model=model,
         contents=input_content,
-        config={
-            "system_instruction": system_prompt,
-            "response_mime_type": "application/json",
-            "response_schema": output_structure,
-        }
     )
 
     chat_output = chat_completion.parsed

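The schema-free call that remains after this change, again as an illustrative sketch: the removed config block also carried the system_instruction, so that is dropped from the request as well. Note that in the google-genai SDK, response.parsed is populated only when a response_schema is configured, so a schema-free flow typically reads response.text instead:

from google import genai

client = genai.Client()  # reads GOOGLE_API_KEY from the environment

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents="Refine this image prompt: a cat floating in space",
)

refined_prompt = response.text  # plain string; no pydantic parsing involved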