Fix app.py
app.py  CHANGED
@@ -42,6 +42,7 @@ def create_prompt_for_image_generation(user_prompt: str) -> str:
 
     Args:
         user_prompt: The user's text prompt to be processed by the language model.
+        Output type: str
     """
     # Prompt parts
     prefix="Generate a detailed and structured FLUX-Schnell-compatible prompt based on the following short description of an image: "
@@ -65,11 +66,11 @@ def create_prompt_for_image_generation(user_prompt: str) -> str:
     The color palette includes rich greens, warm browns for the cabin, and soft gray mist.
     The perspective is slightly elevated as if viewed from a drone camera at sunrise,
     capturing golden hour lighting for soft shadows and warm highlights.
-    The aspect ratio is
+    The aspect ratio is 1:1, using seed 42 for reproducibility.
     '''
     """
     model = HfApiModel(
-        max_tokens=
+        max_tokens=512,
         temperature=1.0,
         model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
         custom_role_conversions=None,
@@ -77,13 +78,8 @@ def create_prompt_for_image_generation(user_prompt: str) -> str:
     prompt = prefix + user_prompt + '. ' + postfix
     try:
         response = model.generate(
-            prompt=prompt, temperature=1., max_tokens=
-
-        response = openai.Completion.create(
-            engine=model_name,  # Use 'engine' for older models, 'model' for newer
-            prompt=prompt
-        )
-        return response.choices[0].text.strip()
+            prompt=prompt, temperature=1., max_tokens=512)
+        return response['choices'][0]['text']
 
     except Exception as e:
         return f"Error during LLM call: {str(e)}"
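For anyone trying the patched function locally, here is a minimal usage sketch. It assumes the Space's app.py is importable from the working directory and that a valid Hugging Face API token is configured for HfApiModel; the example description is illustrative and not part of the commit.

    # Minimal usage sketch, not part of the commit.
    # Assumes app.py (this Space) is on the import path and that
    # create_prompt_for_image_generation returns either the expanded
    # FLUX-Schnell prompt or an error string, as the patched code does.
    from app import create_prompt_for_image_generation

    if __name__ == "__main__":
        short_description = "a cozy wooden cabin in a misty pine forest at sunrise"
        flux_prompt = create_prompt_for_image_generation(short_description)

        if flux_prompt.startswith("Error during LLM call:"):
            # The function catches exceptions and returns an error message
            # instead of raising, so callers must check the returned string.
            print(f"Prompt expansion failed: {flux_prompt}")
        else:
            print("Expanded FLUX-Schnell prompt:")
            print(flux_prompt)

Because the except clause returns the error as a string rather than re-raising, callers should inspect the returned value before passing it on to image generation.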