Flux prompt max tokens 512->384
app.py
CHANGED
@@ -67,7 +67,7 @@ def create_prompt_for_image_generation(user_prompt: str) -> str:
     The aspect ratio is 1:1, using seed 42 for reproducibility.
     """
     model = HfApiModel(
-        max_tokens=512,
+        max_tokens=384,
         temperature=1.0,
         model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
         # custom_role_conversions=None,
@@ -84,6 +84,7 @@ def create_prompt_for_image_generation(user_prompt: str) -> str:
         return response
 
     except Exception as e:
+        print(f"Error during LLM call: {str(e)}")
         return f"Error during LLM call: {str(e)}"
 
 
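For context, a minimal sketch of how the patched function reads after these two hunks, assuming the standard smolagents HfApiModel call convention; the prompt text, the message plumbing, and the import are assumptions rather than the Space's exact code.

from smolagents import HfApiModel

def create_prompt_for_image_generation(user_prompt: str) -> str:
    # Prompt body is an assumption; only its final lines appear in the diff.
    prompt = f"""Rewrite this request as a detailed Flux image prompt: {user_prompt}
    The aspect ratio is 1:1, using seed 42 for reproducibility.
    """
    model = HfApiModel(
        max_tokens=384,  # reduced from 512 by this commit
        temperature=1.0,
        model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    )
    try:
        # Assumed call convention: a list of chat messages in, a
        # ChatMessage with a .content string out.
        response = model([{"role": "user", "content": prompt}]).content
        return response
    except Exception as e:
        # Added by this commit: log the failure to the Space logs in
        # addition to returning it to the caller.
        print(f"Error during LLM call: {str(e)}")
        return f"Error during LLM call: {str(e)}"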
@@ -113,7 +114,7 @@ agent = CodeAgent(
     verbosity_level=1,
     grammar=None,
     planning_interval=None,
-    name=None,
+    name="Agent-Unit1",
     description=None,
     prompt_templates=prompt_templates
 )
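And a hedged sketch of the agent construction this hunk patches, assuming the stock smolagents first-agent template; the tools list, max_steps value, and the prompts.yaml loading are assumptions, as only the diffed keyword arguments are confirmed by this commit.

import yaml
from smolagents import CodeAgent, FinalAnswerTool, HfApiModel

# Template convention (assumption): prompt templates come from prompts.yaml.
with open("prompts.yaml") as stream:
    prompt_templates = yaml.safe_load(stream)

model = HfApiModel(model_id='Qwen/Qwen2.5-Coder-32B-Instruct')

agent = CodeAgent(
    model=model,
    tools=[FinalAnswerTool()],  # assumption: the template's default tool
    max_steps=6,                # assumption: template default
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name="Agent-Unit1",         # added by this commit: gives the agent a stable name
    description=None,
    prompt_templates=prompt_templates,
)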