Update app.py
app.py (CHANGED)
```diff
@@ -17,6 +17,30 @@ except KeyError:
     raise ValueError("Please set the GEMINI_API_KEY environment variable.")
 client = genai.Client(api_key=api_key)
 
+# Define safety settings to disable all filters
+SAFETY_SETTINGS = [
+    types.SafetySetting(
+        category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
+        threshold=types.HarmBlockThreshold.BLOCK_NONE,
+    ),
+    types.SafetySetting(
+        category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+        threshold=types.HarmBlockThreshold.BLOCK_NONE,
+    ),
+    types.SafetySetting(
+        category=types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+        threshold=types.HarmBlockThreshold.BLOCK_NONE,
+    ),
+    types.SafetySetting(
+        category=types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+        threshold=types.HarmBlockThreshold.BLOCK_NONE,
+    ),
+    types.SafetySetting(
+        category=types.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY,
+        threshold=types.HarmBlockThreshold.BLOCK_NONE,
+    ),
+]
+
 def clean_response_text(response_text):
     """
     Clean the API response by removing Markdown code block markers.
@@ -65,7 +89,10 @@ def generate_ideas(user_input):
        response = client.models.generate_content(
            model='gemini-2.0-flash',
            contents=[prompt],
-           config=types.GenerateContentConfig(
+           config=types.GenerateContentConfig(
+               temperature=1.2,
+               safety_settings=SAFETY_SETTINGS
+           )
        )
        print(f"Raw response for ideas: {response.text}")  # Debugging
        if not response.text or response.text.isspace():
@@ -91,7 +118,10 @@ def generate_ideas(user_input):
        validation_response = client.models.generate_content(
            model='gemini-2.0-flash-lite',
            contents=[validation_prompt],
-           config=types.GenerateContentConfig(
+           config=types.GenerateContentConfig(
+               temperature=0.0,  # Low temperature for deterministic output
+               safety_settings=SAFETY_SETTINGS
+           )
        )
        print(f"Validation response for idea '{idea}': {validation_response.text}")  # Debugging
        if not validation_response.text or validation_response.text.isspace():
@@ -169,7 +199,10 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
            response = client.models.generate_content(
                model='gemini-2.0-flash',
                contents=[prompt],
-               config=types.GenerateContentConfig(
+               config=types.GenerateContentConfig(
+                   temperature=1.2,
+                   safety_settings=SAFETY_SETTINGS
+               )
            )
            print(f"Raw response for item (image attempt {image_attempt + 1}, total attempt {total_attempts}): {response.text}")  # Debugging
            if not response.text or response.text.isspace():
@@ -192,7 +225,8 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
                prompt=image_prompt,
                config=types.GenerateImagesConfig(
                    aspect_ratio="9:16",
-                   number_of_images=1
+                   number_of_images=1,
+                   safety_settings=SAFETY_SETTINGS
                )
            )
            if imagen.generated_images and len(imagen.generated_images) > 0:
@@ -272,7 +306,8 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
                    aspect_ratio="9:16",
                    number_of_videos=1,
                    duration_seconds=8,
-                   negative_prompt="blurry, low quality, text, letters"
+                   negative_prompt="blurry, low quality, text, letters",
+                   safety_settings=SAFETY_SETTINGS
                )
            )
            # Wait for video to generate
```
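For context, the sketch below shows how a SAFETY_SETTINGS list of this shape is passed to a text-generation call through types.GenerateContentConfig in the google-genai SDK, mirroring the generate_ideas hunk above. It is a minimal, self-contained example rather than the actual app.py code: the prompt string is invented, and only two of the five harm categories are repeated to keep it short.

```python
import os

from google import genai
from google.genai import types

client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

# Same shape as the SAFETY_SETTINGS list added in this commit:
# each harm category is mapped to BLOCK_NONE.
SAFETY_SETTINGS = [
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold=types.HarmBlockThreshold.BLOCK_NONE,
    ),
    types.SafetySetting(
        category=types.HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        threshold=types.HarmBlockThreshold.BLOCK_NONE,
    ),
]

# Hypothetical prompt; the real prompts are built inside generate_ideas()/generate_item().
prompt = "List three short video ideas about urban gardening."

response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[prompt],
    config=types.GenerateContentConfig(
        temperature=1.2,  # higher temperature for more varied ideas
        safety_settings=SAFETY_SETTINGS,
    ),
)
print(response.text)
```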
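The image hunk only touches the GenerateImagesConfig arguments, so the surrounding call is not visible in this diff. The sketch below reconstructs a plausible generate_images call around it using the google-genai SDK; the Imagen model name, the prompt, and the byte-extraction step are assumptions rather than code from this commit, and the new safety_settings argument is left out here.

```python
import os

from google import genai
from google.genai import types

client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

# Hypothetical prompt; app.py builds image_prompt from the generated item.
image_prompt = "A vertical poster-style photo of a rooftop garden at sunset."

imagen = client.models.generate_images(
    model="imagen-3.0-generate-002",  # assumed model name; not visible in this diff
    prompt=image_prompt,
    config=types.GenerateImagesConfig(
        aspect_ratio="9:16",
        number_of_images=1,
    ),
)

# Save the first returned image, if any, to disk.
if imagen.generated_images:
    image_bytes = imagen.generated_images[0].image.image_bytes
    with open("item.png", "wb") as f:
        f.write(image_bytes)
```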
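Similarly, the video hunk ends just before the "# Wait for video to generate" comment. The sketch below shows one way the long-running Veo operation can be polled with the google-genai SDK; the model name, prompt, polling interval, and download handling are assumptions, not taken from app.py, and the new safety_settings argument is again omitted.

```python
import os
import time

from google import genai
from google.genai import types

client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

operation = client.models.generate_videos(
    model="veo-2.0-generate-001",  # assumed model name; not visible in this diff
    prompt="A slow pan across a rooftop garden at sunset, vertical framing.",
    config=types.GenerateVideosConfig(
        aspect_ratio="9:16",
        number_of_videos=1,
        duration_seconds=8,
        negative_prompt="blurry, low quality, text, letters",
    ),
)

# Wait for the video to generate: video jobs are long-running operations
# that must be polled until they report done.
while not operation.done:
    time.sleep(10)
    operation = client.operations.get(operation)

video = operation.response.generated_videos[0]
client.files.download(file=video.video)
video.video.save("item.mp4")
```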