Update app.py
app.py
CHANGED
@@ -9,6 +9,10 @@ import json
 import random
 import urllib.parse
 import time
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Initialize the Google Generative AI client with the API key from environment variables
 try:
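With `basicConfig` at DEBUG level and the format above, every `logging.debug`/`warning`/`error` call added in this commit is written with a timestamp and level. A minimal sketch of what that produces (the sample output line is illustrative, not captured from a real run):

```python
import logging

# Same configuration as the diff: DEBUG level, "timestamp - LEVEL - message".
# Note that basicConfig only takes effect the first time it runs in a process.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')

# Example call as used throughout app.py; emits something like:
# 2024-01-01 12:00:00,000 - DEBUG - Starting generate_item
logging.debug("Starting generate_item")
```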
@@ -79,6 +83,7 @@ def clean_response_text(response_text):
 """
 Clean the API response by removing Markdown code block markers.
 """
+logging.debug("Cleaning response text")
 cleaned_text = response_text.strip()
 if cleaned_text.startswith("```json"):
 cleaned_text = cleaned_text[len("```json"):].strip()
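The hunk only shows the opening ```json fence being stripped; the rest of the helper is outside the diff. A minimal sketch of the whole function, assuming (not confirmed by this diff) that it also handles a bare opening fence and a trailing fence:

````python
def clean_response_text(response_text: str) -> str:
    """Strip Markdown code-fence markers so the model output parses as JSON."""
    cleaned_text = response_text.strip()
    if cleaned_text.startswith("```json"):
        cleaned_text = cleaned_text[len("```json"):].strip()
    elif cleaned_text.startswith("```"):   # assumption: plain opening fence handled too
        cleaned_text = cleaned_text[len("```"):].strip()
    if cleaned_text.endswith("```"):       # assumption: trailing fence stripped as well
        cleaned_text = cleaned_text[:-len("```")].strip()
    return cleaned_text
````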
@@ -90,6 +95,7 @@ def generate_ideas(user_input):
 """
 Generate a diverse set of ideas based on the user's input concept using the LLM.
 """
+logging.debug(f"Generating ideas for input: {user_input}")
 prompt = f"""
 The user has provided the concept: "{user_input}". You must generate 5 diverse and creative ideas for a TikTok video that are directly and explicitly related to "{user_input}".
 Each idea must clearly incorporate and focus on the core theme of "{user_input}" without deviating into unrelated topics.
@@ -106,15 +112,17 @@ def generate_ideas(user_input):
 safety_settings=SAFETY_SETTINGS
 )
 )
+logging.debug(f"Generate ideas response: {response.text}")
 if not response.text or response.text.isspace():
 raise ValueError("Empty response from API")
 cleaned_text = clean_response_text(response.text)
 response_json = json.loads(cleaned_text)
 if 'ideas' not in response_json or not isinstance(response_json['ideas'], list) or len(response_json['ideas']) != 5:
 raise ValueError("Invalid JSON format: 'ideas' key missing, not a list, or incorrect length")
+logging.debug(f"Generated ideas: {response_json['ideas']}")
 return response_json['ideas']
 except Exception as e:
-
+logging.error(f"Error generating ideas: {e}")
 return [
 f"A dramatic {user_input} scene with cinematic lighting",
 f"A close-up of {user_input} in a futuristic setting",
@@ -128,12 +136,15 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 Generate a single feed item with progress updates.
 Yields progress stage and message for UI updates.
 """
+logging.debug("Starting generate_item")
 video_base64 = None
 max_total_attempts = 3
-
+timeout_seconds = 60 # Timeout for API calls
+
 total_attempts = 0
 while total_attempts < max_total_attempts:
 total_attempts += 1
+logging.debug(f"Total attempt {total_attempts}")
 
 yield {"stage": "initializing", "message": random.choice(PROGRESS_STAGES["initializing"]), "progress": 10}
 
@@ -143,6 +154,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 image_prompt = None
 
 for image_attempt in range(max_retries):
+logging.debug(f"Image attempt {image_attempt + 1}")
 yield {"stage": "creating_image", "message": random.choice(PROGRESS_STAGES["creating_image"]), "progress": 30 + (image_attempt * 10)}
 
 selected_idea = random.choice(ideas)
@@ -155,6 +167,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 Ensure the response is strictly in JSON format.
 """
 try:
+start_time = time.time()
 response = client.models.generate_content(
 model='gemini-2.0-flash-lite',
 contents=[prompt],
@@ -163,16 +176,20 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 safety_settings=SAFETY_SETTINGS
 )
 )
+if time.time() - start_time > timeout_seconds:
+raise TimeoutError("Image caption generation timed out")
+logging.debug(f"Generate content response: {response.text}")
 cleaned_text = clean_response_text(response.text)
 response_json = json.loads(cleaned_text)
 text = response_json['caption']
 image_prompt = response_json['image_prompt']
 except Exception as e:
-
+logging.error(f"Error generating item: {e}")
 text = f"Amazing {user_input}! 🔥 #{user_input.replace(' ', '')}"
 image_prompt = f"A vivid scene of {selected_idea} related to {user_input}, in a vibrant pop art style, no text or letters"
 
 try:
+start_time = time.time()
 imagen = client.models.generate_images(
 model='imagen-3.0-generate-002',
 prompt=image_prompt,
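Worth noting about the new guard: `client.models.generate_content` is a blocking call, so the elapsed-time check can only raise after the call has already returned. It flags slow requests (and routes them into the fallback caption via the surrounding except) rather than interrupting them. The pattern in isolation, with a stand-in for the API call:

```python
import time

timeout_seconds = 60  # same budget as the diff

def slow_api_call():
    """Stand-in for client.models.generate_content (an assumption, not the real client)."""
    time.sleep(0.1)
    return "ok"

start_time = time.time()
result = slow_api_call()
# Checked only after the call returns: a hung request still blocks until it completes.
if time.time() - start_time > timeout_seconds:
    raise TimeoutError("Image caption generation timed out")
```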
@@ -181,6 +198,8 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 number_of_images=1
 )
 )
+if time.time() - start_time > timeout_seconds:
+raise TimeoutError("Image generation timed out")
 if imagen.generated_images and len(imagen.generated_images) > 0:
 generated_image = imagen.generated_images[0]
 image = Image.open(BytesIO(generated_image.image.image_bytes))
@@ -190,42 +209,43 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 buffered = BytesIO()
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
+logging.debug("Image generated successfully")
 break
 else:
-
-
-image = Image.new('RGB', (360, 640), color='gray')
-buffered = BytesIO()
-image.save(buffered, format="PNG")
-img_str = base64.b64encode(buffered.getvalue()).decode()
-yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
-return {
-'text': text,
-'image_base64': img_str,
-'video_base64': None,
-'ideas': ideas
-}
-break
-yield {"stage": "retrying_image", "message": random.choice(PROGRESS_STAGES["retrying_image"]), "progress": 40 + (image_attempt * 10)}
-except Exception as e:
-print(f"Error generating image: {e}")
-if image_attempt == max_retries - 1:
-if total_attempts == max_total_attempts:
+logging.warning("No images generated")
+if image_attempt == max_retries - 1 and total_attempts == max_total_attempts:
 image = Image.new('RGB', (360, 640), color='gray')
 buffered = BytesIO()
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
 yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Returning with placeholder image")
 return {
 'text': text,
 'image_base64': img_str,
 'video_base64': None,
 'ideas': ideas
 }
-
+yield {"stage": "retrying_image", "message": random.choice(PROGRESS_STAGES["retrying_image"]), "progress": 40 + (image_attempt * 10)}
+except Exception as e:
+logging.error(f"Error generating image: {e}")
+if image_attempt == max_retries - 1 and total_attempts == max_total_attempts:
+image = Image.new('RGB', (360, 640), color='gray')
+buffered = BytesIO()
+image.save(buffered, format="PNG")
+img_str = base64.b64encode(buffered.getvalue()).decode()
+yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Returning with placeholder image")
+return {
+'text': text,
+'image_base64': img_str,
+'video_base64': None,
+'ideas': ideas
+}
 yield {"stage": "retrying_image", "message": random.choice(PROGRESS_STAGES["retrying_image"]), "progress": 40 + (image_attempt * 10)}
 
 if generate_video and generated_image is not None:
+logging.debug("Attempting video generation")
 yield {"stage": "generating_video", "message": random.choice(PROGRESS_STAGES["generating_video"]), "progress": 70}
 try:
 video_prompt = f"""
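Both fallback branches in the hunk above build the same gray placeholder frame. A small self-contained sketch of that step (PIL image → PNG bytes → base64 string) at the 360×640 portrait size used in the diff; the helper name is ours, app.py inlines this logic:

```python
import base64
from io import BytesIO
from PIL import Image

def placeholder_image_base64(size=(360, 640), color='gray') -> str:
    """Gray portrait frame encoded as a base64 PNG, used when image generation fails."""
    image = Image.new('RGB', size, color=color)
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode()

img_str = placeholder_image_base64()
```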
@@ -233,6 +253,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 Use a close-up shot with a slow dolly shot circling around the subject,
 using shallow focus on the main subject to emphasize details, in a realistic style with cinematic lighting.
 """
+start_time = time.time()
 operation = client.models.generate_videos(
 model="veo-2.0-generate-001",
 prompt=video_prompt,
@@ -245,7 +266,9 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 )
 )
 while not operation.done:
-time.
+if time.time() - start_time > timeout_seconds:
+raise TimeoutError("Video generation timed out")
+time.sleep(5) # Reduced polling interval
 operation = client.operations.get(operation)
 if operation.error:
 raise ValueError(f"Video generation failed: {operation.error.message}")
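The Veo polling loop now sleeps 5 seconds between status checks and gives up once the 60-second budget is exceeded. A runnable sketch of the same poll-with-timeout pattern; the fake operation classes are stand-ins for the long-running operation object the diff shows (`operation.done`, `operation.error`, `client.operations.get(operation)`):

```python
import time

class _FakeOperation:
    """Stand-in for the operation returned by generate_videos (illustration only)."""
    def __init__(self, polls_until_done=2):
        self._polls_left = polls_until_done
        self.error = None

    @property
    def done(self):
        return self._polls_left <= 0

class _FakeOperations:
    def get(self, op):
        op._polls_left -= 1
        return op

timeout_seconds = 60
poll_interval = 5  # seconds between checks, as in the diff

operation, operations = _FakeOperation(), _FakeOperations()
start_time = time.time()
while not operation.done:
    if time.time() - start_time > timeout_seconds:
        raise TimeoutError("Video generation timed out")
    time.sleep(poll_interval)
    operation = operations.get(operation)  # app.py calls client.operations.get(operation)
if operation.error:
    raise ValueError(f"Video generation failed: {operation.error.message}")
```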
@@ -257,6 +280,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 video_bytes = video_data if isinstance(video_data, bytes) else BytesIO(video_data).getvalue()
 video_base64 = base64.b64encode(video_bytes).decode()
 yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Video generated successfully")
 return {
 'text': text,
 'image_base64': img_str,
@@ -267,9 +291,11 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 else:
 raise ValueError("Video generation operation failed: No generated_videos in response")
 except Exception as e:
-
+logging.error(f"Error generating video: {e}")
+logging.debug("Falling back to text-to-video")
 yield {"stage": "generating_video", "message": random.choice(PROGRESS_STAGES["generating_video"]), "progress": 80}
 try:
+start_time = time.time()
 operation = client.models.generate_videos(
 model="veo-2.0-generate-001",
 prompt=video_prompt,
@@ -281,7 +307,9 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 )
 )
 while not operation.done:
-time.
+if time.time() - start_time > timeout_seconds:
+raise TimeoutError("Text-to-video generation timed out")
+time.sleep(5)
 operation = client.operations.get(operation)
 if operation.error:
 raise ValueError(f"Video generation failed: {operation.error.message}")
@@ -293,6 +321,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 video_bytes = video_data if isinstance(video_data, bytes) else BytesIO(video_data).getvalue()
 video_base64 = base64.b64encode(video_bytes).decode()
 yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Text-to-video generated successfully")
 return {
 'text': text,
 'image_base64': img_str,
@@ -303,9 +332,10 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 else:
 raise ValueError("Video generation operation failed: No generated_videos in response")
 except Exception as e:
-
+logging.error(f"Error generating text-to-video: {e}")
 if total_attempts == max_total_attempts:
 yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Returning without video")
 return {
 'text': text,
 'image_base64': img_str,
@@ -315,6 +345,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 
 if img_str is not None:
 yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Returning with image only")
 return {
 'text': text,
 'image_base64': img_str,
@@ -327,6 +358,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
 yield {"stage": "finalizing", "message": random.choice(PROGRESS_STAGES["finalizing"]), "progress": 90}
+logging.debug("Returning with placeholder image")
 return {
 'text': f"Amazing {user_input}! 🔥 #{user_input.replace(' ', '')}",
 'image_base64': img_str,
@@ -338,6 +370,7 @@ def start_feed(user_input, generate_video, current_index, feed_items):
 """
 Start or update the feed based on the user input with progress updates.
 """
+logging.debug("Starting start_feed")
 if not user_input.strip():
 user_input = "trending"
 current_user_input = user_input
@@ -354,6 +387,7 @@ def start_feed(user_input, generate_video, current_index, feed_items):
 
 item_generator = generate_item(user_input, ideas, generate_video=generate_video)
 for progress in item_generator:
+logging.debug(f"Progress update: {progress}")
 if isinstance(progress, dict) and "stage" in progress:
 html_content = generate_html([], False, 0, user_input, is_loading, progress["stage"], progress["message"], progress["progress"])
 yield current_user_input, current_index, feed_items, html_content, share_links, is_loading
@@ -364,10 +398,11 @@ def start_feed(user_input, generate_video, current_index, feed_items):
 share_links = generate_share_links(item['image_base64'], item['video_base64'], item['text'])
 is_loading = False
 html_content = generate_html(feed_items, False, current_index, user_input, is_loading)
+logging.debug("Feed generation complete")
 yield current_user_input, current_index, feed_items, html_content, share_links, is_loading
 return
 except Exception as e:
-
+logging.error(f"Error in start_feed: {e}")
 html_content = """
 <div style="
 display: flex;
@@ -394,6 +429,7 @@ def load_next(user_input, generate_video, current_index, feed_items):
 """
 Load the next item in the feed with progress updates.
 """
+logging.debug("Starting load_next")
 current_user_input = user_input if user_input.strip() else "trending"
 user_input = current_user_input
 is_loading = True
@@ -412,6 +448,7 @@ def load_next(user_input, generate_video, current_index, feed_items):
 feed_items[current_index]['video_base64'],
 feed_items[current_index]['text']
 )
+logging.debug("Loaded existing feed item")
 yield current_user_input, current_index, feed_items, html_content, share_links, is_loading
 return
 else:
@@ -421,6 +458,7 @@ def load_next(user_input, generate_video, current_index, feed_items):
 
 item_generator = generate_item(user_input, ideas, generate_video=generate_video)
 for progress in item_generator:
+logging.debug(f"Progress update: {progress}")
 if isinstance(progress, dict) and "stage" in progress:
 html_content = generate_html(feed_items, False, current_index, user_input, is_loading, progress["stage"], progress["message"], progress["progress"])
 yield current_user_input, current_index, feed_items, html_content, share_links, is_loading
@@ -435,10 +473,11 @@ def load_next(user_input, generate_video, current_index, feed_items):
 feed_items[current_index]['video_base64'],
 feed_items[current_index]['text']
 )
+logging.debug("New feed item generated")
 yield current_user_input, current_index, feed_items, html_content, share_links, is_loading
 return
 except Exception as e:
-
+logging.error(f"Error in load_next: {e}")
 html_content = """
 <div style="
 display: flex;
@@ -465,6 +504,7 @@ def load_previous(user_input, generate_video, current_index, feed_items):
 """
 Load the previous item in the feed.
 """
+logging.debug("Loading previous item")
 current_user_input = user_input if user_input.strip() else "trending"
 
 if current_index > 0:
@@ -475,12 +515,14 @@ def load_previous(user_input, generate_video, current_index, feed_items):
 feed_items[current_index]['video_base64'],
 feed_items[current_index]['text']
 )
+logging.debug("Previous item loaded")
 return current_user_input, current_index, feed_items, html_content, share_links, False
 
 def generate_share_links(image_base64, video_base64, caption):
 """
 Generate share links for social media platforms with download links for image and video.
 """
+logging.debug("Generating share links")
 image_data_url = f"data:image/png;base64,{image_base64}"
 encoded_caption = urllib.parse.quote(caption)
 
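generate_share_links (unchanged here apart from the new debug line) builds a base64 data URL for the image and URL-encodes the caption before composing links. A minimal sketch of those two steps; the share target shown is an illustrative example, not necessarily one app.py actually uses:

```python
import urllib.parse

image_base64 = "iVBORw0KGgo..."  # placeholder for a real base64 PNG payload
caption = "Amazing sunsets! 🔥 #sunsets"

image_data_url = f"data:image/png;base64,{image_base64}"
encoded_caption = urllib.parse.quote(caption)

# Illustrative share target (assumption): a tweet intent link carrying the caption.
twitter_share = f"https://twitter.com/intent/tweet?text={encoded_caption}"
print(image_data_url[:40], twitter_share)
```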
@@ -623,6 +665,7 @@ def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_inpu
 """
 Generate an HTML string to display the current feed item or a loading screen with a progress bar.
 """
+logging.debug(f"Generating HTML, is_loading: {is_loading}, progress_stage: {progress_stage}")
 if is_loading:
 progress_percent = max(0, min(100, progress_percent))
 return f"""
@@ -692,6 +735,7 @@ def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_inpu
 """
 
 if not feed_items or current_index >= len(feed_items):
+logging.debug("No feed items to display")
 return """
 <div style="
 display: flex;
@@ -788,6 +832,7 @@ def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_inpu
 }}
 </script>
 """
+logging.debug("Feed item HTML generated")
 return html_str
 
 # Define the Gradio interface
@@ -821,7 +866,7 @@ with gr.Blocks(
 label="Generate Video (may take longer)",
 value=False
 )
-magic_button = gr.Button("✨
+magic_button = gr.Button("✨Create✨", elem_classes="gr-button")
 
 feed_html = gr.HTML()
 share_html = gr.HTML(label="Share this item:")