Update app.py
app.py CHANGED
@@ -61,14 +61,17 @@ def clean_response_text(response_text):
 def generate_ideas(user_input):
 """
 Generate a diverse set of ideas based on the user's input concept using the LLM.
+Yields progress updates for the loading UI.
 
 Args:
-user_input (str): The user's input concept or idea
+user_input (str): The user's input concept or idea.
 
-
-
+Yields:
+tuple: (progress_percentage, message) for UI updates.
+list: Final list of ideas as strings.
 """
-
+yield (10, f"Brainstorming epic ideas for {user_input}... π")
+
 prompt = f"""
 The user has provided the concept: "{user_input}". You must generate 5 diverse and creative ideas for a TikTok video that are directly and explicitly related to "{user_input}".
 Each idea must clearly incorporate and focus on the core theme of "{user_input}" without deviating into unrelated topics.
@@ -102,10 +105,12 @@ def generate_ideas(user_input):
 raise ValueError("Invalid JSON format: 'ideas' key missing, not a list, or incorrect length")
 
 ideas = response_json['ideas']
+yield (20, f"Ideas locked in for {user_input}! π")
 return ideas
 
 except Exception as e:
 print(f"Error generating ideas: {e}")
+yield (20, f"Oops, tweaking the plan for {user_input}... π§")
 return [
 f"A dramatic {user_input} scene with cinematic lighting",
 f"A close-up of {user_input} in a futuristic setting",
@@ -117,6 +122,7 @@ def generate_ideas(user_input):
 def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 """
 Generate a single feed item (image and optionally one video) using one of the ideas.
+Yields progress updates for the loading UI.
 
 Args:
 user_input (str): The user's input concept or idea.
@@ -124,8 +130,9 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 generate_video (bool): Whether to generate a video from the image.
 max_retries (int): Maximum number of retries for image generation per cycle.
 
-
-
+Yields:
+tuple: (progress_percentage, message) for UI updates.
+dict: Final dictionary with 'text', 'image_base64', 'video_base64', and 'ideas'.
 """
 video_base64 = None
 max_total_attempts = 3  # Maximum total attempts for combined image and video generation cycles
@@ -133,14 +140,16 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 total_attempts = 0
 while total_attempts < max_total_attempts:
 total_attempts += 1
+yield (20 + total_attempts * 10, f"Attempt {total_attempts} to craft your {user_input} masterpiece... π¨")
 
 # Step 1: Generate an image (retry up to max_retries times)
-generated_image = None
+generated_image = None
 text = None
 img_str = None
 image_prompt = None
 
 for image_attempt in range(max_retries):
+yield (20 + total_attempts * 10 + image_attempt * 5, f"Crafting a stunning image for {user_input}... πΈ")
 selected_idea = random.choice(ideas)
 prompt = f"""
 The user has provided the concept: "{user_input}". Based on this concept and the specific idea "{selected_idea}", create content for a TikTok video.
@@ -176,6 +185,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 
 # Attempt to generate the image
 try:
+yield (40 + image_attempt * 5, f"Rendering your {user_input} vision... β¨")
 imagen = client.models.generate_images(
 model='imagen-3.0-generate-002',
 prompt=image_prompt,
@@ -195,42 +205,43 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 buffered = BytesIO()
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
+yield (50, f"Image for {user_input} is ready! π")
 break  # Successfully generated image, exit image retry loop
 else:
 print(f"Image generation failed (image attempt {image_attempt + 1}, total attempt {total_attempts}): No images returned")
 if image_attempt == max_retries - 1:
-
+yield (50, f"Tweaking the image for {user_input}... π")
 if total_attempts == max_total_attempts:
 # Max total attempts reached, use a gray placeholder
 image = Image.new('RGB', (360, 640), color='gray')
 buffered = BytesIO()
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
+yield (60, f"Using a placeholder for {user_input}... πΌοΈ")
 return {
 'text': text,
 'image_base64': img_str,
 'video_base64': None,
 'ideas': ideas
 }
-# Otherwise, continue to next cycle
 break  # Exit inner loop to retry with new idea
 except Exception as e:
 print(f"Error generating image (image attempt {image_attempt + 1}, total attempt {total_attempts}): {e}")
 if image_attempt == max_retries - 1:
-
+yield (50, f"Retrying image for {user_input}... π")
 if total_attempts == max_total_attempts:
 # Max total attempts reached, use a gray placeholder
 image = Image.new('RGB', (360, 640), color='gray')
 buffered = BytesIO()
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
+yield (60, f"Using a placeholder for {user_input}... πΌοΈ")
 return {
 'text': text,
 'image_base64': img_str,
 'video_base64': None,
 'ideas': ideas
 }
-# Otherwise, continue to next cycle
 break  # Exit inner loop to retry with new idea
 
 # Step 2: Generate video if enabled (with fallback to text-to-video if image-to-video fails)
@@ -240,6 +251,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 
 # First, try image-to-video generation (only once)
 try:
+yield (60, f"Filming a viral video for {user_input}... π₯")
 video_prompt = f"""
 The user concept is "{user_input}". Based on this and the scene: {image_prompt}, create a video.
 Use a close-up shot with a slow dolly shot circling around the subject,
@@ -309,6 +321,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 # Encode the video bytes as base64
 video_base64 = base64.b64encode(video_bytes).decode()
 video_generated = True
+yield (90, f"Video for {user_input} is a wrap! π¬")
 # Successfully generated video, return the result
 return {
 'text': text,
@@ -320,12 +333,14 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 raise ValueError("No video was generated")
 except Exception as e:
 print(f"Error generating video (image-to-video, total attempt {total_attempts}): {e}")
+yield (70, f"Switching to a new video approach for {user_input}... ποΈ")
 print("Image-to-video generation failed. Falling back to text-to-video generation.")
 
 # If image-to-video generation failed, try text-to-video generation
 if not video_generated:
 for video_attempt in range(max_video_retries_per_image):
 try:
+yield (75 + video_attempt * 5, f"Trying a fresh video take for {user_input}... πΉ")
 # Use the same video prompt but without the image
 video_prompt_base = f"""
 The user concept is "{user_input}". Based on this and the scene: {image_prompt}, create a video.
@@ -403,6 +418,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 # Encode the video bytes as base64
 video_base64 = base64.b64encode(video_bytes).decode()
 video_generated = True
+yield (90, f"Video for {user_input} is a wrap! π¬")
 # Successfully generated video, return the result
 return {
 'text': text,
@@ -415,9 +431,11 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 except Exception as e:
 print(f"Error generating video (text-to-video attempt {video_attempt + 1}, total attempt {total_attempts}): {e}")
 if video_attempt == max_video_retries_per_image - 1:
+yield (85, f"Finalizing without video for {user_input}... π")
 if total_attempts == max_total_attempts:
 print("Max total attempts reached. Proceeding without video.")
 video_base64 = None
+yield (95, f"Polishing your {user_input} masterpiece... β¨")
 return {
 'text': text,
 'image_base64': img_str,
@@ -431,6 +449,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 
 # If video generation is not enabled or image generation failed, return the result
 if img_str is not None:
+yield (95, f"Polishing your {user_input} masterpiece... β¨")
 return {
 'text': text,
 'image_base64': img_str,
@@ -441,10 +460,12 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 
 # If max total attempts reached without success, use a gray placeholder image
 print("Max total attempts reached without successful image generation. Using placeholder.")
+yield (95, f"Falling back to a placeholder for {user_input}... πΌοΈ")
 image = Image.new('RGB', (360, 640), color='gray')
 buffered = BytesIO()
 image.save(buffered, format="PNG")
 img_str = base64.b64encode(buffered.getvalue()).decode()
+yield (100, f"Ready to roll with {user_input}! π")
 return {
 'text': f"Amazing {user_input}! π₯ #{user_input.replace(' ', '')}",
 'image_base64': img_str,
@@ -455,6 +476,7 @@ def generate_item(user_input, ideas, generate_video=False, max_retries=3):
 def start_feed(user_input, generate_video, current_index, feed_items):
 """
 Start or update the feed based on the user input.
+Yields progress updates and final feed content.
 
 Args:
 user_input (str): The user's input concept or idea.
@@ -462,8 +484,9 @@ def start_feed(user_input, generate_video, current_index, feed_items):
 current_index (int): The current item index.
 feed_items (list): The current list of feed items.
 
-
-tuple: (
+Yields:
+tuple: (progress_percentage, message, html_content, is_loading) for UI updates.
+tuple: (current_user_input, current_index, feed_items, html_content, share_links, is_loading) for final result.
 """
 if not user_input.strip():
 user_input = "trending"
@@ -471,14 +494,35 @@
 # Update current_user_input with the new user_input
 current_user_input = user_input
 
-# Set loading state
+# Set initial loading state
 is_loading = True
-html_content = generate_html([], False, 0, user_input, is_loading)
+html_content = generate_html([], False, 0, user_input, is_loading, 0, f"Getting started with {user_input}... π")
+yield (0, f"Getting started with {user_input}... π", html_content, is_loading)
 share_links = ""
 
 try:
-ideas
-
+# Generate ideas with progress updates
+ideas_gen = generate_ideas(user_input)
+ideas = None
+for update in ideas_gen:
+if isinstance(update, tuple):
+progress, message = update
+html_content = generate_html([], False, 0, user_input, is_loading, progress, message)
+yield (progress, message, html_content, is_loading)
+else:
+ideas = update
+
+# Generate item with progress updates
+item_gen = generate_item(user_input, ideas, generate_video=generate_video)
+item = None
+for update in item_gen:
+if isinstance(update, tuple):
+progress, message = update
+html_content = generate_html([], False, 0, user_input, is_loading, progress, message)
+yield (progress, message, html_content, is_loading)
+else:
+item = update
+
 feed_items = [item]
 current_index = 0
 share_links = generate_share_links(
@@ -509,16 +553,19 @@ def start_feed(user_input, generate_video, current_index, feed_items):
 </div>
 """
 is_loading = False
+yield (100, "Oops, something went wrong! π", html_content, is_loading)
 return current_user_input, current_index, feed_items, html_content, share_links, is_loading
 
 # Set loading state to False and update UI
 is_loading = False
-html_content = generate_html(feed_items, False, current_index, user_input, is_loading)
+html_content = generate_html(feed_items, False, current_index, user_input, is_loading, 100, "")
+yield (100, f"Ready to roll with {user_input}! π", html_content, is_loading)
 return current_user_input, current_index, feed_items, html_content, share_links, is_loading
 
 def load_next(user_input, generate_video, current_index, feed_items):
 """
 Load the next item in the feed.
+Yields progress updates and final feed content.
 
 Args:
 user_input (str): The user's input concept or idea (updated from the textbox).
@@ -526,8 +573,9 @@ def load_next(user_input, generate_video, current_index, feed_items):
 current_index (int): The current item index.
 feed_items (list): The current list of feed items.
 
-
-tuple: (
+Yields:
+tuple: (progress_percentage, message, html_content, is_loading) for UI updates.
+tuple: (current_user_input, current_index, feed_items, html_content, share_links, is_loading) for final result.
 """
 # Update current_user_input with the latest user_input from the textbox
 current_user_input = user_input if user_input.strip() else "trending"
@@ -536,17 +584,41 @@ def load_next(user_input, generate_video, current_index, feed_items):
 user_input = current_user_input
 
 is_loading = True
-html_content = generate_html(feed_items, False, current_index, user_input, is_loading)
+html_content = generate_html(feed_items, False, current_index, user_input, is_loading, 0, f"Loading next {user_input} vibe... π")
+yield (0, f"Loading next {user_input} vibe... π", html_content, is_loading)
 share_links = ""
 
 try:
 if current_index + 1 < len(feed_items):
 current_index += 1
+yield (50, f"Switching to the next {user_input} moment... π", html_content, is_loading)
 else:
-ideas = feed_items[-1]['ideas'] if feed_items else
-
+ideas = feed_items[-1]['ideas'] if feed_items else None
+if not ideas:
+# Generate new ideas if none exist
+ideas_gen = generate_ideas(user_input)
+for update in ideas_gen:
+if isinstance(update, tuple):
+progress, message = update
+html_content = generate_html(feed_items, False, current_index, user_input, is_loading, progress, message)
+yield (progress, message, html_content, is_loading)
+else:
+ideas = update
+
+# Generate new item
+new_item_gen = generate_item(user_input, ideas, generate_video=generate_video)
+new_item = None
+for update in new_item_gen:
+if isinstance(update, tuple):
+progress, message = update
+html_content = generate_html(feed_items, False, current_index, user_input, is_loading, progress, message)
+yield (progress, message, html_content, is_loading)
+else:
+new_item = update
+
 feed_items.append(new_item)
 current_index = len(feed_items) - 1
+
 share_links = generate_share_links(
 feed_items[current_index]['image_base64'],
 feed_items[current_index]['video_base64'],
@@ -573,10 +645,12 @@ def load_next(user_input, generate_video, current_index, feed_items):
 </div>
 """
 is_loading = False
+yield (100, "Oops, something went wrong! π", html_content, is_loading)
 return current_user_input, current_index, feed_items, html_content, share_links, is_loading
 
 is_loading = False
-html_content = generate_html(feed_items, False, current_index, user_input, is_loading)
+html_content = generate_html(feed_items, False, current_index, user_input, is_loading, 100, "")
+yield (100, f"Next {user_input} vibe is live! π", html_content, is_loading)
 return current_user_input, current_index, feed_items, html_content, share_links, is_loading
 
 def load_previous(user_input, generate_video, current_index, feed_items):
@@ -597,7 +671,7 @@ def load_previous(user_input, generate_video, current_index, feed_items):
 
 if current_index > 0:
 current_index -= 1
-html_content = generate_html(feed_items, False, current_index, user_input, False)
+html_content = generate_html(feed_items, False, current_index, user_input, False, 100, "")
 share_links = generate_share_links(
 feed_items[current_index]['image_base64'],
 feed_items[current_index]['video_base64'],
@@ -730,7 +804,7 @@ def generate_share_links(image_base64, video_base64, caption):
 justify-content: center;
 margin-top: 10px;
 ">
-<a href="https://
+<a href="https://studio.youtube.com/channel/UC/videos/upload?description={caption}" target="_blank" style="
 background-color: #ff0000;
 color: white;
 padding: 8px 16px;
@@ -760,10 +834,9 @@ def generate_share_links(image_base64, video_base64, caption):
 </div>
 """.format(caption=encoded_caption)
 
-def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_input="", is_loading=False):
+def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_input="", is_loading=False, progress=0, message=""):
 """
-Generate an HTML string to display the current feed item with
-Displays a video if available, otherwise falls back to the image.
+Generate an HTML string to display the current feed item or loading state with progress bar.
 
 Args:
 feed_items (list): List of dictionaries containing 'text', 'image_base64', and 'video_base64'.
@@ -771,18 +844,12 @@ def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_inpu
 current_index (int): The index of the item to display.
 user_input (str): The user's input concept or idea for loading messages.
 is_loading (bool): Whether the feed is currently loading.
+progress (float): Current progress percentage (0-100).
+message (str): Current loading message to display.
 
 Returns:
-str: HTML string representing the feed.
+str: HTML string representing the feed or loading state.
 """
-loading_messages = [
-f"Cooking up a {user_input} masterpiece... π³",
-f"Snapping a vibrant {user_input} moment... πΈ",
-f"Creating a {user_input} vibe that pops... β¨",
-f"Getting that perfect {user_input} shot... π₯",
-f"Bringing {user_input} to life... π"
-]
-
 if is_loading:
 return f"""
 <div id="feed-container" style="
@@ -807,7 +874,7 @@ def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_inpu
 margin-bottom: 20px;
 text-shadow: 1px 1px 2px rgba(0,0,0,0.5);
 ">
-{
+{message}
 </div>
 <div style="
 width: 80%;
@@ -816,30 +883,30 @@ def generate_html(feed_items, scroll_to_latest=False, current_index=0, user_inpu
 border-radius: 5px;
 overflow: hidden;
 ">
-<div style="
-width:
+<div id="progress-bar" style="
+width: {progress}%;
 height: 100%;
 background: linear-gradient(to right, #ff2d55, #ff5e78);
-
+transition: width 0.5s ease-in-out;
 "></div>
 </div>
+<div style="
+margin-top: 10px;
+font-size: 14px;
+color: #ccc;
+">
+{int(progress)}% Complete
+</div>
 <style>
-@keyframes
-0% {{
-50% {{
-100% {{
+@keyframes pulse {{
+0% {{ opacity: 1; }}
+50% {{ opacity: 0.5; }}
+100% {{ opacity: 1; }}
 }}
-
-
-const messages = {json.dumps(loading_messages)};
-let currentMessageIndex = 0;
-const messageElement = document.getElementById('loading-message');
-function rotateMessages() {{
-currentMessageIndex = (currentMessageIndex + 1) % messages.length;
-messageElement.textContent = messages[currentMessageIndex];
+#loading-message {{
+animation: pulse 2s infinite;
 }}
-
-</script>
+</style>
 </div>
 """
 
@@ -992,14 +1059,40 @@ with gr.Blocks(
 user_input.submit(
 fn=start_feed,
 inputs=[user_input, generate_video_checkbox, current_index, feed_items],
-outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading]
+outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading],
+_js="""
+async (user_input, generate_video, current_index, feed_items) => {
+const results = [];
+for await (const update of gradioApp().fn_stream(start_feed, [user_input, generate_video, current_index, feed_items])) {
+results.push(update);
+if (update.length === 4) { // Progress update
+const [progress, message, html_content, is_loading] = update;
+gradioApp().update_component('feed_html', { value: html_content });
+}
+}
+return results[results.length - 1]; // Return final result
+}
+"""
 )
 
 # Handle magic button click to generate next item
 magic_button.click(
 fn=load_next,
 inputs=[user_input, generate_video_checkbox, current_index, feed_items],
-outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading]
+outputs=[current_user_input, current_index, feed_items, feed_html, share_html, is_loading],
+_js="""
+async (user_input, generate_video, current_index, feed_items) => {
+const results = [];
+for await (const update of gradioApp().fn_stream(load_next, [user_input, generate_video, current_index, feed_items])) {
+results.push(update);
+if (update.length === 4) { // Progress update
+const [progress, message, html_content, is_loading] = update;
+gradioApp().update_component('feed_html', { value: html_content });
+}
+}
+return results[results.length - 1]; // Return final result
+}
+"""
 )
 
# Hidden button for previous item navigation
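
A note on the generator pattern introduced above: `generate_ideas` and `generate_item` now yield `(progress, message)` tuples and finish with a plain `return <result>`, while `start_feed` and `load_next` consume them with `for update in ...` plus an `isinstance(update, tuple)` check. In Python, a value passed to `return` inside a generator is not produced by iteration; it only surfaces as `StopIteration.value`, so a bare `for` loop never sees the returned result. The sketch below uses hypothetical `worker`/`drive` names (not from app.py) to show one way to keep both the streamed tuples and the final return value; alternatively, the helpers could simply yield their final result as the last item.

```python
def worker(n):
    # Toy stand-in for generate_ideas/generate_item:
    # streams (progress, message) tuples, then returns a final result.
    for i in range(n):
        yield (int(100 * (i + 1) / n), f"step {i + 1} of {n}")
    return [f"idea {i}" for i in range(n)]  # travels via StopIteration.value

def drive(gen):
    # Consume progress updates and still capture the generator's return value.
    while True:
        try:
            progress, message = next(gen)
        except StopIteration as stop:
            return stop.value  # whatever the generator passed to `return`
        print(f"[{progress:3d}%] {message}")  # app.py would render this via generate_html()

print(drive(worker(3)))  # -> ['idea 0', 'idea 1', 'idea 2']
```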
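For the UI wiring, the `_js` callbacks above drive the streaming of intermediate updates by hand. As a point of reference, recent Gradio releases can also stream directly from generator event handlers: when the function registered with `.click()` or `.submit()` yields successive values matching `outputs`, each yield is pushed to the browser without custom JavaScript. A minimal, self-contained sketch with illustrative names and a single HTML output (rather than the six outputs used in app.py):

```python
import time
import gradio as gr

def fake_feed(concept):
    # Each yield updates the `outputs` component; Gradio streams them as they arrive.
    for pct in (10, 40, 70, 100):
        time.sleep(0.5)  # stand-in for the generate_ideas / generate_item work
        yield f"<div>{concept or 'trending'}: {pct}% complete</div>"

with gr.Blocks() as demo:
    concept = gr.Textbox(label="Concept")
    feed = gr.HTML()
    gr.Button("Generate").click(fn=fake_feed, inputs=concept, outputs=feed)

if __name__ == "__main__":
    demo.launch()
```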