Rishi Desai
committed on
Commit · 3d274c1
1 Parent(s): 22334c2
init dump
Browse files
- .gitignore +7 -0
- README.md +53 -1
- demo.py +52 -0
- main.py +294 -0
- requirements.txt +5 -0
.gitignore
ADDED
@@ -0,0 +1,7 @@
+venv/
+output/
+env/
+.env
+.venv/
+__pycache__/
+.gradio/
README.md
CHANGED
@@ -1 +1,55 @@
-# CharVid
+# CharVid: Text+Image to Video Workflow
+
+This tool combines a reference image with a text prompt to generate a new image, which is then used to generate a video.
+
+## Installation
+
+1. Clone the repository
+2. Install the required dependencies:
+
+```bash
+pip install -r requirements.txt
+```
+
+3. Set up your fal.ai API key:
+
+```bash
+# For macOS/Linux
+export FAL_KEY="your-fal-ai-api-key"
+
+# For Windows (Command Prompt)
+set FAL_KEY=your-fal-ai-api-key
+
+# For Windows (PowerShell)
+$env:FAL_KEY="your-fal-ai-api-key"
+```
+
+You can get your API key by signing up at [fal.ai](https://fal.ai).
+
+## Usage
+
+### Generate a video from a reference image and prompt
+
+Run the script with the following arguments:
+
+```bash
+python main.py --ref "https://example.com/reference_image.jpg" --prompt "your descriptive prompt" --output "./output"
+```
+
+Parameters:
+- `--ref`: URL or path to the reference image
+- `--prompt`: Text prompt describing the desired modifications
+- `--output`: Directory where the generated image and video will be saved
+
+## Examples
+
+```bash
+python main.py --ref "https://storage.googleapis.com/falserverless/gallery/example_inputs_liuyifei.png" --prompt "a woman holding a sign with glowing green text 'Hello World'" --output "./output"
+```
+
+## Notes
+
+- The `fal-client` library requires the `FAL_KEY` API key to be set in your environment
+- The `together` library likewise expects a `TOGETHER_API_KEY` environment variable for the prompt-optimization step
+- For text+image to image generation, we use the `fal-ai/flux-pulid` model
+- For image-to-video generation, we use the `fal-ai/wan-i2v` model
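The CLI is the documented entry point, but `main.py` also exposes `process()` for programmatic use. A minimal sketch (the reference URL and prompt below are placeholders; assumes `main.py` is on the import path and both API keys are exported):

```python
# Sketch: drive the pipeline from Python instead of the CLI.
# The reference URL and prompt are placeholders.
from main import process

result, image_path, video_path = process(
    "https://example.com/reference_image.jpg",  # URL, data URL, or local path
    "a woman holding a sign with glowing green text 'Hello World'",
    "./output",
)
print(video_path if video_path else "generation failed")
```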
demo.py
ADDED
@@ -0,0 +1,52 @@
+import gradio as gr
+import os
+import tempfile
+import base64
+from main import process
+
+def process_image_and_generate_video(image, prompt):
+    # Create a temporary directory for intermediate files
+    with tempfile.TemporaryDirectory() as temp_dir:
+        # Save the uploaded image to a temporary file
+        temp_image_path = os.path.join(temp_dir, "input_image.png")
+        image.save(temp_image_path)
+
+        # Encode the image as base64 and create a data URL
+        with open(temp_image_path, "rb") as f:
+            encoded_image = base64.b64encode(f.read()).decode("utf-8")
+        data_url = f"data:image/png;base64,{encoded_image}"
+
+        # Process the image and generate the video; process() copies the
+        # final artifacts into a numbered directory under output/
+        result, generated_image_path, video_path = process(data_url, prompt, temp_dir)
+
+        if result and video_path:
+            return video_path
+        else:
+            return None
+
+# Create the Gradio interface
+with gr.Blocks(title="Character Video Generation") as demo:
+    gr.Markdown("# Character Video Generation")
+    gr.Markdown("""
+    * Upload a high-quality image of a person
+    * Enter a prompt to generate a video
+    """)
+
+    with gr.Row():
+        with gr.Column():
+            input_image = gr.Image(type="pil", label="Upload Reference Image")
+            prompt = gr.Textbox(label="Enter your prompt")
+            generate_btn = gr.Button("Generate")
+
+        with gr.Column():
+            output_video = gr.Video(label="Generated Video")
+
+    generate_btn.click(
+        fn=process_image_and_generate_video,
+        inputs=[input_image, prompt],
+        outputs=[output_video]
+    )
+
+if __name__ == "__main__":
+    demo.launch(share=True)
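The click handler can also be smoke-tested without opening the browser UI. A sketch, assuming both API keys are set (`sample_person.png` is a placeholder for any local portrait image):

```python
# Hypothetical smoke test for the demo's handler, bypassing the Gradio UI.
from PIL import Image
from demo import process_image_and_generate_video

img = Image.open("sample_person.png")  # placeholder: any local portrait image
video_path = process_image_and_generate_video(img, "the person waves at the camera")
print(video_path or "generation failed")
```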
main.py
ADDED
@@ -0,0 +1,299 @@
+import argparse
+import os
+import asyncio
+import fal_client
+import base64
+import io
+from PIL import Image
+import requests
+import shutil
+from together import Together
+
+# Create a permanent directory for outputs
+OUTPUT_DIR = "output"
+os.makedirs(OUTPUT_DIR, exist_ok=True)
+
+def get_next_dir_number():
+    """Get the next available directory number for output."""
+    existing_dirs = [d for d in os.listdir(OUTPUT_DIR)
+                     if os.path.isdir(os.path.join(OUTPUT_DIR, d)) and d.isdigit()]
+    if not existing_dirs:
+        return 1
+    return max(map(int, existing_dirs)) + 1
+
+def save_results(input_path, generated_image_path, video_path, user_prompt, optimized_prompt, output_dir=None):
+    """
+    Save all generation results in a numbered directory within OUTPUT_DIR.
+
+    Args:
+        input_path: Path to the input reference image
+        generated_image_path: Path to the generated image
+        video_path: Path to the generated video
+        user_prompt: The original text prompt used for generation
+        optimized_prompt: The optimized prompt used for generation
+        output_dir: Optional custom output directory
+
+    Returns:
+        Tuple of (result_dir, saved_video_path)
+    """
+    # If no custom output directory is given, create a numbered one
+    if output_dir is None:
+        dir_num = get_next_dir_number()
+        result_dir = os.path.join(OUTPUT_DIR, str(dir_num))
+    else:
+        result_dir = output_dir
+
+    os.makedirs(result_dir, exist_ok=True)
+
+    # Copy the input image
+    input_image_path = os.path.join(result_dir, "input_image.png")
+    shutil.copy2(input_path, input_image_path)
+
+    # Copy the generated image
+    output_image_path = os.path.join(result_dir, "generated_image.png")
+    shutil.copy2(generated_image_path, output_image_path)
+
+    # Copy the video file
+    saved_video_path = os.path.join(result_dir, "generated_video.mp4")
+    shutil.copy2(video_path, saved_video_path)
+
+    # Store the user prompt in a text file
+    with open(os.path.join(result_dir, "input_prompt.txt"), "w") as f:
+        f.write(user_prompt)
+
+    # Store the optimized prompt in a text file
+    with open(os.path.join(result_dir, "opt_prompt.txt"), "w") as f:
+        f.write(optimized_prompt)
+
+    print(f"All results saved to directory: {result_dir}")
+    return result_dir, saved_video_path
+
+async def generate_image(ref_image, prompt):
+    print("Generating image...")
+
+    handler = await fal_client.submit_async(
+        "fal-ai/flux-pulid",
+        arguments={
+            "prompt": prompt,
+            "reference_image_url": ref_image
+        },
+    )
+
+    # Wait for completion silently
+    async for _ in handler.iter_events():
+        pass
+
+    result = await handler.get()
+    return result
+
+async def generate_video(image_path, prompt):
+    print("Generating video from image...")
+
+    # Read the image file and convert it to a base64 data URL
+    with open(image_path, 'rb') as image_file:
+        image_data = image_file.read()
+        base64_image = base64.b64encode(image_data).decode('utf-8')
+        image_data_url = f"data:image/png;base64,{base64_image}"
+
+    handler = await fal_client.submit_async(
+        "fal-ai/wan-i2v",
+        arguments={
+            "prompt": prompt,
+            "image_url": image_data_url
+        },
+    )
+
+    # Wait for completion silently
+    async for _ in handler.iter_events():
+        pass
+
+    # Get the request ID from the handler
+    request_id = handler.request_id
+
+    # Fetch the result using the request ID
+    result = fal_client.result("fal-ai/wan-i2v", request_id)
+    return result
+
+async def optimize_prompt(ref_image_path, user_prompt):
+    print("Optimizing prompt...")
+
+    # Initialize the Together AI client
+    client = Together()
+
+    # Read and encode the image
+    with open(ref_image_path, 'rb') as image_file:
+        image_data = base64.b64encode(image_file.read()).decode('utf-8')
+
+    # First get a detailed caption of the image
+    messages = [
+        {"role": "system", "content": "You are an expert at describing images in detail, focusing on clothing, accessories, poses, and visual attributes."},
+        {
+            "role": "user",
+            "content": [
+                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_data}"}},
+                {"type": "text", "text": "Describe this image in detail, focusing on the clothing, accessories, pose, and any distinctive visual features."}
+            ]
+        }
+    ]
+
+    # Get the image description from Llama 4
+    response = client.chat.completions.create(
+        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        messages=messages,
+        max_tokens=500
+    )
+
+    image_description = response.choices[0].message.content
+
+    # Now combine the user prompt with the image description
+    prompt_messages = [
+        {"role": "system", "content": "You are an expert at combining user prompts with detailed image descriptions to create optimal prompts for image generation. Focus on maintaining visual consistency while incorporating the user's desired changes. IMPORTANT: Return ONLY the optimized prompt without any explanations or additional text."},
+        {"role": "user", "content": f"""Here is a detailed description of the reference image:
+{image_description}
+
+And here is what the user wants to do with it:
+{user_prompt}
+
+Create an optimal prompt that maintains the visual details (especially clothing and accessories) while incorporating the user's desired changes. The prompt should be direct and descriptive. Return ONLY the prompt without any explanations."""}
+    ]
+
+    # Get the optimized prompt
+    response = client.chat.completions.create(
+        model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
+        messages=prompt_messages,
+        max_tokens=500
+    )
+
+    optimized_prompt = response.choices[0].message.content.strip()
+    print(f"Original prompt: {user_prompt}")
+    print(f"Optimized prompt: {optimized_prompt}")
+
+    return optimized_prompt
+
+async def process_async(ref, prompt, output):
+    print("Processing image...")
+
+    # If ref is a URL, download it first
+    if ref.startswith('http'):
+        response = requests.get(ref)
+        temp_image_path = os.path.join(output, 'temp_ref_image.png')
+        with open(temp_image_path, 'wb') as f:
+            f.write(response.content)
+        ref_path = temp_image_path
+    else:
+        # If ref is a data URL, decode it and save it
+        if ref.startswith('data:image'):
+            base64_data = ref.split(',')[1]
+            image_bytes = base64.b64decode(base64_data)
+            temp_image_path = os.path.join(output, 'temp_ref_image.png')
+            with open(temp_image_path, 'wb') as f:
+                f.write(image_bytes)
+            ref_path = temp_image_path
+        else:
+            ref_path = ref
+
+    # Optimize the prompt using Together AI
+    optimized_prompt = await optimize_prompt(ref_path, prompt)
+
+    # fal expects a fetchable URL; if ref is a bare local path, encode it as a data URL
+    if not (ref.startswith('http') or ref.startswith('data:image')):
+        with open(ref_path, 'rb') as f:
+            ref = f"data:image/png;base64,{base64.b64encode(f.read()).decode('utf-8')}"
+
+    # Generate an image from text+image with the optimized prompt
+    result = await generate_image(ref, optimized_prompt)
+
+    # Save the result
+    if result and 'images' in result and len(result['images']) > 0:
+        # Get the first image
+        image_data = result['images'][0]
+
+        # Handle base64-encoded images
+        if isinstance(image_data, str) and image_data.startswith('data:image'):
+            base64_data = image_data.split(',')[1]
+            image_bytes = base64.b64decode(base64_data)
+            image = Image.open(io.BytesIO(image_bytes))
+        # Handle URL responses
+        elif isinstance(image_data, dict) and 'url' in image_data:
+            response = requests.get(image_data['url'])
+            image = Image.open(io.BytesIO(response.content))
+        else:
+            print(f"Unexpected image format in response: {type(image_data)}")
+            return None, None, None
+
+        # Save the image
+        output_filename = os.path.join(output, 'generated_image.png')
+        image.save(output_filename)
+        print(f"Generated image saved to: {output_filename}")
+
+        # Generate a video from the saved image using the optimized prompt
+        video_result = await generate_video(output_filename, optimized_prompt)
+
+        # Save the video if available
+        if video_result and isinstance(video_result, dict) and 'video' in video_result:
+            video_url = video_result['video']['url']
+            video_response = requests.get(video_url)
+            if video_response.status_code == 200:
+                video_filename = os.path.join(output, 'generated_video.mp4')
+                with open(video_filename, 'wb') as f:
+                    f.write(video_response.content)
+                print(f"Generated video saved to: {video_filename}")
+
+                # Copy the results to a numbered directory unless output already is the latest one
+                if output != os.path.join(OUTPUT_DIR, str(get_next_dir_number() - 1)):
+                    result_dir, saved_video_path = save_results(
+                        ref_path, output_filename, video_filename, prompt, optimized_prompt
+                    )
+                    return result, output_filename, saved_video_path
+
+                return result, output_filename, video_filename
+            else:
+                print(f"Failed to download video. Status code: {video_response.status_code}")
+        else:
+            print("Error: No video URL in response")
+
+        return result, output_filename, None
+    else:
+        print("Error: Failed to generate image")
+        return None, None, None
+
+def process(ref, prompt, output):
+    return asyncio.run(process_async(ref, prompt, output))
+
+def main():
+    # Set up command-line argument parsing
+    parser = argparse.ArgumentParser(description='Process an image with a text prompt and generate a video')
+    parser.add_argument('--ref', type=str, required=True, help='URL or path to the reference image')
+    parser.add_argument('--prompt', type=str, required=True, help='Text prompt')
+    parser.add_argument('--output', type=str, default=None, help='Optional custom output directory. If not provided, a numbered directory will be created.')
+
+    # Parse arguments
+    args = parser.parse_args()
+
+    # Determine the output directory
+    if args.output:
+        output_dir = args.output
+        os.makedirs(output_dir, exist_ok=True)
+        print(f"Using custom output directory: {output_dir}")
+    else:
+        # Create a temporary processing directory
+        temp_dir = os.path.join(OUTPUT_DIR, "temp")
+        os.makedirs(temp_dir, exist_ok=True)
+        output_dir = temp_dir
+
+    # Print the provided arguments
+    print(f"Reference image: {args.ref}")
+    print(f"Text prompt: {args.prompt}")
+
+    # Process the image and generate the video
+    result, image_path, video_path = process(args.ref, args.prompt, output_dir)
+
+    if result and image_path and video_path:
+        print("Processing complete")
+        return 0
+    else:
+        print("Processing failed")
+        return 1
+
+if __name__ == "__main__":
+    exit(main())
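Note that missing credentials only surface here when the first API call fails. A pre-flight check along these lines (a sketch, not part of the script) would fail fast instead:

```python
# Sketch: fail fast when API keys are missing. FAL_KEY is read by fal-client,
# TOGETHER_API_KEY by the together SDK used in optimize_prompt().
import os
import sys

missing = [k for k in ("FAL_KEY", "TOGETHER_API_KEY") if not os.environ.get(k)]
if missing:
    sys.exit(f"Missing environment variables: {', '.join(missing)}")
```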
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+fal-client
+Pillow
+requests
+gradio
+together