pandaphd committed on
Commit
72fca14
·
1 Parent(s): 300118f

First Demo!

Browse files
Files changed (1) hide show
  1. app.py +152 -0
app.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import json
3
+ import torch
4
+ from inference_bokehK import load_models as load_bokeh_models, run_inference as run_bokeh_inference, OmegaConf
5
+ from inference_focal_length import load_models as load_focal_models, run_inference as run_focal_inference
6
+ from inference_shutter_speed import load_models as load_shutter_models, run_inference as run_shutter_inference
7
+ from inference_color_temperature import load_models as load_color_models, run_inference as run_color_inference
8
+
9
+ torch.manual_seed(42)
10
+
11
+ bokeh_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_bokehK.yaml")
12
+ bokeh_pipeline, bokeh_device = load_bokeh_models(bokeh_cfg)
13
+
14
+ focal_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_focal_length.yaml")
15
+ focal_pipeline, focal_device = load_focal_models(focal_cfg)
16
+
17
+ shutter_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_shutter_speed.yaml")
18
+ shutter_pipeline, shutter_device = load_shutter_models(shutter_cfg)
19
+
20
+ color_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_color_temperature.yaml")
21
+ color_pipeline, color_device = load_color_models(color_cfg)
22
+
23
+
24
+ def generate_bokeh_video(base_scene, bokehK_list):
25
+ try:
26
+ torch.manual_seed(42)
27
+ if len(json.loads(bokehK_list)) != 5:
28
+ raise ValueError("Exactly 5 Bokeh K values required")
29
+ return run_bokeh_inference(
30
+ pipeline=bokeh_pipeline, tokenizer=bokeh_pipeline.tokenizer,
31
+ text_encoder=bokeh_pipeline.text_encoder, base_scene=base_scene,
32
+ bokehK_list=bokehK_list, device=bokeh_device
33
+ )
34
+ except Exception as e:
35
+ return f"Error: {str(e)}"
36
+
37
+ def generate_focal_video(base_scene, focal_length_list):
38
+ try:
39
+ torch.manual_seed(42)
40
+ if len(json.loads(focal_length_list)) != 5:
41
+ raise ValueError("Exactly 5 focal length values required")
42
+ return run_focal_inference(
43
+ pipeline=focal_pipeline, tokenizer=focal_pipeline.tokenizer,
44
+ text_encoder=focal_pipeline.text_encoder, base_scene=base_scene,
45
+ focal_length_list=focal_length_list, device=focal_device
46
+ )
47
+ except Exception as e:
48
+ return f"Error: {str(e)}"
49
+
50
+ def generate_shutter_video(base_scene, shutter_speed_list):
51
+ try:
52
+ torch.manual_seed(42)
53
+ if len(json.loads(shutter_speed_list)) != 5:
54
+ raise ValueError("Exactly 5 shutter speed values required")
55
+ return run_shutter_inference(
56
+ pipeline=shutter_pipeline, tokenizer=shutter_pipeline.tokenizer,
57
+ text_encoder=shutter_pipeline.text_encoder, base_scene=base_scene,
58
+ shutter_speed_list=shutter_speed_list, device=shutter_device
59
+ )
60
+ except Exception as e:
61
+ return f"Error: {str(e)}"
62
+
63
+ def generate_color_video(base_scene, color_temperature_list):
64
+ try:
65
+ torch.manual_seed(42)
66
+ if len(json.loads(color_temperature_list)) != 5:
67
+ raise ValueError("Exactly 5 color temperature values required")
68
+ return run_color_inference(
69
+ pipeline=color_pipeline, tokenizer=color_pipeline.tokenizer,
70
+ text_encoder=color_pipeline.text_encoder, base_scene=base_scene,
71
+ color_temperature_list=color_temperature_list, device=color_device
72
+ )
73
+ except Exception as e:
74
+ return f"Error: {str(e)}"
75
+
76
+
77
+
78
+ bokeh_examples = [
79
+ ["A variety of potted plants are displayed on a window sill, with some of them placed in yellow and white cups. The plants are arranged in different sizes and shapes, creating a visually appealing display.", "[18.0, 14.0, 10.0, 6.0, 2.0]"],
80
+ ["A colorful backpack with a floral pattern is sitting on a table next to a computer monitor.", "[2.3, 5.8, 10.2, 14.8, 24.9]"]
81
+ ]
82
+
83
+ focal_examples = [
84
+ ["A small office cubicle with a desk.", "[25.1, 36.1, 47.1, 58.1, 69.1]"],
85
+ ["A large white couch in a living room.", "[55.0, 46.0, 37.0, 28.0, 25.0]"]
86
+ ]
87
+
88
+ shutter_examples = [
89
+ ["A brown and orange leather handbag.", "[0.11, 0.22, 0.33, 0.44, 0.55]"],
90
+ ["A variety of potted plants.", "[0.2, 0.49, 0.69, 0.75, 0.89]"]
91
+ ]
92
+
93
+ color_examples = [
94
+ ["A blue sky with mountains.", "[5455.0, 5155.0, 5555.0, 6555.0, 7555.0]"],
95
+ ["A red couch in front of a window.", "[3500.0, 5500.0, 6500.0, 7500.0, 8500.0]"]
96
+ ]
97
+
98
+
99
+ with gr.Blocks(title="Generative Photography") as demo:
100
+ gr.Markdown("# **Generative Photography: Scene-Consistent Camera Control for Realistic Text-to-Image Synthesis** ")
101
+
102
+ with gr.Tabs():
103
+ with gr.Tab("BokehK Effect"):
104
+ gr.Markdown("### Generate Frames with Bokeh Blur Effect")
105
+ with gr.Row():
106
+ with gr.Column():
107
+ scene_input_bokeh = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
108
+ bokeh_input = gr.Textbox(label="Bokeh Blur Values", placeholder="Enter 5 comma-separated values from 1-30, e.g., [2.44, 8.3, 10.1, 17.2, 24.0]")
109
+ submit_bokeh = gr.Button("Generate Video")
110
+ with gr.Column():
111
+ video_output_bokeh = gr.Video(label="Generated Video")
112
+ gr.Examples(bokeh_examples, [scene_input_bokeh, bokeh_input], [video_output_bokeh], generate_bokeh_video)
113
+ submit_bokeh.click(generate_bokeh_video, [scene_input_bokeh, bokeh_input], [video_output_bokeh])
114
+
115
+ with gr.Tab("Focal Length Effect"):
116
+ gr.Markdown("### Generate Frames with Focal Length Effect")
117
+ with gr.Row():
118
+ with gr.Column():
119
+ scene_input_focal = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
120
+ focal_input = gr.Textbox(label="Focal Length Values", placeholder="Enter 5 comma-separated values from 24-70, e.g., [25.1, 30.2, 33.3, 40.8, 54.0]")
121
+ submit_focal = gr.Button("Generate Video")
122
+ with gr.Column():
123
+ video_output_focal = gr.Video(label="Generated Video")
124
+ gr.Examples(focal_examples, [scene_input_focal, focal_input], [video_output_focal], generate_focal_video)
125
+ submit_focal.click(generate_focal_video, [scene_input_focal, focal_input], [video_output_focal])
126
+
127
+ with gr.Tab("Shutter Speed Effect"):
128
+ gr.Markdown("### Generate Frames with Shutter Speed Effect")
129
+ with gr.Row():
130
+ with gr.Column():
131
+ scene_input_shutter = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
132
+ shutter_input = gr.Textbox(label="Shutter Speed Values", placeholder="Enter 5 comma-separated values from 0.1-1.0, e.g., [0.15, 0.32, 0.53, 0.62, 0.82]")
133
+ submit_shutter = gr.Button("Generate Video")
134
+ with gr.Column():
135
+ video_output_shutter = gr.Video(label="Generated Video")
136
+ gr.Examples(shutter_examples, [scene_input_shutter, shutter_input], [video_output_shutter], generate_shutter_video)
137
+ submit_shutter.click(generate_shutter_video, [scene_input_shutter, shutter_input], [video_output_shutter])
138
+
139
+ with gr.Tab("Color Temperature Effect"):
140
+ gr.Markdown("### Generate Frames with Color Temperature Effect")
141
+ with gr.Row():
142
+ with gr.Column():
143
+ scene_input_color = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
144
+ color_input = gr.Textbox(label="Color Temperature Values", placeholder="Enter 5 comma-separated values from 2000-10000, e.g., [3001.3, 4000.2, 4400.34, 5488.23, 8888.82]")
145
+ submit_color = gr.Button("Generate Video")
146
+ with gr.Column():
147
+ video_output_color = gr.Video(label="Generated Video")
148
+ gr.Examples(color_examples, [scene_input_color, color_input], [video_output_color], generate_color_video)
149
+ submit_color.click(generate_color_video, [scene_input_color, color_input], [video_output_color])
150
+
151
+ if __name__ == "__main__":
152
+ demo.launch(share=True)