pandaphd committed
Commit e2515d4 · 1 Parent(s): 72fca14
.gitattributes CHANGED
@@ -1,35 +1,3 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f978134ea372378fb27d2c9aaeb7db0a8d814207997bdad9ed8f368783d0a857
+ size 1593
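The three `+` lines above are a Git LFS pointer (version, oid, size) rather than the file's actual contents. A minimal Python sketch of reading such a pointer, for illustration only; the `parse_lfs_pointer` helper and the example call are hypothetical and not part of this repository:

```python
# Hypothetical sketch: read a Git LFS pointer file (the "version"/"oid"/"size"
# lines shown in this diff) and return its fields. Not part of this repo.
from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        if key == "version":
            fields["version"] = value                 # e.g. https://git-lfs.github.com/spec/v1
        elif key == "oid":
            fields["oid"] = value.split(":", 1)[1]    # sha256 hex digest of the real file
        elif key == "size":
            fields["size"] = int(value)               # size of the real file in bytes
    return fields

# Example (assuming the repo was checked out without `git lfs pull`):
# parse_lfs_pointer(".gitattributes")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "f9781...", "size": 1593}
```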
README.md CHANGED
@@ -1,14 +1,3 @@
- ---
- title: Generative Photography
- emoji: 📈
- colorFrom: blue
- colorTo: blue
- sdk: gradio
- sdk_version: 5.20.0
- app_file: app.py
- pinned: false
- license: cc-by-nc-nd-4.0
- short_description: Demo for Generative Photography
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c83176a74a800ceebc4069a48b824b4c1a7b2f06d02ff5959e63eebc2a8d222
+ size 331
app.py CHANGED
@@ -1,152 +1,3 @@
- import gradio as gr
- import json
- import torch
- from inference_bokehK import load_models as load_bokeh_models, run_inference as run_bokeh_inference, OmegaConf
- from inference_focal_length import load_models as load_focal_models, run_inference as run_focal_inference
- from inference_shutter_speed import load_models as load_shutter_models, run_inference as run_shutter_inference
- from inference_color_temperature import load_models as load_color_models, run_inference as run_color_inference
-
- torch.manual_seed(42)
-
- bokeh_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_bokehK.yaml")
- bokeh_pipeline, bokeh_device = load_bokeh_models(bokeh_cfg)
-
- focal_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_focal_length.yaml")
- focal_pipeline, focal_device = load_focal_models(focal_cfg)
-
- shutter_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_shutter_speed.yaml")
- shutter_pipeline, shutter_device = load_shutter_models(shutter_cfg)
-
- color_cfg = OmegaConf.load("configs/inference_genphoto/adv3_256_384_genphoto_relora_color_temperature.yaml")
- color_pipeline, color_device = load_color_models(color_cfg)
-
-
- def generate_bokeh_video(base_scene, bokehK_list):
-     try:
-         torch.manual_seed(42)
-         if len(json.loads(bokehK_list)) != 5:
-             raise ValueError("Exactly 5 Bokeh K values required")
-         return run_bokeh_inference(
-             pipeline=bokeh_pipeline, tokenizer=bokeh_pipeline.tokenizer,
-             text_encoder=bokeh_pipeline.text_encoder, base_scene=base_scene,
-             bokehK_list=bokehK_list, device=bokeh_device
-         )
-     except Exception as e:
-         return f"Error: {str(e)}"
-
- def generate_focal_video(base_scene, focal_length_list):
-     try:
-         torch.manual_seed(42)
-         if len(json.loads(focal_length_list)) != 5:
-             raise ValueError("Exactly 5 focal length values required")
-         return run_focal_inference(
-             pipeline=focal_pipeline, tokenizer=focal_pipeline.tokenizer,
-             text_encoder=focal_pipeline.text_encoder, base_scene=base_scene,
-             focal_length_list=focal_length_list, device=focal_device
-         )
-     except Exception as e:
-         return f"Error: {str(e)}"
-
- def generate_shutter_video(base_scene, shutter_speed_list):
-     try:
-         torch.manual_seed(42)
-         if len(json.loads(shutter_speed_list)) != 5:
-             raise ValueError("Exactly 5 shutter speed values required")
-         return run_shutter_inference(
-             pipeline=shutter_pipeline, tokenizer=shutter_pipeline.tokenizer,
-             text_encoder=shutter_pipeline.text_encoder, base_scene=base_scene,
-             shutter_speed_list=shutter_speed_list, device=shutter_device
-         )
-     except Exception as e:
-         return f"Error: {str(e)}"
-
- def generate_color_video(base_scene, color_temperature_list):
-     try:
-         torch.manual_seed(42)
-         if len(json.loads(color_temperature_list)) != 5:
-             raise ValueError("Exactly 5 color temperature values required")
-         return run_color_inference(
-             pipeline=color_pipeline, tokenizer=color_pipeline.tokenizer,
-             text_encoder=color_pipeline.text_encoder, base_scene=base_scene,
-             color_temperature_list=color_temperature_list, device=color_device
-         )
-     except Exception as e:
-         return f"Error: {str(e)}"
-
-
-
- bokeh_examples = [
-     ["A variety of potted plants are displayed on a window sill, with some of them placed in yellow and white cups. The plants are arranged in different sizes and shapes, creating a visually appealing display.", "[18.0, 14.0, 10.0, 6.0, 2.0]"],
-     ["A colorful backpack with a floral pattern is sitting on a table next to a computer monitor.", "[2.3, 5.8, 10.2, 14.8, 24.9]"]
- ]
-
- focal_examples = [
-     ["A small office cubicle with a desk.", "[25.1, 36.1, 47.1, 58.1, 69.1]"],
-     ["A large white couch in a living room.", "[55.0, 46.0, 37.0, 28.0, 25.0]"]
- ]
-
- shutter_examples = [
-     ["A brown and orange leather handbag.", "[0.11, 0.22, 0.33, 0.44, 0.55]"],
-     ["A variety of potted plants.", "[0.2, 0.49, 0.69, 0.75, 0.89]"]
- ]
-
- color_examples = [
-     ["A blue sky with mountains.", "[5455.0, 5155.0, 5555.0, 6555.0, 7555.0]"],
-     ["A red couch in front of a window.", "[3500.0, 5500.0, 6500.0, 7500.0, 8500.0]"]
- ]
-
-
- with gr.Blocks(title="Generative Photography") as demo:
-     gr.Markdown("# **Generative Photography: Scene-Consistent Camera Control for Realistic Text-to-Image Synthesis** ")
-
-     with gr.Tabs():
-         with gr.Tab("BokehK Effect"):
-             gr.Markdown("### Generate Frames with Bokeh Blur Effect")
-             with gr.Row():
-                 with gr.Column():
-                     scene_input_bokeh = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
-                     bokeh_input = gr.Textbox(label="Bokeh Blur Values", placeholder="Enter 5 comma-separated values from 1-30, e.g., [2.44, 8.3, 10.1, 17.2, 24.0]")
-                     submit_bokeh = gr.Button("Generate Video")
-                 with gr.Column():
-                     video_output_bokeh = gr.Video(label="Generated Video")
-             gr.Examples(bokeh_examples, [scene_input_bokeh, bokeh_input], [video_output_bokeh], generate_bokeh_video)
-             submit_bokeh.click(generate_bokeh_video, [scene_input_bokeh, bokeh_input], [video_output_bokeh])
-
-         with gr.Tab("Focal Length Effect"):
-             gr.Markdown("### Generate Frames with Focal Length Effect")
-             with gr.Row():
-                 with gr.Column():
-                     scene_input_focal = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
-                     focal_input = gr.Textbox(label="Focal Length Values", placeholder="Enter 5 comma-separated values from 24-70, e.g., [25.1, 30.2, 33.3, 40.8, 54.0]")
-                     submit_focal = gr.Button("Generate Video")
-                 with gr.Column():
-                     video_output_focal = gr.Video(label="Generated Video")
-             gr.Examples(focal_examples, [scene_input_focal, focal_input], [video_output_focal], generate_focal_video)
-             submit_focal.click(generate_focal_video, [scene_input_focal, focal_input], [video_output_focal])
-
-         with gr.Tab("Shutter Speed Effect"):
-             gr.Markdown("### Generate Frames with Shutter Speed Effect")
-             with gr.Row():
-                 with gr.Column():
-                     scene_input_shutter = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
-                     shutter_input = gr.Textbox(label="Shutter Speed Values", placeholder="Enter 5 comma-separated values from 0.1-1.0, e.g., [0.15, 0.32, 0.53, 0.62, 0.82]")
-                     submit_shutter = gr.Button("Generate Video")
-                 with gr.Column():
-                     video_output_shutter = gr.Video(label="Generated Video")
-             gr.Examples(shutter_examples, [scene_input_shutter, shutter_input], [video_output_shutter], generate_shutter_video)
-             submit_shutter.click(generate_shutter_video, [scene_input_shutter, shutter_input], [video_output_shutter])
-
-         with gr.Tab("Color Temperature Effect"):
-             gr.Markdown("### Generate Frames with Color Temperature Effect")
-             with gr.Row():
-                 with gr.Column():
-                     scene_input_color = gr.Textbox(label="Scene Description", placeholder="Describe the scene you want to generate...")
-                     color_input = gr.Textbox(label="Color Temperature Values", placeholder="Enter 5 comma-separated values from 2000-10000, e.g., [3001.3, 4000.2, 4400.34, 5488.23, 8888.82]")
-                     submit_color = gr.Button("Generate Video")
-                 with gr.Column():
-                     video_output_color = gr.Video(label="Generated Video")
-             gr.Examples(color_examples, [scene_input_color, color_input], [video_output_color], generate_color_video)
-             submit_color.click(generate_color_video, [scene_input_color, color_input], [video_output_color])
-
- if __name__ == "__main__":
-     demo.launch(share=True)
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14e58bab9ed2b6eac8619e2b9c3c3ff03bf4689406c28de8eb49237f6f25c23b
+ size 8306
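Each of the four `generate_*` handlers in the removed app.py above enforces the same input contract: a JSON list containing exactly five camera-parameter values. A small self-contained sketch of that check, for illustration only (`parse_five_values` is a hypothetical name; the handlers inline this logic instead of calling a helper):

```python
import json

def parse_five_values(raw: str) -> list:
    """Mirror the validation each generate_* handler performs:
    the input must be a JSON list with exactly five numeric entries."""
    values = json.loads(raw)
    if len(values) != 5:
        raise ValueError("Exactly 5 values required")
    return [float(v) for v in values]

# Example inputs taken from the demo's own example lists:
print(parse_five_values("[2.3, 5.8, 10.2, 14.8, 24.9]"))    # bokeh K values
print(parse_five_values("[25.1, 36.1, 47.1, 58.1, 69.1]"))  # focal lengths
```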
configs/inference_genphoto/adv3_256_384_genphoto_relora_bokehK.yaml ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a74bacc98940eb895b1ac635f5e8b4fabb811d98c8a067ece44c0ac4ff460842
+ size 1823
configs/inference_genphoto/adv3_256_384_genphoto_relora_color_temperature.yaml ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6f6e2911a8e440f4796db8ae67b919659067b859bacd7575953da6c2b8bfb2d
+ size 1845
configs/inference_genphoto/adv3_256_384_genphoto_relora_focal_length.yaml ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c8c9059792e1ca206c44edd1cb29765c5ddb1f54551a1b1fc7010bf292420a8
+ size 1834
configs/inference_genphoto/adv3_256_384_genphoto_relora_shutter_speed.yaml ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36c7d618e29249ce9086f5424f9a718b0faac002edd19ee0fd0335b85fdc8b7f
+ size 1837
environment.yaml ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a55fe5d623a3450e046bd7d0d095676d9d2ca62d36d19cfda8e9307007634970
+ size 435
genphoto/data/dataset.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c11d5ea01a3dd35a0987915a62ffb2c4c967ff4c81d2c9f0fe876f2daa93aad
+ size 38885
genphoto/models/attention.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82630247828d56f38b979a4a7b9bc12290ada3a1ce5be1d6153d07dbe4baaaa0
+ size 5313
genphoto/models/attention_processor.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fc36c35808aed64eb238e3dba643b51961992388dd76d945dec36760ab87557
+ size 16681
genphoto/models/camera_adaptor.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b98af7dc452f718e7b74536412d017231a15d69933a224cd1cb9557fe5853ba5
+ size 9775
genphoto/models/ccl_embedding.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:599f8f75460a5b776dc213e624d4e7fc6080c8311d14ffe572501e46512141bf
+ size 2564
genphoto/models/motion_module.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a87e7341d4c8f441adbba3acf43b289589ed0825af8197262425ec35c708d32
+ size 15717
genphoto/models/resnet.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17d68816bfa42b445e7b3c9f6da088e08024a99b838bb1ca74a327e6a9116d50
+ size 17833
genphoto/models/unet.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66cd2f1e572a9d63f9ff6e1dc5bbacadd02916fc60cef9505761b6470c51f08e
+ size 61839
genphoto/models/unet_blocks.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:767e2392b19861d964d37159b591b9d489abc9a30332fb1a337694d7f3a94f28
+ size 34808
genphoto/pipelines/pipeline_animation.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:453fc7220c98fbe0fa70b19aade5b4403e470c09efed70147f2fcf35dd782d5b
+ size 34090
genphoto/utils/convert_from_ckpt.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ca60e78e034ed48ea1b7d48c09d2707940b1e25b749ee68bb6b601a96270435
+ size 25125
genphoto/utils/convert_lora_safetensor_to_diffusers.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c9162744237b045715cfe587c2be0117a49f538a99c1a853a2bf4c2d3695b69
+ size 5981
genphoto/utils/util.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb53dbb7da4c905c1a68d9f74d5ac1e01ea13e82a2506117bc3d3436109bb1b4
+ size 4875
inference_bokehK.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d71226af9e998b6f458bf837712b9ebcffab037ac09c29ea48742ba4d832b257
+ size 8968
inference_color_temperature.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ed5fe8385e56e837fdb7c8ca21973136a42f4c3b09c6223c800dcc60955d61d
+ size 14631
inference_focal_length.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c41bc79a24be2dce1457e285e6fcd5cb3396b677bae30ae010e3f23ae993817c
+ size 15177
inference_shutter_speed.py ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12eb2507454a07a5e565233b738991782d191e932470176783be93773fb0f209
+ size 13888
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1766bd0739223e95b2fde76b862d853da41c15b0d97273e7e90f4cd4a4d77a60
+ size 290