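"""Gradio demo: Depth2Image conditioned on a depth map predicted by DepthAnything V2.

Part of the panna project: https://github.com/abacws-abacus/panna
"""
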
import gradio as gr
import os
from diffusers.utils import load_image
import spaces
from panna import Depth2Image, DepthAnythingV2

# Load the depth-conditioned Stable Diffusion pipeline and the DepthAnything V2 depth estimator.
model_image = Depth2Image("stabilityai/stable-diffusion-2-depth")
model_depth = DepthAnythingV2("depth-anything/Depth-Anything-V2-Large-hf")
title = ("# [Depth2Image](https://huggingface.co/stabilityai/stable-diffusion-2-depth) with [DepthAnythingV2](https://huggingface.co/depth-anything/Depth-Anything-V2-Large-hf)\n"
         "Depth2Image guided by a depth map predicted by DepthAnything V2. This demo is part of the [panna](https://github.com/abacws-abacus/panna) project.")
# Download the DepthAnything V2 demo images once at startup to serve as clickable examples.
example_files = []
for n in range(10):
    url = f"https://huggingface.co/spaces/depth-anything/Depth-Anything-V2/resolve/main/assets/examples/demo{n:0>2}.jpg"
    load_image(url).save(os.path.basename(url))
    example_files.append(os.path.basename(url))


@spaces.GPU
def infer(init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps):
    # Estimate a depth map for the input image, then condition the generation on it.
    depth = model_depth.image2depth([init_image])
    # panna models operate on batches: wrap single inputs in lists and take the first output.
    return model_image.text2image(
        [init_image],
        depth_maps=depth,
        prompt=[prompt],
        negative_prompt=[negative_prompt],
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        height=height,
        width=width,
        seed=seed
    )[0]


# Build the Gradio UI: a prompt row, input/result images, and an advanced-settings panel.
with gr.Blocks() as demo:
    gr.Markdown(title)
    with gr.Row():
        prompt = gr.Text(label="Prompt", show_label=True, max_lines=1, placeholder="Enter your prompt", container=False)
        run_button = gr.Button("Run", scale=0)
    with gr.Row():
        init_image = gr.Image(label="Input Image", type="pil")
        result = gr.Image(label="Result")
    with gr.Accordion("Advanced Settings", open=False):
        negative_prompt = gr.Text(label="Negative Prompt", max_lines=1, placeholder="Enter a negative prompt")
        seed = gr.Slider(label="Seed", minimum=0, maximum=1_000_000, step=1, value=0)
        with gr.Row():
            width = gr.Slider(label="Width", minimum=256, maximum=1344, step=64, value=1024)
            height = gr.Slider(label="Height", minimum=256, maximum=1344, step=64, value=1024)
        with gr.Row():
            guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=7.5)
            num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=50)
    examples = gr.Examples(examples=example_files, inputs=[init_image])
    # Run inference when the button is clicked or either text box is submitted.
    gr.on(
        triggers=[run_button.click, prompt.submit, negative_prompt.submit],
        fn=infer,
        inputs=[init_image, prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result]
    )
demo.launch()
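
# Note: `demo.launch()` serves the app locally; on a Hugging Face ZeroGPU Space the
# `spaces.GPU` decorator above allocates a GPU for each call to `infer`.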