import gradio as gr
import threading
import os
import torch
from diffusers import StableDiffusionPipeline

# Use all available CPU cores for OpenMP and for PyTorch intra-op parallelism
os.environ["OMP_NUM_THREADS"] = str(os.cpu_count())
torch.set_num_threads(os.cpu_count())

# Load models
model1 = StableDiffusionPipeline.from_pretrained("prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
model2 = StableDiffusionPipeline.from_pretrained("Purz/face-projection")
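# Note (assumption): the first repo hosts LoRA weights rather than a full pipeline;
# depending on your diffusers version you may need to load a base pipeline and attach
# the weights with pipe.load_lora_weights(...) instead of calling from_pretrained on
# the LoRA repo directly. The first call also downloads the weights, which can be slow.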

# Event to stop image generation
stop_event = threading.Event()

def generate_images(text, selected_model):
    """Generate images based on the input text and selected model."""
    stop_event.clear()

    if selected_model == "Model 1 (Turbo Realism)":
        model = model1
    elif selected_model == "Model 2 (Face Projection)":
        model = model2
    else:
        # Surface the problem in the UI instead of returning strings to Image components
        raise gr.Error("Invalid model selection.")

    try:
        results = []
        for i in range(3):
            if stop_event.is_set():
                # Pad with None so all three Image outputs still receive valid values
                return results + [None] * (3 - len(results))

            modified_text = f"{text} variation {i+1}"
            result = model(modified_text).images[0]  # Run the pipeline for this variation
            results.append(result)

        return results
    except Exception as e:
        raise gr.Error(f"Error generating images: {e}")

def stop_generation():
    """Signal the generation loop to stop and clear the image outputs."""
    stop_event.set()
    # The outputs are Image components, so return None values rather than strings
    return [None] * 3

# Gradio interface
with gr.Blocks() as interface:
    gr.Markdown(
        "### ⚠ Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."
    )
    
    text_input = gr.Textbox(label="Describe what you want to generate:", placeholder="Type your prompt...")
    model_selector = gr.Radio(
        ["Model 1 (Turbo Realism)", "Model 2 (Face Projection)"],
        label="Select Model",
        value="Model 1 (Turbo Realism)"
    )
    
    with gr.Row():
        generate_button = gr.Button("Generate 3 Images 🎨")
        stop_button = gr.Button("Stop Image Generation")
    
    with gr.Row():
        output1 = gr.Image(label="Generated Image 1")
        output2 = gr.Image(label="Generated Image 2")
        output3 = gr.Image(label="Generated Image 3")
    
    generate_button.click(
        generate_images,
        inputs=[text_input, model_selector],
        outputs=[output1, output2, output3],
        api_name="generate_images",  # expose this event as a named API endpoint
    )
    stop_button.click(stop_generation, inputs=[], outputs=[output1, output2, output3])

# Launch the interface (the API endpoint is named on the click event above;
# gr.Blocks.launch() does not take an api_name argument)
interface.launch()
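
# A minimal sketch of calling the named endpoint from another process with
# gradio_client. It assumes the app is reachable at Gradio's default local URL;
# the prompt text is only an illustrative value.
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860/")
#     images = client.predict(
#         "a castle at sunset",        # text prompt
#         "Model 1 (Turbo Realism)",   # model selection
#         api_name="/generate_images",
#     )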