adventus committed on
Commit 45ccb3c · verified · 1 Parent(s): 66e6817

Upload 3 files

Files changed (3)
  1. README.md +13 -12
  2. app.py +116 -154
  3. requirements.txt +4 -6
README.md CHANGED
@@ -1,13 +1,14 @@
- ---
- title: F1 DEV TEST
- emoji: 🖼
- colorFrom: purple
- colorTo: red
- sdk: gradio
- sdk_version: 5.0.1
- app_file: app.py
- pinned: false
- short_description: testspace
- ---
-
+ ---
+ title: FLUX.1 Dev Serverless
+ emoji: 🔥
+ colorFrom: pink
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 4.41.0
+ app_file: app.py
+ pinned: true
+ license: mit
+ short_description: With Neg Prmpt, W/H, Steps, CFG, Seed, & Method. No GPU Req.
+ ---
+
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,154 +1,116 @@
- import gradio as gr
- import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]
-
- css = """
- #col-container {
-     margin: 0 auto;
-     max-width: 640px;
- }
- """
-
- with gr.Blocks(css=css) as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024, # Replace with defaults that work for your model
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024, # Replace with defaults that work for your model
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0, # Replace with defaults that work for your model
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2, # Replace with defaults that work for your model
-                 )
-
-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
-     )
-
- if __name__ == "__main__":
-     demo.launch()
+ import gradio as gr
+ import requests
+ import io
+ import random
+ import os
+ import time
+ from PIL import Image
+ from deep_translator import GoogleTranslator
+ import json
+
+ # Project by Nymbo
+
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
+ API_TOKEN = os.getenv("HF_READ_TOKEN")
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
+ timeout = 100
+
+ # Function to query the API and return the generated image
+ def query(prompt, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
+     if prompt == "" or prompt is None:
+         return None
+
+     key = random.randint(0, 999)
+
+     API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN")])
+     headers = {"Authorization": f"Bearer {API_TOKEN}"}
+
+     # Translate the prompt from Russian to English if necessary
+     prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
+     print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
+
+     # Add some extra flair to the prompt
+     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+     print(f'\033[1mGeneration {key}:\033[0m {prompt}')
+
+     # Prepare the payload for the API call, including width and height
+     payload = {
+         "inputs": prompt,
+         "is_negative": is_negative,
+         "steps": steps,
+         "cfg_scale": cfg_scale,
+         "seed": seed if seed != -1 else random.randint(1, 1000000000),
+         "strength": strength,
+         "parameters": {
+             "width": width, # Pass the width to the API
+             "height": height # Pass the height to the API
+         }
+     }
+
+     # Send the request to the API and handle the response
+     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
+     if response.status_code != 200:
+         print(f"Error: Failed to get image. Response status: {response.status_code}")
+         print(f"Response content: {response.text}")
+         if response.status_code == 503:
+             raise gr.Error(f"{response.status_code} : The model is being loaded")
+         raise gr.Error(f"{response.status_code}")
+
+     try:
+         # Convert the response content into an image
+         image_bytes = response.content
+         image = Image.open(io.BytesIO(image_bytes))
+         print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
+         return image
+     except Exception as e:
+         print(f"Error when trying to open the image: {e}")
+         return None
+
+ # CSS to style the app
+ css = """
+ #app-container {
+     max-width: 800px;
+     margin-left: auto;
+     margin-right: auto;
+ }
+ """
+
+ # Build the Gradio UI with Blocks
+ with gr.Blocks(theme='Nymbo/Nymbo_Theme', css=css) as app:
+     # Add a title to the app
+     gr.HTML("<center><h1>FLUX.1-Dev</h1></center>")
+
+     # Container for all the UI elements
+     with gr.Column(elem_id="app-container"):
+         # Add a text input for the main prompt
+         with gr.Row():
+             with gr.Column(elem_id="prompt-container"):
+                 with gr.Row():
+                     text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=2, elem_id="prompt-text-input")
+
+         # Accordion for advanced settings
+         with gr.Row():
+             with gr.Accordion("Advanced Settings", open=False):
+                 negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
+                 with gr.Row():
+                     width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
+                     height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
+                 steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
+                 cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
+                 strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
+                 seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1) # Setting the seed to -1 will make it random
+                 method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
+
+         # Add a button to trigger the image generation
+         with gr.Row():
+             text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
+
+         # Image output area to display the generated image
+         with gr.Row():
+             image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
+
+         # Bind the button to the query function with the added width and height inputs
+         text_button.click(query, inputs=[text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
+
+ # Launch the Gradio app
+ app.launch(show_api=False, share=False)
requirements.txt CHANGED
@@ -1,6 +1,4 @@
- accelerate
- diffusers
- invisible_watermark
- torch
- transformers
- xformers
+ requests
+ pillow
+ deep-translator
+ langdetect
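
Note on the change: the new app.py no longer runs a local diffusers pipeline on a GPU; it posts the prompt to the hosted serverless Inference API and decodes the returned image bytes with Pillow, which is why torch, diffusers, and xformers drop out of requirements.txt. Below is a minimal sketch of that request pattern, not the app itself; it assumes only that the HF_READ_TOKEN environment variable holds a valid Hugging Face read token, and it mirrors the endpoint and payload shape used in the new app.py (the example prompt and output filename are placeholders).

    import io
    import os

    import requests
    from PIL import Image

    # Serverless Inference API endpoint used by the new app.py.
    API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
    # Assumes HF_READ_TOKEN is set to a valid Hugging Face read token.
    headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

    # "inputs" carries the prompt; width/height ride along under "parameters", as in app.py.
    payload = {
        "inputs": "Astronaut in a jungle, cold color palette, detailed, 8k",
        "parameters": {"width": 1024, "height": 1024},
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
    response.raise_for_status()  # a 503 typically means the model is still loading

    # The response body is raw image bytes; decode with Pillow and save to disk.
    image = Image.open(io.BytesIO(response.content))
    image.save("flux_dev_sample.png")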