Spaces:
Running
Running
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import gradio as gr
import torch
from diffusers import FluxPriorReduxPipeline, FluxPipeline
from diffusers.utils import load_image
from huggingface_hub import login
from PIL import Image
import os

# Authenticate with the Hugging Face Hub.
# Fail fast when the key is missing: login(None) would otherwise fall back
# to an interactive prompt, which hangs in a headless Space container.
hf_api_key = os.getenv("HF_API_KEY")
if not hf_api_key:
    raise RuntimeError("HF_API_KEY environment variable is not set")
login(hf_api_key)

# Load the Prior Redux pipeline (CPU).
# NOTE: `use_auth_token` is deprecated in recent huggingface_hub/diffusers;
# `token=True` reuses the credentials stored by login() above.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Redux-dev",
    token=True,  # authenticate with the logged-in credentials
).to("cpu")

# Load the main FLUX pipeline (CPU). Both text encoders are dropped because
# Redux image conditioning replaces the text prompt entirely.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    token=True,  # authenticate with the logged-in credentials
    text_encoder=None,
    text_encoder_2=None,
).to("cpu")
def process_image(image_path):
    """Generate an image from the file at *image_path* with FLUX Redux.

    The source image is turned into conditioning embeddings by the Prior
    Redux pipeline, then rendered by the main FLUX pipeline on the CPU.
    Returns the first generated PIL image.
    """
    # Load and normalize the input to 3-channel RGB.
    source = Image.open(image_path).convert("RGB")

    # Redux pre-processing: produces the kwargs (embeddings) the main
    # pipeline consumes in place of a text prompt.
    prior_kwargs = pipe_prior_redux(source)

    # Fixed seed so repeated runs on the same input are reproducible.
    rng = torch.Generator("cpu").manual_seed(0)

    result = pipe(
        guidance_scale=2.5,
        num_inference_steps=50,
        generator=rng,
        **prior_kwargs,
    )
    return result.images[0]
43 |
+
|
# Gradio callback wiring.
def infer(image):
    """Gradio handler: delegate to process_image and hand back its result."""
    return process_image(image)
48 |
+
|
# Assemble the Gradio UI: one input image, one output image, one button.
with gr.Blocks() as demo:
    gr.Markdown("# FLUX Image Generation App")

    with gr.Row():
        # The handler receives a file path; the result comes back as PIL.
        source_component = gr.Image(type="filepath", label="Input Image")
        result_component = gr.Image(type="pil", label="Generated Image")

    generate_btn = gr.Button("Generate")

    generate_btn.click(
        fn=infer,
        inputs=[source_component],
        outputs=[result_component],
    )

demo.launch()