Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,64 +1,53 @@
|
|
1 |
import gradio as gr
|
2 |
-
import
|
|
|
|
|
|
|
3 |
from PIL import Image
|
4 |
-
from io import BytesIO
|
5 |
import os
|
6 |
|
7 |
# Hugging Face HubのAPIキーを設定
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
|
16 |
-
|
17 |
-
|
18 |
-
files = {
|
19 |
-
"file": ("image.png", image_bytes, "image/png"),
|
20 |
-
}
|
21 |
-
data = {"parameters": parameters} if parameters else {}
|
22 |
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
|
|
|
|
|
|
27 |
|
28 |
-
|
29 |
-
|
30 |
-
with open(image_path, "rb") as f:
|
31 |
-
image_bytes = f.read()
|
32 |
-
|
33 |
-
# Prior Reduxモデルで事前処理
|
34 |
-
prior_response = call_hf_api(API_URL_PRIOR, image_bytes)
|
35 |
-
|
36 |
-
# FLUXモデルで画像生成
|
37 |
-
flux_payload = {
|
38 |
-
"guidance_scale": 2.5,
|
39 |
-
"num_inference_steps": 50,
|
40 |
-
"seed": 0, # 再現性のためのシード値
|
41 |
-
}
|
42 |
-
flux_response = call_hf_api(API_URL_FLUX, image_bytes, parameters=flux_payload)
|
43 |
-
|
44 |
-
# 生成された画像を取得
|
45 |
-
generated_image_url = flux_response.get("generated_image_url")
|
46 |
-
if not generated_image_url:
|
47 |
-
raise Exception("Generated image URL not found in the response.")
|
48 |
-
|
49 |
-
# URLから画像をダウンロード
|
50 |
-
response = requests.get(generated_image_url)
|
51 |
-
generated_image = Image.open(BytesIO(response.content))
|
52 |
-
|
53 |
-
return generated_image
|
54 |
|
55 |
# Gradioインターフェースを構築
|
56 |
def infer(image):
|
57 |
-
result_image =
|
58 |
return result_image
|
59 |
|
60 |
with gr.Blocks() as demo:
|
61 |
-
gr.Markdown("# FLUX Image Generation App
|
62 |
|
63 |
with gr.Row():
|
64 |
input_image = gr.Image(type="filepath", label="Input Image")
|
|
|
import gradio as gr
import torch
from diffusers import FluxPriorReduxPipeline, FluxPipeline
from diffusers.utils import load_image
from huggingface_hub import login
from PIL import Image
import os

# Authenticate against the Hugging Face Hub with the API key from the environment.
# NOTE(review): assumes HF_API_KEY is set in the Space secrets; login(None) would
# fail at startup — confirm the deployment environment provides it.
login(os.getenv("HF_API_KEY"))

# Load the Prior Redux pipeline on CPU.
# `token=True` replaces the deprecated `use_auth_token=True` kwarg and reuses the
# credentials established by login() above.
pipe_prior_redux = FluxPriorReduxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Redux-dev",
    token=True,  # authenticate with the logged-in Hub token
).to("cpu")  # CPU execution (no GPU assumed)

# Load the main FLUX generation pipeline on CPU.
# Both text encoders are dropped: conditioning comes entirely from the Redux
# prior output that is splatted into the pipeline call in process_image().
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    token=True,  # authenticate with the logged-in Hub token
    text_encoder=None,
    text_encoder_2=None,
).to("cpu")  # CPU execution (no GPU assumed)
def process_image(image_path):
    """Generate an image variation from the file at *image_path*.

    The source image is conditioned through the Redux prior pipeline, then
    rendered by the main FLUX pipeline on CPU with a fixed seed. Returns the
    first generated PIL image.
    """
    # Load the source image and normalise it to RGB.
    source = Image.open(image_path).convert("RGB")

    # Pre-process with the Prior Redux pipeline to obtain the conditioning
    # outputs consumed by the FLUX pipeline below.
    prior_output = pipe_prior_redux(source)

    # Run the main FLUX pipeline; the prior output is splatted in as keyword
    # arguments (it supplies the prompt conditioning, since the text encoders
    # were removed at load time).
    generation = pipe(
        guidance_scale=2.5,
        num_inference_steps=50,
        # Fixed seed on a CPU generator for reproducible output.
        generator=torch.Generator("cpu").manual_seed(0),
        **prior_output,
    )

    # `.images` is a list of PIL images; return the single generated frame.
    return generation.images[0]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
43 |
|
44 |
# Gradioインターフェースを構築
|
45 |
def infer(image):
    """Gradio callback: run the FLUX variation pipeline on the uploaded file path.

    Thin wrapper so the UI event wiring stays decoupled from the pipeline code.
    """
    return process_image(image)
|
48 |
|
49 |
with gr.Blocks() as demo:
|
50 |
+
gr.Markdown("# FLUX Image Generation App")
|
51 |
|
52 |
with gr.Row():
|
53 |
input_image = gr.Image(type="filepath", label="Input Image")
|