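"""Flask app for a Hugging Face Space that proxies the Serverless Inference API
to generate images with black-forest-labs/FLUX.1-dev.

Prompts are translated from Russian to English before being sent to the API,
and the generated image is returned as a PNG by the /generate endpoint.
"""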
from flask import Flask, request, jsonify, send_file, render_template_string, make_response
import requests
import io
import os
import random
from PIL import Image
from deep_translator import GoogleTranslator
app = Flask(__name__)
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
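# HF_READ_TOKEN must be provided in the environment (e.g. as a Space secret);
# without a valid token the Inference API rejects requests to this gated model.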
timeout = 50000  # request timeout (in seconds) passed to requests.post

# Function to query the API and return the generated image
def query(prompt, negative_prompt="", steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024, num_inference_steps=30, guidance_scale=7.5, top_k=50, top_p=0.9, eta=0.1):
    if not prompt:
        return None, "Prompt is required"

    key = random.randint(0, 999)

    # Translate the prompt from Russian to English if necessary
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'Generation {key} translation: {prompt}')

    # Add some extra flair to the prompt
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'Generation {key}: {prompt}')

    payload = {
        "inputs": prompt,
        "is_negative": False,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength,
        "negative_prompt": negative_prompt,
        "top_k": top_k,
        "top_p": top_p,
        "eta": eta,
        "parameters": {
            "width": width,
            "height": height,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale
        }
    }
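    # Note: the text-to-image Inference API mainly honours "inputs" and the nested
    # "parameters" dict; the extra top-level keys above (steps, cfg_scale, top_k, ...)
    # may simply be ignored by the backend.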
    for attempt in range(3):  # retry up to 3 times
        try:
            response = requests.post(API_URL, json=payload, headers=headers, timeout=timeout)
            if response.status_code != 200:
                return None, f"Error: Failed to get image. Status code: {response.status_code}, Details: {response.text}"
            image_bytes = response.content
            image = Image.open(io.BytesIO(image_bytes))
            return image, None
        except requests.exceptions.Timeout:
            if attempt < 2:  # retry unless this was the last attempt
                print("Timeout occurred, retrying...")
                continue
            return None, "Error: The request timed out. Please try again."
        except requests.exceptions.RequestException as e:
            return None, f"Request Exception: {str(e)}"
        except Exception as e:
            return None, f"Error when trying to open the image: {e}"

# Set a Content-Security-Policy header on every response
@app.after_request
def add_security_headers(response):
    response.headers['Content-Security-Policy'] = (
        "default-src 'self'; "
        # CSP source lists take host patterns, not regexes; allow any http(s) origin
        "connect-src 'self' https: http:; "
        "img-src 'self' data:; "
        "style-src 'self' 'unsafe-inline'; "
        "script-src 'self' 'unsafe-inline'; "
    )
    return response
# HTML template for the index page
index_html = """
"""

@app.route('/')
def index():
    return render_template_string(index_html)

@app.route('/generate', methods=['GET'])
def generate_image():
    if request.headers.getlist("X-Forwarded-For"):
        client_ip = request.headers.getlist("X-Forwarded-For")[0]
    else:
        client_ip = request.remote_addr
    print(f"Client IP: {client_ip}")

    prompt = request.args.get("prompt", "")
    negative_prompt = request.args.get("negative_prompt", "")
    steps = int(request.args.get("steps", 35))
    cfg_scale = float(request.args.get("cfgs", 7))
    sampler = request.args.get("sampler", "DPM++ 2M Karras")
    strength = float(request.args.get("strength", 0.7))
    seed = int(request.args.get("seed", -1))
    width = int(request.args.get("width", 1024))
    height = int(request.args.get("height", 1024))
    num_inference_steps = int(request.args.get("num_inference_steps", 30))
    guidance_scale = float(request.args.get("guidance_scale", 7.5))
    top_k = int(request.args.get("top_k", 50))
    top_p = float(request.args.get("top_p", 0.9))
    eta = float(request.args.get("eta", 0.1))

    image, error = query(prompt, negative_prompt, steps, cfg_scale, sampler, seed, strength, width, height, num_inference_steps, guidance_scale, top_k, top_p, eta)
    if error:
        return jsonify({"error": error}), 400

    img_bytes = io.BytesIO()
    image.save(img_bytes, format='PNG')
    img_bytes.seek(0)
    return send_file(img_bytes, mimetype='image/png')

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=7860)
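
# Example request (assuming the app is running locally on port 7860):
#
#   curl "http://localhost:7860/generate?prompt=a%20red%20fox&width=768&height=768" -o fox.png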