import io

import gradio as gr
import requests
import torch
from diffusers import DiffusionPipeline
from PIL import Image
# Alternative path: query the hosted Inference API instead of running the model locally.
# (Depending on the provider setup, a model-specific route may need to be appended to this base URL.)
API_URL = "https://router.huggingface.co/hf-inference/v1"
headers = {"Authorization": "Bearer hf_xxxxxxxxxxxxxxxxxxxxxxxx"}

def query(payload):
    # POST the payload to the Inference API and return the raw response bytes
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.content
# The returned bytes can be opened with PIL.Image, for example
# (example prompt below is illustrative, not taken from the Space):
# image_bytes = query({"inputs": "An astronaut riding a horse"})
# image = Image.open(io.BytesIO(image_bytes))
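# The Gradio handler below could equally be backed by the Inference API instead of the
# local pipeline (a sketch, assuming the endpoint returns raw image bytes for the prompt):
# def generate_image_api(prompt: str):
#     return Image.open(io.BytesIO(query({"inputs": prompt})))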
# Load the FLUX.1-dev model from Hugging Face and move it to the GPU
pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
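# torch.bfloat16 is another common dtype choice for FLUX.1-dev, and if GPU memory is tight,
# diffusers' CPU offloading can be used instead of .to("cuda") (a sketch; whether it is
# needed depends on the hardware this Space runs on):
# pipe.enable_model_cpu_offload()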
# Generate an image for the given text prompt using the local pipeline
def generate_image(prompt: str):
    image = pipe(prompt).images[0]
    return image
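# Generation can also be tuned with extra pipeline arguments (the values here are
# illustrative assumptions, not settings taken from this Space):
# image = pipe(prompt, num_inference_steps=30, guidance_scale=3.5).images[0]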
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_image,
    inputs="text",
    outputs="image",
    title="Text-to-Image with FLUX.1-dev",
    description="Enter a prompt to generate an image using the FLUX.1-dev model.",
)
# Launch the app
iface.launch()
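# A plausible requirements.txt for running this app as a Space (the package list is an
# assumption based on a typical diffusers + Gradio setup, not read from the Space itself):
# gradio
# torch
# diffusers
# transformers
# accelerate
# sentencepiece
# requests
# Pillow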