# NOTE: "Spaces: Paused" — Hugging Face Space status banner captured by the
# scrape; not part of the program.
import base64
import datetime
import os
import random
import re
import shutil
import time
import zipfile
from io import BytesIO

import gradio as gr
import numpy as np
import psutil
import pytz
import torch
from diffusers import AutoencoderTiny, DiffusionPipeline, LCMScheduler
from PIL import Image

# ... [previous imports and setup code remains unchanged]
# New function to save prompt to history
def save_prompt_to_history(prompt):
    """Append *prompt* to prompt_history.txt, prefixed with a timestamp.

    One line per prompt, formatted ``YYYY-MM-DD HH:MM:SS: <prompt>``.
    Creates the file on first use.
    """
    # utf-8 explicitly: prompts may contain non-ASCII text, and the
    # platform/locale default encoding is not guaranteed to handle it.
    with open("prompt_history.txt", "a", encoding="utf-8") as f:
        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        f.write(f"{timestamp}: {prompt}\n")
# Modified predict function
def predict(prompt, guidance, steps, seed=1231231):
    """Generate a 512x512 image for *prompt* with the global diffusion pipe.

    Seeds torch's RNG for reproducibility, logs how long the pipeline call
    took, appends the prompt to the history file, and returns the first
    generated PIL image (or None when the pipeline produced none).
    """
    rng = torch.manual_seed(seed)
    started = time.time()
    output = pipe(
        prompt=prompt,
        generator=rng,
        num_inference_steps=steps,
        guidance_scale=guidance,
        width=512,
        height=512,
        output_type="pil",
    )
    print(f"Pipe took {time.time() - started} seconds")
    # Record the prompt so it appears in the history pane and export zip.
    save_prompt_to_history(prompt)
    # ... [rest of the function remains unchanged]
    images = output.images
    return images[0] if len(images) > 0 else None
# Modified save_all_images function
def save_all_images(images):
    """Bundle *images* plus the prompt history into a timestamped zip.

    Each image file is stored under its base name; prompt_history.txt is
    included when it exists.

    Returns:
        (zip_filename, download_link_html) on success, or (None, None)
        when *images* is empty.
    """
    if not images:  # idiomatic empty-sequence guard (was: len(images) == 0)
        return None, None
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    zip_filename = f"images_and_history_{timestamp}.zip"
    with zipfile.ZipFile(zip_filename, "w") as zipf:
        # Add image files, flattened to their base names inside the archive.
        for file in images:
            zipf.write(file, os.path.basename(file))
        # Add prompt history file if any prompts were recorded.
        if os.path.exists("prompt_history.txt"):
            zipf.write("prompt_history.txt")
    # Build an inline data-URI download link for the finished archive.
    # NOTE(review): encode_file_to_base64 is defined in elided setup code —
    # presumably returns the file's bytes base64-encoded as str; verify.
    zip_base64 = encode_file_to_base64(zip_filename)
    download_link = f'<a href="data:application/zip;base64,{zip_base64}" download="{zip_filename}">Download All (Images & History)</a>'
    return zip_filename, download_link
# Function to read prompt history
def read_prompt_history():
    """Return the contents of prompt_history.txt, or a placeholder.

    Returns the whole file as one string, or ``"No prompts yet."`` when
    no history file exists.
    """
    # EAFP: try the open directly instead of the exists()/open() pair,
    # which is racy if the file is removed between the two calls.
    try:
        with open("prompt_history.txt", "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return "No prompts yet."
# Modified Gradio interface | |
with gr.Blocks(css=css) as demo: | |
with gr.Column(elem_id="container"): | |
# ... [previous UI components remain unchanged] | |
# Add prompt history display | |
with gr.Accordion("Prompt History", open=False): | |
prompt_history = gr.Code(label="Prompt History", language="text", interactive=False) | |
# ... [rest of the UI components] | |
# Function to update prompt history display | |
def update_prompt_history(): | |
return read_prompt_history() | |
# Connect components | |
generate_bt.click(fn=predict, inputs=inputs, outputs=[image, prompt_history], show_progress=False) | |
prompt.submit(fn=predict, inputs=inputs, outputs=[image, prompt_history], show_progress=False) | |
# Update prompt history when generating image or when accordion is opened | |
generate_bt.click(fn=update_prompt_history, outputs=prompt_history) | |
prompt.submit(fn=update_prompt_history, outputs=prompt_history) | |
# Modify save_all_button click event | |
save_all_button.click( | |
fn=lambda: save_all_images([f for f in os.listdir() if f.lower().endswith((".png", ".jpg", ".jpeg"))]), | |
outputs=[gr.File(), gr.HTML()] | |
) | |
demo.queue() | |
demo.launch(allowed_paths=["/"]) |