from share_btn import community_icon_html, loading_icon_html, share_js
import os, subprocess
import torch

# def setup():
#     install_cmds = [
#         ['pip', 'install', 'ftfy', 'gradio', 'regex', 'tqdm', 'transformers==4.21.2', 'timm', 'fairscale', 'requests'],
#         ['pip', 'install', 'open_clip_torch'],
#         ['pip', 'install', '-e', 'git+https://github.com/pharmapsychotic/BLIP.git@lib#egg=blip'],
#         ['git', 'clone', '-b', 'open-clip', 'https://github.com/pharmapsychotic/clip-interrogator.git']
#     ]
#     for cmd in install_cmds:
#         print(subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8'))
# setup()

# download cache files
# print("Download preprocessed cache files...")
# CACHE_URLS = [
#     'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_artists.pkl',
#     'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_flavors.pkl',
#     'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_mediums.pkl',
#     'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_movements.pkl',
#     'https://huggingface.co/pharma/ci-preprocess/resolve/main/ViT-H-14_laion2b_s32b_b79k_trendings.pkl',
# ]
# os.makedirs('cache', exist_ok=True)
# for url in CACHE_URLS:
#     print(subprocess.run(['wget', url, '-P', 'cache'], stdout=subprocess.PIPE).stdout.decode('utf-8'))

import sys
sys.path.append('src/blip')
sys.path.append('clip-interrogator')

import gradio as gr
from clip_interrogator import Config, Interrogator
import io
from PIL import Image

config = Config()
config.device = 'cuda' if torch.cuda.is_available() else 'cpu'
config.blip_offload = not torch.cuda.is_available()  # keep BLIP on the GPU when one is available
config.chunk_size = 2048
config.flavor_intermediate_count = 512
config.blip_num_beams = 64

ci = Interrogator(config)

def inference(input_images, mode, best_max_flavors):
    # Interrogate each uploaded image and collect (image, prompt) pairs
    prompt_results = []
    for image_bytes in input_images:
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        if mode == 'best':
            prompt_result = ci.interrogate(image, max_flavors=int(best_max_flavors))
        elif mode == 'classic':
            prompt_result = ci.interrogate_classic(image)
        else:
            prompt_result = ci.interrogate_fast(image)
        prompt_results.append((image, prompt_result))
    # gr.Gallery renders (image, caption) tuples, so each prompt becomes its image's label
    return prompt_results

title = """

CLIP Interrogator 2.1

Want to figure out what a good prompt might be to create new images like an existing one?
The CLIP Interrogator is here to get you answers!
This version is specialized for producing nice prompts for use with Stable Diffusion 2.0 using the ViT-H-14 OpenCLIP model!

""" article = """

Server busy? You can also run on Google Colab

Has this been helpful to you? Follow Pharma on Twitter @pharmapsychotic and check out more tools at his AI generative art tools list

""" css = ''' #col-container {width: width: 80%;; margin-left: auto; margin-right: auto;} a {text-decoration-line: underline; font-weight: 600;} .animate-spin { animation: spin 1s linear infinite; } @keyframes spin { from { transform: rotate(0deg); } to { transform: rotate(360deg); } } #share-btn-container { display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; } #share-btn { all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; } #share-btn * { all: unset; } #share-btn-container div:nth-child(-n+2){ width: auto !important; min-height: 0px !important; } #share-btn-container .wrap { display: none !important; } #gallery .caption-label { font-size: 15px !important; right: 0 !important; max-width: 100% !important; text-overflow: clip !important; white-space: normal !important; overflow: auto !important; height: 20% !important; } #gallery .caption { padding: var(--size-2) var(--size-3) !important; text-overflow: clip !important; white-space: normal !important; /* Allows the text to wrap */ color: var(--block-label-text-color) !important; font-weight: var(--weight-semibold) !important; text-align: center !important; height: 100% !important; font-size: 17px !important; } ''' with gr.Blocks(css=css) as block: with gr.Column(elem_id="col-container"): gr.HTML(title) input_image = gr.Files(label = "Inputs", file_count="multiple", type='bytes', elem_id='inputs') with gr.Row(): mode_input = gr.Radio(['best', 'classic', 'fast'], label='Select mode', value='best') flavor_input = gr.Slider(minimum=2, maximum=24, step=2, value=4, label='best mode max flavors') submit_btn = gr.Button("Submit") # rows, cols = NUM_IMAGES //3, gallery = gr.Gallery( label="Outputs", show_label=True, elem_id="gallery", object_fit="contain", height="auto" ) with gr.Group(elem_id="share-btn-container"): loading_icon = gr.HTML(loading_icon_html, visible=False) gr.HTML(article) submit_btn.click(fn=inference, inputs=[input_image,mode_input,flavor_input], outputs=[gallery], api_name="clipi2") block.queue(max_size=32,concurrency_count=10).launch(show_api=False)