import os
import sys
import gradio as gr
from PIL import Image

## environment setup
os.system("git clone https://github.com/codeslake/RefVSR.git")
os.chdir("RefVSR")
os.system("./install/install_cudnn113.sh")

## download pretrained checkpoints
os.makedirs("ckpt", exist_ok=True)
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/SPyNet.pytorch -O ckpt/SPyNet.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID_8K.pytorch -O ckpt/RefVSR_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID.pytorch -O ckpt/RefVSR_MFID.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID.pytorch -O ckpt/RefVSR_small_MFID.pytorch")
sys.path.append("RefVSR")

## Input setup (creates folders and places inputs corresponding to the original RefVSR code)
## RealMCVSR folder layout: UW = ultra-wide (LR input), W = wide-angle (Ref), T = telephoto (Ref)
HR_LR_path = "test/RealMCVSR/test/HR/UW/0000"
HR_Ref_path = "test/RealMCVSR/test/HR/W/0000"
HR_Ref_path_T = "test/RealMCVSR/test/HR/T/0000"
LR_path = "test/RealMCVSR/test/LRx4/UW/0000"
Ref_path = "test/RealMCVSR/test/LRx4/W/0000"
Ref_path_T = "test/RealMCVSR/test/LRx4/T/0000"
os.makedirs(LR_path, exist_ok=True)
os.makedirs(Ref_path, exist_ok=True)
os.makedirs(Ref_path_T, exist_ok=True)
os.makedirs(HR_LR_path, exist_ok=True)
os.makedirs(HR_Ref_path, exist_ok=True)
os.makedirs(HR_Ref_path_T, exist_ok=True)
os.makedirs('result', exist_ok=True)

## sample LR (ultra-wide) and Ref (wide-angle) frames used as the Gradio example
os.system("wget https://www.dropbox.com/s/vqekqdz80d85gi4/UW.png -O LR.png")
os.system("wget https://www.dropbox.com/s/lsopmquhpm87v83/W.png -O Ref.png")

## resize if necessary (not used)
def resize(img):
    max_side = 512
    w = img.size[0]
    h = img.size[1]
    if max(h, w) > max_side:
        scale_ratio = max_side / max(h, w)
        wsize = int(w * scale_ratio)
        hsize = int(h * scale_ratio)
        img = img.resize((wsize, hsize), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
    return img

## inference
def inference(LR, Ref):
    ## resize for user selected input (not used)
    #LR = resize(LR)
    #Ref = resize(Ref)

    ## Input setup (places the uploaded frames where the original RefVSR code expects them)
    LR.save(os.path.join(LR_path, '0000.png'))
    Ref.save(os.path.join(Ref_path, '0000.png'))
    Ref.save(os.path.join(Ref_path_T, '0000.png'))
    LR.save(os.path.join(HR_LR_path, '0000.png'))
    Ref.save(os.path.join(HR_Ref_path, '0000.png'))
    Ref.save(os.path.join(HR_Ref_path_T, '0000.png'))

    ## Run RefVSR model (small 8K model, CPU mode)
    os.system("python -B run.py \
        --mode amp_RefVSR_small_MFID_8K \
        --config config_RefVSR_small_MFID \
        --data RealMCVSR \
        --ckpt_abs_name ckpt/RefVSR_small_MFID_8K.pytorch \
        --data_offset ./test \
        --output_offset ./result \
        --qualitative_only \
        --cpu \
        --is_gradio")
    return "result/0000.png"

title = "RefVSR (under construction)"
description = "Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to the 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about 150 seconds."
article = """

To check the full capability of the model, we recommend cloning the GitHub repository and running RefVSR models on videos using GPUs.

This demo runs on CPUs and, due to its computational cost, only supports RefVSR for a single LR and Ref frame. Hence, the model does not take advantage of temporal LR and Ref frames.

The model is trained with the proposed two-stage training strategy, and the sample frames have a resolution of 430x270 and are saved in PNG format.

Project | arXiv | GitHub

"""

## resize for sample (not used)
#LR = resize(Image.open('LR.png')).save('LR.png')
#Ref = resize(Image.open('Ref.png')).save('Ref.png')

## input examples
examples = [['LR.png', 'Ref.png']]

## interface
gr.Interface(
    inference,
    [gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],
    gr.outputs.Image(type="file"),
    title=title,
    description=description,
    article=article,
    theme="peach",
    examples=examples,
).launch(enable_queue=True)