import pandas as pd
import PIL
from PIL import Image
from PIL import ImageDraw
import gradio as gr
import torch
import easyocr
import webbrowser
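# Download sample images used in the examples gallery below.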
torch.hub.download_url_to_file('https://raw.githubusercontent.com/AaronCWacker/Yggdrasil/master/images/20-Books.jpg','20-Books.jpg')
torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/english.png', 'COVID.png')
torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/chinese.jpg', 'chinese.jpg')
torch.hub.download_url_to_file('https://github.com/JaidedAI/EasyOCR/raw/master/examples/japanese.jpg', 'japanese.jpg')
torch.hub.download_url_to_file('https://i.imgur.com/mwQFd7G.jpeg', 'Hindi.jpeg')
def draw_boxes(image, bounds, color='yellow', width=2):
    # Outline each detected text region on the image.
    draw = ImageDraw.Draw(image)
    for bound in bounds:
        p0, p1, p2, p3 = bound[0]
        draw.line([*p0, *p1, *p2, *p3, *p0], fill=color, width=width)
    return image
def inference(img, lang):
    reader = easyocr.Reader(lang)
    bounds = reader.readtext(img.name)
    im = PIL.Image.open(img.name)
    im_with_boxes = draw_boxes(im, bounds)
    im_with_boxes.save('result.jpg')
    # Each bound returned by EasyOCR is a (bbox, text, confidence) triple.
    dataframe = pd.DataFrame(bounds, columns=['bbox', 'text', 'confidence'])
    # Turn each detected text fragment into a Wikipedia link.
    dataframe['link'] = dataframe['text'].apply(
        lambda x: f"<a href='https://en.wikipedia.org/wiki/{x.replace(' ', '_')}'>{x}</a>")
    return ['result.jpg', dataframe[['link', 'confidence']]]
title = '🖼️Image to Multilingual OCR👁️Gradio'
description = 'Multilingual OCR that works conveniently on all devices, in multiple languages.'
article = "<p style='text-align: center'></p>"
examples = [
    ['20-Books.jpg', ['en']],
    ['COVID.png', ['en']],
    ['chinese.jpg', ['ch_sim', 'en']],
    ['japanese.jpg', ['ja', 'en']],
    ['Hindi.jpeg', ['hi', 'en']]
]
css = ".output_image, .input_image {height: 40rem !important; width: 100% !important;}"
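# EasyOCR language codes offered in the language checkbox group.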
choices = [
    "ch_sim",
    "ch_tra",
    "de",
    "en",
    "es",
    "ja",
    "hi",
    "ru"
]
def open_link(link):
    webbrowser.open_new_tab(link)
output = gr.outputs.Dataframe(headers=['text', 'confidence'])
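# Build the Gradio interface: an input image and a language selection go in,
# the annotated image and the OCR dataframe come out.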
gr.Interface(
    inference,
    [gr.inputs.Image(type='file', label='Input'),
     gr.inputs.CheckboxGroup(choices, type="value", default=['en'], label='language')],
    [gr.outputs.Image(type='file', label='Output'), output],
    title=title,
    description=description,
    article=article,
    examples=examples,
    css=css,
    enable_queue=True
).launch(debug=True)
# Open the clicked dataframe link in a new browser tab
output.df_click = open_link