import gradio as gr
from transformers import TrOCRProcessor, VisionEncoderDecoderModel
import requests
from PIL import Image

# Load the TrOCR processor (image preprocessing + tokenizer) and the checkpoint
# fine-tuned on handwritten text (IAM)
processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
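
# Optional sketch: run inference on a GPU when one is available. If enabled,
# pixel_values in process_image must be moved to the same device before generate().
# import torch
# model.to("cuda" if torch.cuda.is_available() else "cpu")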

def process_image(image):
    # Preprocess the PIL image into normalized pixel values for the image encoder
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # Autoregressively generate token ids for the transcription
    generated_ids = model.generate(pixel_values)
    # Decode the ids into text, dropping special tokens
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_text
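
# Optional sanity check (sketch): fetch a sample handwriting image and run the
# pipeline once before starting the demo. The URL is an assumption based on the
# IAM sample used in the TrOCR docs and may not stay reachable; uncomment to try.
# sample_url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"
# sample_image = Image.open(requests.get(sample_url, stream=True).raw).convert("RGB")
# print(process_image(sample_image))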

title = "Transformer (encoder-decoder) based Text OCR"
description = (
    "Demo for Microsoft's TrOCR, an encoder-decoder model consisting of an image "
    "Transformer encoder and a text Transformer decoder for state-of-the-art "
    "optical character recognition (OCR) on single text-line images. This "
    "particular model is fine-tuned on IAM, a dataset of annotated handwritten images."
)
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2109.10282'>TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models</a> | <a href='https://github.com/microsoft/unilm/tree/master/trocr'>Github Repo</a></p>"

iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(),
    title=title,
    description=description,
    article=article,
)

iface.launch(debug=False)
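
# Note (sketch): when running outside Hugging Face Spaces, a temporary public
# link can be requested via Gradio's standard share option:
# iface.launch(debug=False, share=True)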