profchaos committed
Commit 80a164c · verified · 1 Parent(s): e799756

Create app.py

Files changed (1)
  1. app.py +134 -0
app.py ADDED
@@ -0,0 +1,134 @@
+ # Import libraries
+ import cv2
+ import torch
+ import gradio as gr
+ from PIL import Image
+ from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
+ from byaldi import RAGMultiModalModel
+ #from google.colab import files  # Colab-only upload helper, unused in this Space
+ from IPython.display import display, HTML  # only needed for notebook display
+ import os
+ import re
+
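+ # Assumed (not pinned in this commit) runtime dependencies for the Space:
+ #   torch, transformers, byaldi, opencv-python-headless, pillow, gradio
+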
+ # Detect CUDA (GPU) if available, otherwise fall back to CPU
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print("Using device:", device)
+
+ # Loading models
+ # Note: the ColPali retriever is loaded here but never used later in this file
+ RAG = RAGMultiModalModel.from_pretrained("vidore/colpali", verbose=0)
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
+     "Qwen/Qwen2-VL-2B-Instruct",
+     torch_dtype=torch.float16,
+     device_map="auto",
+ )
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
+
+ torch.cuda.empty_cache()
+
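+ # Assumption: float16 weights expect a GPU. On a CPU-only host you would
+ # likely need torch_dtype=torch.float32 instead (slower, larger in RAM).
+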
+ # Upload image (Colab-only helper, kept disabled for the Space)
+ # def upload_image():
+ #     uploaded = files.upload()
+ #     for filename in uploaded.keys():
+ #         print(f'Uploaded file: {filename}')
+ #     return filename
+
+ # image_path = upload_image()
+
+ # Preprocessing using OpenCV
+ def preprocess_image(image_path):
+     image = cv2.imread(image_path)
+     if image is None:
+         raise FileNotFoundError(f"Image not found at the path: {image_path}")
+
+     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+     # Resize so the longer side is 1024 px, maintaining aspect ratio
+     height, width = gray.shape
+     if height > width:
+         new_height = 1024
+         new_width = int((width / height) * new_height)
+     else:
+         new_width = 1024
+         new_height = int((height / width) * new_width)
+
+     resized_image = cv2.resize(gray, (new_width, new_height))
+
+     # Clean up for OCR: blur, adaptive threshold, then non-local-means denoising
+     blurred = cv2.GaussianBlur(resized_image, (5, 5), 0)
+     thresholded = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
+     denoised = cv2.fastNlMeansDenoising(thresholded, h=30)
+     pil_image = Image.fromarray(denoised)
+
+     return pil_image
+
+ # Call the function and store the result (notebook-only example)
+ # pil_image = preprocess_image(image_path)
+ # display(pil_image)  # pil_image is accessible here
+
+ # Extract the text
+ def extract_text(image_path):
+     try:
+         processed_image = preprocess_image(image_path)
+         messages = [
+             {"role": "user", "content": [{"type": "image"}, {"type": "text", "text": "Please extract both the Hindi and English text as they appear in the image"}]}
+         ]
+         text_prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
+         inputs = processor(text=[text_prompt], images=[processed_image], padding=True, return_tensors="pt").to(device)
+         output_ids = model.generate(**inputs, max_new_tokens=1042)
+         # Drop the prompt tokens so only the newly generated text is decoded
+         generated_ids = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, output_ids)]
+         extracted_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
+         return extracted_text
+     except Exception as e:
+         return f"An error occurred during text extraction: {e}"
+
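+ # Quick smoke test (hypothetical file name; uncomment to run locally):
+ # print(extract_text("sample_page.png"))
+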
+ # Keyword searching
+ def keyword_search(extracted_text, keywords):
+     if not keywords:
+         return "Please enter a keyword to search and highlight."
+     keywords = [keyword.strip() for keyword in keywords.split(",") if keyword.strip()]
+
+     highlighted_text = ""
+     lines = extracted_text.split('\n')
+     for line in lines:
+         for keyword in keywords:
+             pattern = re.compile(re.escape(keyword), re.IGNORECASE)
+             line = pattern.sub(lambda m: f'<span style="color: red;">{m.group()}</span>', line)
+         # Use <br> (not '\n') so line breaks survive in the HTML output
+         highlighted_text += line + '<br>'
+     return highlighted_text
+
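+ # Example: keyword_search("hello world\nnamaste duniya", "hello, duniya")
+ # wraps each case-insensitive match in a red <span> and joins lines with <br>.
+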
+ # OCR and keyword search interfaces for Gradio
+ def ocr_interface(image):
+     if image is None:
+         return "Please upload an image first.", ""
+     image_path = "temp_image.png"
+     image.save(image_path)
+     extracted_text = extract_text(image_path)
+     os.remove(image_path)
+     return extracted_text, ""
+
+ def keyword_interface(extracted_text, keywords):
+     highlighted_text = keyword_search(extracted_text, keywords)
+     return highlighted_text
+
+ # Build and launch the Gradio interface (gradio is imported at the top)
+ def launch_gradio():
+     with gr.Blocks() as interface:
+         with gr.Row():
+             with gr.Column():
+                 image_input = gr.Image(type="pil", label="Upload Image for OCR")
+             with gr.Column():
+                 extracted_text = gr.Textbox(label="Extracted Text", lines=10, interactive=False)
+
+         keywords = gr.Textbox(label="Enter Keywords (comma-separated)", interactive=True)
+         highlighted_text = gr.HTML(label="Highlighted Text")
+
+         extract_btn = gr.Button("Extract Text")
+         extract_btn.click(fn=ocr_interface, inputs=image_input, outputs=[extracted_text, highlighted_text])
+
+         keyword_btn = gr.Button("Search & Highlight Keywords")
+         keyword_btn.click(fn=keyword_interface, inputs=[extracted_text, keywords], outputs=highlighted_text)
+
+     interface.launch()
+
+ if __name__ == "__main__":
+     launch_gradio()
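+
+ # To get a shareable public link when running outside Spaces, one option is
+ # calling interface.launch(share=True) inside launch_gradio() (a standard
+ # Gradio flag, not used in this commit).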