UniquePratham committed (verified)
Commit 9919fac · 1 Parent(s): 3cb2a3f

Update ocr_cpu.py

Files changed (1)
  1. ocr_cpu.py +27 -63
ocr_cpu.py CHANGED
@@ -1,13 +1,11 @@
 import os
 from transformers import AutoModel, AutoTokenizer
 import torch
+import re

 # Load model and tokenizer
-# model_name = "ucaslcl/GOT-OCR2_0"
-model_name = "srimanth-d/GOT_CPU"
-tokenizer = AutoTokenizer.from_pretrained(
-    model_name, trust_remote_code=True, return_tensors='pt'
-)
+model_name = "srimanth-d/GOT_CPU" # Using GOT model on CPU
+tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, return_tensors='pt')

 # Load the model
 model = AutoModel.from_pretrained(
@@ -20,84 +18,35 @@ model = AutoModel.from_pretrained(

 # Ensure the model is in evaluation mode and loaded on CPU
 device = torch.device("cpu")
-dtype = torch.float32 # Use float32 on CPU
 model = model.eval()

-# OCR function
+# OCR function to extract text
 def extract_text_got(uploaded_file):
     """Use GOT-OCR2.0 model to extract text from the uploaded image."""
     temp_file_path = 'temp_image.jpg'
-
+
     try:
         # Save the uploaded file temporarily
         with open(temp_file_path, 'wb') as temp_file:
             temp_file.write(uploaded_file.read())

-        print(f"Processing image from path: {temp_file_path}") # Debug info
+        print(f"Processing image from path: {temp_file_path}")

         ocr_types = ['ocr', 'format']
-        fine_grained_options = ['ocr', 'format']
-        color_options = ['red', 'green', 'blue']
-        box = [10, 10, 100, 100] # Example box for demonstration
-        multi_crop_types = ['ocr', 'format']
-
         results = []

-        # Run basic OCR types
+        # Run OCR on the image
         for ocr_type in ocr_types:
             with torch.no_grad():
-                print(f"Running basic OCR with type: {ocr_type}") # Debug info
+                print(f"Running OCR with type: {ocr_type}")
                 outputs = model.chat(tokenizer, temp_file_path, ocr_type=ocr_type)
-
-                # Debug outputs
-                print(f"Outputs for {ocr_type}: {outputs}")
-
-                if isinstance(outputs, list) and outputs[0].strip():
-                    return outputs[0].strip() # Return if successful
-                results.append(outputs[0].strip() if outputs else "No result")

-        # Try FINE-GRAINED OCR with box options
-        for ocr_type in fine_grained_options:
-            with torch.no_grad():
-                print(f"Running fine-grained OCR with box, type: {ocr_type}") # Debug info
-                outputs = model.chat(tokenizer, temp_file_path, ocr_type=ocr_type, ocr_box=box)
-
-                print(f"Outputs for {ocr_type} with box: {outputs}")
-
                 if isinstance(outputs, list) and outputs[0].strip():
-                    return outputs[0].strip() # Return if successful
+                    return outputs[0].strip() # Return the result if successful
                 results.append(outputs[0].strip() if outputs else "No result")

-        # Try FINE-GRAINED OCR with color options
-        for ocr_type in fine_grained_options:
-            for color in color_options:
-                with torch.no_grad():
-                    print(f"Running fine-grained OCR with color {color}, type: {ocr_type}") # Debug info
-                    outputs = model.chat(tokenizer, temp_file_path, ocr_type=ocr_type, ocr_color=color)
-
-                    print(f"Outputs for {ocr_type} with color {color}: {outputs}")
-
-                    if isinstance(outputs, list) and outputs[0].strip():
-                        return outputs[0].strip() # Return if successful
-                    results.append(outputs[0].strip() if outputs else "No result")
-
-        # Try MULTI-CROP OCR
-        for ocr_type in multi_crop_types:
-            with torch.no_grad():
-                print(f"Running multi-crop OCR with type: {ocr_type}") # Debug info
-                outputs = model.chat_crop(tokenizer, temp_file_path, ocr_type=ocr_type)
-
-                print(f"Outputs for multi-crop {ocr_type}: {outputs}")
-
-                if isinstance(outputs, list) and outputs[0].strip():
-                    return outputs[0].strip() # Return if successful
-                results.append(outputs[0].strip() if outputs else "No result")
-
-        # Return combined results or no text found message
-        if all(not text for text in results):
-            return "No text extracted."
-        else:
-            return "\n".join(results)
+        # Combine results or return no text found message
+        return results[0] if results else "No text extracted."

     except Exception as e:
         return f"Error during text extraction: {str(e)}"
@@ -106,4 +55,19 @@ def extract_text_got(uploaded_file):
         # Clean up temporary file
         if os.path.exists(temp_file_path):
             os.remove(temp_file_path)
-            print(f"Temporary file {temp_file_path} removed.") # Debug info
+            print(f"Temporary file {temp_file_path} removed.")
+
+# Function to clean extracted text (removes extra spaces and handles special cases for Hindi and English)
+def clean_text(extracted_text):
+    """
+    Cleans extracted text by removing extra spaces and handling language-specific issues (Hindi, English, Hinglish).
+    """
+    # Normalize spaces (remove multiple spaces)
+    text = re.sub(r'\s+', ' ', extracted_text)
+
+    # Handle special cases based on Hindi, English, and Hinglish patterns
+    text = re.sub(r'([a-zA-Z]+)\s+([a-zA-Z]+)', r'\1 \2', text) # For English
+    text = re.sub(r'([ा-ह]+)\s+([ा-ह]+)', r'\1\2', text) # For Hindi (conjoining Devanagari characters)
+
+    # Remove trailing and leading spaces
+    return text.strip()
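
For reference, after this commit ocr_cpu.py exposes two module-level functions: extract_text_got, which runs the GOT-OCR2.0 model on CPU against a temporary copy of the uploaded image, and clean_text, which normalizes whitespace and joins split Devanagari runs. Below is a minimal usage sketch, not part of the commit itself; the script name and image path are illustrative, and it assumes the caller passes any file-like object with a .read() method, which is what extract_text_got expects.

# example_usage.py (hypothetical caller, not in the repository)
# Importing ocr_cpu loads the srimanth-d/GOT_CPU model at import time, so the first import is slow.
from ocr_cpu import extract_text_got, clean_text

# extract_text_got accepts any file-like object exposing .read(), e.g. an open file
# or a Streamlit UploadedFile; it writes the bytes to temp_image.jpg and runs the model.
with open("sample_page.jpg", "rb") as image_file:  # illustrative input image
    raw_text = extract_text_got(image_file)

# clean_text collapses repeated whitespace and joins adjacent Devanagari sequences
# before the result is displayed or stored.
print(clean_text(raw_text))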