tejastake committed (verified)
Commit 3bb0526 · 1 Parent(s): 8034de8

Upload 5 files

Files changed (5)
  1. Dockerfile +20 -0
  2. app.py +73 -0
  3. process_img.py +48 -0
  4. requirements.txt +8 -0
  5. vector_emb.py +96 -0
Dockerfile ADDED
@@ -0,0 +1,20 @@
+FROM python:3.9
+
+WORKDIR /app
+
+COPY . /app
+
+RUN pip3 install fastapi uvicorn torch transformers==4.42.3 pillow protobuf==4.25.3 fastapi-health pinecone_text
+
+RUN useradd -m -u 1000 user
+
+USER user
+
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user . $HOME/app
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
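The image serves the API on port 7860 via uvicorn. As a quick smoke test (hypothetical image tag and host; assumes the container was built from this Dockerfile and started with the port published), the /healthz route registered in app.py can be polled from Python:

import requests

# Assumes: docker build -t embed-api . && docker run -p 7860:7860 embed-api
resp = requests.get("http://localhost:7860/healthz")
print(resp.status_code)  # 200 while the health dependency returns True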
app.py ADDED
@@ -0,0 +1,73 @@
+from fastapi import FastAPI, Depends, HTTPException
+from pydantic import BaseModel
+from fastapi_health import health
+from PIL import Image
+import logging
+import sys
+from io import BytesIO
+import base64
+
+from process_img import Image_Processor
+from vector_emb import EmbeddingModels
+
+logger = logging.getLogger(__name__)
+
+logging.basicConfig(
+    level=logging.INFO,
+    handlers=[logging.StreamHandler(sys.stdout)],
+    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+logging.info('Logging module started')
+
+def get_session():
+    return True
+
+def is_database_online(session: bool = Depends(get_session)):
+    return session
+
+app = FastAPI()
+app.add_api_route("/healthz", health([is_database_online]))
+
+# Load the embedding models and image processor once at startup
+
+model = EmbeddingModels()
+img_Processor = Image_Processor()
+
+class ImageBase64(BaseModel):
+    base64_string: str
+
+class TextInput(BaseModel):
+    text: str
+
+@app.post("/design-dense/")
+async def embed_image(data: ImageBase64):
+    base64_string = data.base64_string
+    image_data = base64.b64decode(base64_string)
+    image = Image.open(BytesIO(image_data))
+    final_image = img_Processor.get_processed_img(image)
+    embeddings = model.get_single_image_embedding(final_image)
+    return embeddings
+
+
+@app.post("/sparse/")
+async def embed_text(item: TextInput):
+    try:
+        logging.info(f'Received text for embedding: {item.text}')
+        embeddings = model.get_single_sparse_text_embedding(item.text)
+        logging.info('Embedding process completed')
+        return embeddings
+    except Exception as e:
+        logging.error(f'Error during embedding process: {e}')
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@app.post("/design-sparse/")
+async def embed_design_text(item: TextInput):
+    try:
+        logging.info(f'Received text for embedding: {item.text}')
+        embeddings = model.get_single_sparse_text_embedding(item.text)
+        embeddings = model.normalize_sparse_vector_values(embeddings)
+        logging.info('Embedding process completed')
+        return embeddings
+    except Exception as e:
+        logging.error(f'Error during embedding process: {e}')
+        raise HTTPException(status_code=500, detail=str(e))
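A minimal client sketch for the three routes (hypothetical host, port, and input file; `requests` is not among this repo's dependencies):

import base64
import requests

BASE = "http://localhost:7860"  # assumed host/port from the Dockerfile's CMD

# /design-dense/ expects a base64-encoded image and returns a dense CLIP vector
with open("design.png", "rb") as f:  # hypothetical input image
    b64 = base64.b64encode(f.read()).decode()
dense = requests.post(f"{BASE}/design-dense/", json={"base64_string": b64}).json()

# /sparse/ returns a raw SPLADE sparse vector; /design-sparse/ returns it L2-normalized
sparse = requests.post(f"{BASE}/sparse/", json={"text": "tubular steel chair frame"}).json()
print(len(dense), len(sparse["values"]))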
process_img.py ADDED
@@ -0,0 +1,48 @@
+import numpy as np
+from PIL import Image, ImageOps
+import logging
+
+class Image_Processor:
+    def __init__(self):
+        pass
+    def is_image_white_by_percentage(self, image, white_threshold):
+        image = image.convert('RGB')
+        image_np = np.array(image)
+        white_pixel = np.array([255, 255, 255])
+        white_pixels_count = np.sum(np.all(image_np == white_pixel, axis=-1))
+        total_pixels = image_np.shape[0] * image_np.shape[1]
+        white_pixel_percentage = (white_pixels_count / total_pixels) * 100
+        return white_pixel_percentage > white_threshold
+
+    def padding_white(self, image, output_size=(224, 224)):
+        # Ensure image is in RGB mode before padding
+        if image.mode != 'RGB':
+            image = image.convert('RGB')
+        new_image = ImageOps.pad(image, output_size, method=Image.Resampling.LANCZOS, color=(255, 255, 255))
+        return new_image
+
+    def resize_image_with_aspect_ratio(self, img):
+        target_size = 224
+        width, height = img.size
+        original_aspect_ratio = width / height
+        if width > height:
+            new_width = target_size
+            new_height = int(target_size / original_aspect_ratio)
+        else:
+            new_height = target_size
+            new_width = int(target_size * original_aspect_ratio)
+        resized_img = img.resize((new_width, new_height))
+        return resized_img
+
+    def get_processed_img(self, image):
+        is_mostly_white = self.is_image_white_by_percentage(image, 50)
+        if is_mostly_white:
+            resized_image = self.resize_image_with_aspect_ratio(image)
+            final_image = self.padding_white(resized_image)
+            logging.info('Resized and Padded Image')
+        else:
+            final_image = self.resize_image_with_aspect_ratio(image)
+            logging.info('Resized Image')
+
+        final_image = final_image.convert('L') if final_image.mode != 'L' else final_image
+        return final_image
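A rough usage sketch of the processor (hypothetical input path): images whose area is more than 50% pure white are resized and padded back to 224×224 on white, other images are only resized, and the result is always returned in grayscale ('L') mode:

from PIL import Image
from process_img import Image_Processor

proc = Image_Processor()
img = Image.open("patent_figure.png")  # hypothetical example file
out = proc.get_processed_img(img)
print(out.mode, out.size)  # 'L'; (224, 224) when the white-background branch is taken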
requirements.txt ADDED
@@ -0,0 +1,8 @@
+fastapi
+uvicorn
+pillow
+torch
+transformers
+fastapi-health
+pinecone_text
+numpy
vector_emb.py ADDED
@@ -0,0 +1,96 @@
+from pinecone_text.sparse import SpladeEncoder
+import re
+import torch
+import torch.nn.functional as F
+from transformers import CLIPModel, CLIPProcessor, CLIPTokenizer
+import logging
+
+class EmbeddingModels:
+    def __init__(self, device="cuda" if torch.cuda.is_available() else "cpu"):
+        self.device = device
+        logging.info(f'Using Device {self.device}')
+        self.sparse_model = SpladeEncoder(device=self.device)
+        self.img_model_ID = "openai/clip-vit-large-patch14"
+        self.img_model, self.img_processor, self.img_tokenizer = self.get_image_model_info(self.img_model_ID)
+        logging.info("Model Loaded")
+
+    def get_image_model_info(self, model_ID):
+        model = CLIPModel.from_pretrained(model_ID).to(self.device)
+        processor = CLIPProcessor.from_pretrained(model_ID)
+        tokenizer = CLIPTokenizer.from_pretrained(model_ID)
+        return model, processor, tokenizer
+
+    def get_single_image_embedding(self, my_image):
+        image = self.img_processor(
+            text=None,
+            images=my_image,
+            return_tensors="pt"
+        )["pixel_values"].to(self.device)
+
+        embedding = self.img_model.get_image_features(image)
+        logging.info("Embeddings Created")
+        embeddings = F.normalize(embedding, p=2, dim=1)
+        logging.info("Embeddings Normalized")
+        values = embeddings[0].tolist()
+        return values
+
+    def preprocessing_patent_data(self, text):
+        # Replace common patent section headings with [SEP]
+        pattern0 = r'\b(SUBSTITUTE SHEET RULE 2 SUMMARY OF THE INVENTION|BRIEF DESCRIPTION OF PREFERRED EMBODIMENTS|BRIEF DESCRIPTION OF THE DRAWINGS/FIGURES|BEST MODE FOR CARRYING OUT THE INVENTION|BACKGROUND AND SUMMARY OF THE INVENTION|FIELD AND BACKGROUND OF THE INVENTION|BACKGROUND OF THE PRESENT INVENTION|FIELD AND BACKGROUND OF INVENTION|STAND DER TECHNIK- BACKGROUND ART|BRIEF DESCRIPTION OF THE DRAWINGS|DESCRIPTION OF THE RELATED ART|BRIEF SUMMARY OF THE INVENTION|UTILITY MODEL CLAIMS A CONTENT|DESCRIPTION OF BACKGROUND ART|BRIEF DESCRIPTION OF DRAWINGS|BACKGROUND OF THE INVENTION|BACKGROUND TO THE INVENTION|TÉCNICA ANTERIOR- PRIOR ART|DISCLOSURE OF THE INVENTION|BRIEF SUMMARY OF INVENTION|BACKGROUND OF RELATED ART|SUMMARY OF THE DISCLOSURE|SUMMARY OF THE INVENTIONS|SUMMARY OF THE INVENTION|OBJECTS OF THE INVENTION|THE CONTENT OF INVENTION|DISCLOSURE OF INVENTION|Disclosure of Invention|Complete Specification|RELATED BACKGROUND ART|BACKGROUND INFORMATION|BACKGROUND TECHNOLOGY|DETAILED DESCRIPTION|SUMMARY OF INVENTION|DETAILED DESCRIPTION|PROBLEM TO BE SOLVED|EFFECT OF INVENTION|WHAT IS CLAIMED IS|What is claimed is|What is Claim is|SUBSTITUTE SHEET|SELECTED DRAWING|BACK GROUND ART|BACKGROUND ART|Background Art|JPO&INPIT|CONSTITUTION|DEFINITIONS|Related Art|BACKGROUND|JPO&INPIT|JPO&NCIPI|COPYRIGHT|SOLUTION|SUMMARY)\b'
+        text = re.sub(pattern0, '[SEP]', text, flags=re.IGNORECASE)
+        text = ' '.join(text.split())
+        # Remove <heading>...</heading> markup, other tags, id attributes, and colons
+        regex = r'<\s*heading[^>]*>(.*?)<\s*/\s*heading>|<[^<]+>|id=\"p-\d+\"|:'
+        result = re.sub(regex, '[SEP]', text, flags=re.IGNORECASE)
+        # Find chemical formula names so the bracket-stripping below can skip them
+        chemical_list = []
+        pattern1 = r'\b((?:(?:H|He|Li|Be|B|C|N|O|F|Ne|Na|Mg|Al|Si|P|S|Cl|Ar|K|Ca|Sc|Ti|V|Cr|Mn|Fe|Co|Ni|Cu|Zn|Ga|Ge|As|Se|Br|Kr|Rb|Sr|Y|Zr|Nb|Mo|Tc|Ru|Rh|Pd|Ag|Cd|In|Sn|Sb|Te|I|Xe|Cs|Ba|La|Hf|Ta|W|Re|Os|Ir|Pt|Au|Hg|Tl|Pb|Bi|Po|At|Rn|Fr|Ra|Ac|Rf|Db|Sg|Bh|Hs|Mt|Ds|Rg|Cn|Nh|Fl|Mc|Lv|Ts|Og|Ce|Pr|Nd|Pm|Sm|Eu|Gd|Tb|Dy|Ho|Er|Tm|Yb|Lu|Th|Pa|U|Np|Pu|Am|Cm|Bk|Cf|Es|Fm|Md|No|Lr)\d*)+)\b'
+
+        formula_names = re.findall(pattern1, result)
+        for formula in formula_names:
+            if len(formula) >= 2:
+                chemical_list.append(formula)
+
+        # Remove numbers and alphanumerics inside brackets, excluding chemical formulas
+        pattern2 = r"\((?![A-Za-z]+\))[\w\d\s,-]+\)|\([A-Za-z]\)"
+        def keep_strings(match):
+            matched = match.group(0)
+            if any(item in matched for item in chemical_list):
+                return matched
+            return ' '
+        cleaned_text = re.sub(pattern2, keep_strings, result)
+        cleaned_text = ' '.join(cleaned_text.split())
+        cleaned_text = re.sub(r'(\[SEP\]+\s*)+', ' ', cleaned_text, flags=re.IGNORECASE)
+        # Remove chemical compounds (e.g. polymerizable compounds) after splitting on dashes/equals signs
+        p_text2 = re.sub(r'[—\-═=]', ' ', cleaned_text)
+        cleaned_text = re.sub(pattern1, "", p_text2)
+        cleaned_text = re.sub(' ,+|, +', ' ', cleaned_text)
+        cleaned_text = re.sub(' +', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\.+', '.', cleaned_text)
+        cleaned_text = re.sub('[0-9] [0-9] +', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+        return cleaned_text
+
+    def get_single_sparse_text_embedding(self, text):
+        text = self.preprocessing_patent_data(text)
+        txt_sp = self.sparse_model.encode_documents(text)
+        return txt_sp
+
+    def normalize_sparse_vector_values(self, sparse_vector):
+        """
+        L2-normalize the values of a sparse vector.
+        Args:
+            sparse_vector: a dict with 'indices' and 'values' keys.
+        Returns:
+            A dict with the same 'indices' and L2-normalized 'values'.
+        """
+        tensor = torch.tensor(sparse_vector['values'])
+        normalized_tensor = F.normalize(tensor, p=2.0, dim=0, eps=1e-12)
+        values = normalized_tensor.tolist()
+
+        # Rebuild the sparse vector with the normalized values
+        normalized_sparse_vector = {
+            'indices': sparse_vector['indices'],
+            'values': values
+        }
+        return normalized_sparse_vector
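A small usage sketch (the CLIP and SPLADE weights are downloaded on first construction; the text is illustrative):

from vector_emb import EmbeddingModels

model = EmbeddingModels()  # uses CUDA when available, otherwise CPU
sp = model.get_single_sparse_text_embedding("A polymerizable liquid crystal compound")
sp_norm = model.normalize_sparse_vector_values(sp)
# Normalization keeps the SPLADE indices and L2-normalizes the values
assert sp_norm['indices'] == sp['indices']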