minar09 commited on
Commit
a5b6e19
·
verified ·
1 Parent(s): acaf225

Delete main.py

Browse files
Files changed (1) hide show
  1. main.py +0 -188
main.py DELETED
@@ -1,188 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import logging
5
- from pathlib import Path
6
- from typing import List, Dict, Optional
7
- from dataclasses import dataclass
8
- from fastapi.encoders import jsonable_encoder
9
- import fitz # PyMuPDF
10
- from sentence_transformers import SentenceTransformer
11
- from llama_cpp import Llama
12
-
13
# Configure root logging once at import time; INFO keeps model-load progress visible.
logging.basicConfig(level=logging.INFO)
# Module-level logger, named after this module per stdlib convention.
logger = logging.getLogger(__name__)
15
-
16
-
17
@dataclass
class ProductSpec:
    """A product specification extracted from a PDF catalog.

    Attributes:
        name: Product name; empty string when the LLM found no name.
        description: Free-text product description, if any.
        price: Numeric price, if any.
        attributes: Key/value product attributes (e.g. {"color": "red"}).
        tables: Document-level tables attached by the caller.
    """

    name: str
    description: Optional[str] = None
    price: Optional[float] = None
    # Fix: these were annotated as plain Dict/List but defaulted to None,
    # which is both a type error and misleading to callers. The runtime
    # default (None) is unchanged; only the annotations are corrected.
    attributes: Optional[Dict[str, str]] = None
    tables: Optional[List[Dict]] = None

    def to_dict(self):
        """Serialize to JSON-compatible primitives via FastAPI's encoder."""
        return jsonable_encoder(self)
27
-
28
-
29
class PDFProcessor:
    """Extracts product specifications and tables from PDF catalogs.

    Combines PyMuPDF text/table extraction with a llama.cpp LLM that turns
    raw text blocks into structured ProductSpec records.
    """

    def __init__(self):
        # Sentence-embedding model. NOTE(review): not referenced elsewhere in
        # this file — presumably used by callers; confirm it is still needed.
        self.emb_model = self._initialize_emb_model("all-MiniLM-L6-v2")
        # Quantized DeepSeek 7B GGUF; choose the appropriate filename below,
        # adjust if needed. Downloaded automatically when missing locally.
        self.llm = self._initialize_llm("deepseek-llm-7b-base.Q2_K.gguf")
        # Output artifacts land under ./output (created if absent).
        self.output_dir = Path("./output")
        self.output_dir.mkdir(exist_ok=True)
36
-
37
- def _initialize_emb_model(self, model_name):
38
- try:
39
- # Use SentenceTransformer if available
40
- return SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
41
- except Exception as e:
42
- logger.warning(f"SentenceTransformer failed: {e}. Falling back to transformers model.")
43
- from transformers import AutoTokenizer, AutoModel
44
- tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/" + model_name)
45
- model = AutoModel.from_pretrained("sentence-transformers/" + model_name)
46
- return model
47
-
48
- def _initialize_llm(self, model_name):
49
- """Initialize LLM with automatic download if needed"""
50
- # Here we use from_pretrained so that if the model is missing locally it downloads it.
51
- return Llama.from_pretrained(
52
- repo_id="TheBloke/deepseek-llm-7B-base-GGUF",
53
- filename=model_name,
54
- )
55
-
56
- def process_pdf(self, pdf_path: str) -> Dict:
57
- """Process PDF using PyMuPDF"""
58
- start_time = time.time()
59
-
60
- # Open PDF
61
- try:
62
- doc = fitz.open(pdf_path)
63
- except Exception as e:
64
- logger.error(f"Failed to open PDF: {e}")
65
- raise RuntimeError("Cannot open PDF file.") from e
66
-
67
- text_blocks = []
68
- tables = []
69
-
70
- # Extract text and tables from each page
71
- for page_num, page in enumerate(doc):
72
- # Extract text blocks from page and filter out very short blocks (noise)
73
- blocks = self._extract_text_blocks(page)
74
- filtered = [block for block in blocks if len(block.strip()) >= 10]
75
- logger.debug(f"Page {page_num + 1}: Extracted {len(blocks)} blocks, {len(filtered)} kept after filtering.")
76
- text_blocks.extend(filtered)
77
-
78
- # Extract tables (if any)
79
- tables.extend(self._extract_tables(page, page_num))
80
-
81
- # Process text blocks with LLM to extract product information
82
- products = []
83
- for idx, block in enumerate(text_blocks):
84
- # Log the text block for debugging
85
- logger.debug(f"Processing text block {idx}: {block[:100]}...")
86
- product = self._process_text_block(block)
87
- if product:
88
- product.tables = tables
89
- # Only add if at least one key (like name) is non-empty
90
- if product.name or product.description or product.price or (
91
- product.attributes and len(product.attributes) > 0):
92
- products.append(product.to_dict())
93
- else:
94
- logger.debug(f"LLM returned empty product for block {idx}.")
95
- else:
96
- logger.debug(f"No product extracted from block {idx}.")
97
-
98
- logger.info(f"Processed {len(products)} products in {time.time() - start_time:.2f}s")
99
- return {"products": products, "tables": tables}
100
-
101
- def _extract_text_blocks(self, page) -> List[str]:
102
- """Extract text blocks from a PDF page using PyMuPDF's blocks method."""
103
- blocks = []
104
- for block in page.get_text("blocks"):
105
- # block[4] contains the text content
106
- text = block[4].strip()
107
- if text:
108
- blocks.append(text)
109
- return blocks
110
-
111
- def _extract_tables(self, page, page_num: int) -> List[Dict]:
112
- """Extract tables from a PDF page using PyMuPDF's table extraction (if available)."""
113
- tables = []
114
- try:
115
- tab = page.find_tables()
116
- if tab and hasattr(tab, 'tables') and tab.tables:
117
- for table in tab.tables:
118
- table_data = table.extract()
119
- if table_data:
120
- tables.append({
121
- "page": page_num + 1,
122
- "cells": table_data,
123
- "header": table.header.names if table.header else [],
124
- "content": table_data
125
- })
126
- except Exception as e:
127
- logger.warning(f"Error extracting tables from page {page_num + 1}: {e}")
128
- return tables
129
-
130
- def _process_text_block(self, text: str) -> Optional[ProductSpec]:
131
- """Process a text block with LLM to extract product specifications."""
132
- prompt = self._generate_query_prompt(text)
133
- logger.debug(f"Generated prompt: {prompt[:200]}...")
134
- try:
135
- response = self.llm.create_chat_completion(
136
- messages=[{"role": "user", "content": prompt}],
137
- temperature=0.1,
138
- max_tokens=512
139
- )
140
- # Debug: log raw response
141
- logger.debug(f"LLM raw response: {response}")
142
- return self._parse_response(response['choices'][0]['message']['content'])
143
- except Exception as e:
144
- logger.warning(f"Error processing text block: {e}")
145
- return None
146
-
147
- def _generate_query_prompt(self, text: str) -> str:
148
- """Generate a prompt instructing the LLM to extract product information."""
149
- return f"""Extract product specifications from the following text. If no product is found, return an empty JSON object with keys.\n\nText:\n{text}\n\nReturn JSON format exactly as:\n{{\n \"name\": \"product name\",\n \"description\": \"product description\",\n \"price\": numeric_price,\n \"attributes\": {{ \"key\": \"value\" }}\n}}"""
150
-
151
- def _parse_response(self, response: str) -> Optional[ProductSpec]:
152
- """Parse the LLM's response to extract a product specification."""
153
- try:
154
- json_start = response.find('{')
155
- json_end = response.rfind('}') + 1
156
- json_str = response[json_start:json_end].strip()
157
- if not json_str:
158
- raise ValueError("No JSON content found in response.")
159
- data = json.loads(json_str)
160
- # If the returned JSON is essentially empty, return None
161
- if all(not data.get(key) for key in ['name', 'description', 'price', 'attributes']):
162
- return None
163
- return ProductSpec(
164
- name=data.get('name', ''),
165
- description=data.get('description'),
166
- price=data.get('price'),
167
- attributes=data.get('attributes', {})
168
- )
169
- except (json.JSONDecodeError, KeyError, ValueError) as e:
170
- logger.warning(f"Parse error: {e} in response: {response}")
171
- return None
172
-
173
-
174
def process_pdf_catalog(pdf_path: str):
    """Top-level entry point: process one PDF catalog.

    Args:
        pdf_path: Path of the PDF file to process.

    Returns:
        A ``(result_dict, status_message)`` tuple; the dict is empty on
        failure and the message describes the outcome.
    """
    processor = PDFProcessor()
    try:
        data = processor.process_pdf(pdf_path)
    except Exception as exc:
        logger.error(f"Processing failed: {exc}")
        return {}, "Error processing PDF"
    return data, "Processing completed successfully!"
182
-
183
-
184
- if __name__ == "__main__":
185
- # Example usage: change this if you call process_pdf_catalog elsewhere
186
- pdf_path = "path/to/your/pdf_file.pdf"
187
- result, message = process_pdf_catalog(pdf_path)
188
- print(result, message)