qtAnswering / app.py
ikraamkb's picture
Update app.py
4c11732 verified
raw
history blame
4.74 kB
import io

from fastapi import FastAPI, File, UploadFile
import fitz  # PyMuPDF for PDF parsing
from tika import parser  # Apache Tika for document parsing
import openpyxl
from pptx import Presentation
import torch
from PIL import Image
from transformers import pipeline
import gradio as gr
import numpy as np
import easyocr
# Initialize FastAPI (not needed for HF Spaces, but kept for flexibility)
# NOTE(review): `app` is rebound to a Gradio TabbedInterface near the end of
# this file, discarding this FastAPI instance -- confirm that is intentional.
app = FastAPI()
print(f"πŸ”„ Loading models")  # f-string has no placeholders; a plain string would do
# Text-generation pipeline used for both document QA and image QA prompts.
# device=-1 forces CPU execution.
doc_qa_pipeline = pipeline("text2text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", device=-1)
# Produces a caption that serves as the "context" for image questions.
image_captioning_pipeline = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print("βœ… Models loaded")
# Initialize OCR Model (CPU Mode)
# NOTE(review): `reader` is never referenced elsewhere in this file --
# possibly dead startup cost; verify before removing.
reader = easyocr.Reader(["en"], gpu=False)
# File extensions the QA pipeline knows how to extract text from.
ALLOWED_EXTENSIONS = {"pdf", "docx", "pptx", "xlsx"}


def validate_file_type(file):
    """Check an upload's extension against ALLOWED_EXTENSIONS.

    Returns None when the file type is supported, otherwise a
    user-facing error string.
    """
    extension = file.filename.rsplit(".", 1)[-1].lower()
    print(f"πŸ” Validating file type: {extension}")
    if extension in ALLOWED_EXTENSIONS:
        return None
    return f"❌ Unsupported file format: {extension}"
def truncate_text(text, max_tokens=450):
    """Return *text* cut down to its first *max_tokens* whitespace-separated words.

    "Tokens" here means words from str.split(), not model tokens; runs of
    whitespace are collapsed to single spaces in the result.
    """
    shortened = " ".join(text.split()[:max_tokens])
    print(f"βœ‚οΈ Truncated text to {max_tokens} tokens.")
    return shortened
# Document Text Extraction Functions
def extract_text_from_pdf(pdf_bytes):
    """Extract plain text from a PDF supplied as raw bytes.

    Returns the concatenated per-page text, a warning string when the PDF
    has no extractable text, or an error string if parsing fails.
    """
    try:
        print("πŸ“„ Extracting text from PDF...")
        # Use a context manager so the document handle is always released;
        # the original left it open.
        with fitz.open(stream=pdf_bytes, filetype="pdf") as doc:
            text = "\n".join(page.get_text("text") for page in doc)
        # strip() so a PDF whose pages yield only newlines/whitespace is
        # reported as empty rather than passed on as "text".
        return text if text.strip() else "⚠️ No text found."
    except Exception as e:
        return f"❌ Error reading PDF: {str(e)}"
def extract_text_with_tika(file_bytes):
    """Extract text from DOCX/PPTX bytes via Apache Tika.

    Returns stripped text, a warning string when the document has no
    content, or an error string on failure.
    """
    try:
        print("πŸ“ Extracting text with Tika...")
        parsed = parser.from_buffer(file_bytes)
        # Tika can return {"content": None}; dict.get's default does not
        # cover an explicit None, so guard before calling .strip() --
        # the original raised AttributeError here for empty documents.
        content = parsed.get("content")
        if not content or not content.strip():
            return "⚠️ No text found."
        return content.strip()
    except Exception as e:
        return f"❌ Error reading document: {str(e)}"
def extract_text_from_excel(excel_bytes):
    """Extract cell values from an XLSX file supplied as raw bytes.

    Returns one line of space-joined cell text per row, a warning string
    when the workbook is empty, or an error string on failure.
    """
    try:
        print("πŸ“Š Extracting text from Excel...")
        # load_workbook needs a path or file-like object, not raw bytes;
        # the original passed bytes straight in, which always fails.
        wb = openpyxl.load_workbook(io.BytesIO(excel_bytes), read_only=True)
        try:
            lines = []
            for sheet in wb.worksheets:
                for row in sheet.iter_rows(values_only=True):
                    # Empty cells are None; render them as blanks instead of
                    # the literal string "None" that str(None) would produce.
                    lines.append(" ".join("" if cell is None else str(cell) for cell in row))
        finally:
            wb.close()  # read-only workbooks keep their source handle open
        return "\n".join(lines) if lines else "⚠️ No text found."
    except Exception as e:
        return f"❌ Error reading Excel: {str(e)}"
def answer_question_from_document(file: UploadFile, question: str):
    """Answer *question* using text extracted from an uploaded document.

    Returns the model's generated answer, or a user-facing error/warning
    string when validation or extraction fails.
    """
    print("πŸ“‚ Processing document for QA...")
    validation_error = validate_file_type(file)
    if validation_error:
        return validation_error
    file_ext = file.filename.rsplit(".", 1)[-1].lower()
    file_bytes = file.file.read()
    if file_ext == "pdf":
        text = extract_text_from_pdf(file_bytes)
    elif file_ext in ("docx", "pptx"):
        text = extract_text_with_tika(file_bytes)
    else:
        # validate_file_type already rejected everything outside
        # ALLOWED_EXTENSIONS, so only "xlsx" can reach this branch.
        text = extract_text_from_excel(file_bytes)
    # The extractors signal failure with marker-prefixed strings; return
    # those directly instead of feeding an error message to the LLM as
    # document context (the original did exactly that).
    if not text:
        return "⚠️ No text extracted from the document."
    if text.startswith(("❌", "⚠️")):
        return text
    truncated_text = truncate_text(text)
    print("πŸ€– Generating response...")
    response = doc_qa_pipeline(f"Question: {question}\nContext: {truncated_text}")
    return response[0]["generated_text"]
def answer_question_from_image(image, question):
    """Caption the uploaded image, then answer *question* against that caption.

    Returns the model's generated answer, or an error string if any step
    fails.
    """
    try:
        print("πŸ–ΌοΈ Processing image for QA...")
        # Gradio delivers images as NumPy arrays; the captioning pipeline
        # expects a PIL Image.
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        print("πŸ–ΌοΈ Generating caption for image...")
        caption = image_captioning_pipeline(image)[0]["generated_text"]
        print("πŸ€– Answering question based on caption...")
        answer = doc_qa_pipeline(f"Question: {question}\nContext: {caption}")
        return answer[0]["generated_text"]
    except Exception as e:
        return f"❌ Error processing image: {str(e)}"
# Gradio UI for Document & Image QA
# Tab 1: upload a document (pdf/docx/pptx/xlsx) and ask a question about it.
doc_interface = gr.Interface(
    fn=answer_question_from_document,
    inputs=[gr.File(label="πŸ“‚ Upload Document"), gr.Textbox(label="πŸ’¬ Ask a Question")],
    outputs="text",
    title="πŸ“„ AI Document Question Answering"
)
# Tab 2: upload an image and ask a question answered from its caption.
img_interface = gr.Interface(
    fn=answer_question_from_image,
    inputs=[gr.Image(label="πŸ–ΌοΈ Upload Image"), gr.Textbox(label="πŸ’¬ Ask a Question")],
    outputs="text",
    title="πŸ–ΌοΈ AI Image Question Answering"
)
# Launch Gradio
# NOTE(review): rebinding `app` here discards the FastAPI instance created
# earlier; presumably Hugging Face Spaces serves this Gradio object instead
# -- confirm that is the intent.
app = gr.TabbedInterface([doc_interface, img_interface], ["πŸ“„ Document QA", "πŸ–ΌοΈ Image QA"])
# Script entry point: start the Gradio server with a public share link.
if __name__ == "__main__":
    app.launch(share=True) # For Hugging Face Spaces