# qtAnswering / app.py
# Last commit: "Update app.py" (a078426, verified) by ikraamkb
# Standard library
import io

# Third-party
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import RedirectResponse
import fitz  # PyMuPDF for PDF parsing
from tika import parser  # Apache Tika for document parsing
import openpyxl
from pptx import Presentation
from PIL import Image
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration
import gradio as gr
import torch
import numpy as np
# --- Application and model setup (runs once at import time) ---

# FastAPI application; the Gradio UI is mounted onto it further down.
app = FastAPI()
print(f"πŸ”„ Loading models")
# Text-generation model used to answer questions given extracted text as
# context. device=-1 forces CPU execution.
doc_qa_pipeline = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", device=-1)
# BLIP image-captioning model: for image QA, the generated caption becomes
# the context handed to the text-generation pipeline.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
# Half-precision weights to reduce the memory footprint.
# NOTE(review): fp16 can be slow or unsupported on CPU for some ops —
# confirm the target hardware before relying on this.
model = model.to(dtype=torch.float16)  # Quantizing to FP16
print("βœ… Models loaded")
# File extensions accepted by the document-QA path.
ALLOWED_EXTENSIONS = {"pdf", "docx", "pptx", "xlsx"}
def validate_file_type(file):
    """Check an upload's extension against ALLOWED_EXTENSIONS.

    Returns None when the extension is allowed, otherwise an error string
    describing the unsupported format.
    """
    ext = file.filename.split(".")[-1].lower()
    print(f"πŸ” Validating file type: {ext}")
    if ext in ALLOWED_EXTENSIONS:
        return None
    return f"❌ Unsupported file format: {ext}"
# Function to truncate text to 450 tokens
def truncate_text(text, max_tokens=450):
words = text.split()
truncated = " ".join(words[:max_tokens])
print(f"βœ‚οΈ Truncated text to {max_tokens} tokens.")
return truncated
# Document Text Extraction Functions
def extract_text_from_pdf(pdf_bytes):
    """Extract plain text from an in-memory PDF.

    Parameters:
        pdf_bytes: raw PDF file contents.

    Returns the concatenated page text, a warning string when the PDF
    contains no text, or an error string when parsing fails (never raises).
    """
    try:
        print("πŸ“„ Extracting text from PDF...")
        # Close the document deterministically instead of leaking the
        # handle (the original never closed it).
        with fitz.open(stream=pdf_bytes, filetype="pdf") as doc:
            text = "\n".join(page.get_text("text") for page in doc)
        return text if text else "⚠️ No text found."
    except Exception as e:
        return f"❌ Error reading PDF: {str(e)}"
def extract_text_with_tika(file_bytes):
    """Extract text from DOCX/PPTX bytes via Apache Tika.

    Parameters:
        file_bytes: raw document contents.

    Returns the stripped text, a warning string when Tika found no text,
    or an error string on failure (never raises).
    """
    try:
        print("πŸ“ Extracting text with Tika...")
        parsed = parser.from_buffer(file_bytes)
        # Tika reports "no text" as content=None; the previous
        # parsed.get(...).strip() raised AttributeError in that case and
        # fell through to the generic error message instead of the warning.
        content = parsed.get("content")
        if content is None:
            return "⚠️ No text found."
        return content.strip()
    except Exception as e:
        return f"❌ Error reading document: {str(e)}"
def extract_text_from_excel(excel_bytes):
    """Extract cell text from an in-memory XLSX workbook.

    Parameters:
        excel_bytes: raw .xlsx file contents.

    Returns one line per row with cell values joined by spaces, a warning
    string when the workbook has no rows, or an error string on failure
    (never raises).
    """
    try:
        print("πŸ“Š Extracting text from Excel...")
        # load_workbook expects a filename or file-like object, not raw
        # bytes — wrap the payload in BytesIO so in-memory uploads parse.
        wb = openpyxl.load_workbook(io.BytesIO(excel_bytes), read_only=True)
        try:
            text = []
            for sheet in wb.worksheets:
                for row in sheet.iter_rows(values_only=True):
                    text.append(" ".join(map(str, row)))
        finally:
            # read_only workbooks keep the underlying file handle open
            # until explicitly closed.
            wb.close()
        return "\n".join(text) if text else "⚠️ No text found."
    except Exception as e:
        return f"❌ Error reading Excel: {str(e)}"
def answer_question_from_document(file: UploadFile, question: str):
    """Answer a question about an uploaded document.

    Validates the extension, extracts text (PDF / DOCX / PPTX / XLSX),
    truncates it, and feeds question + context to the text-generation
    pipeline. Returns the generated text or an error/warning string.
    """
    print("πŸ“‚ Processing document for QA...")
    validation_error = validate_file_type(file)
    if validation_error:
        return validation_error
    ext = file.filename.split(".")[-1].lower()
    payload = file.file.read()
    # Dispatch table replaces the if/elif chain; same extractors per type.
    extractors = {
        "pdf": extract_text_from_pdf,
        "docx": extract_text_with_tika,
        "pptx": extract_text_with_tika,
        "xlsx": extract_text_from_excel,
    }
    extractor = extractors.get(ext)
    if extractor is None:
        # Unreachable in practice: validate_file_type already filtered.
        return "❌ Unsupported file format!"
    text = extractor(payload)
    if not text:
        return "⚠️ No text extracted from the document."
    context = truncate_text(text)
    print("πŸ€– Generating response...")
    response = doc_qa_pipeline(f"Question: {question}\nContext: {context}")
    return response[0]["generated_text"]
def answer_question_from_image(image, question):
    """Answer a question about an image by captioning it first.

    The BLIP caption serves as the context handed to the text-generation
    pipeline. Returns the generated answer, or an error string on failure.
    """
    try:
        print("πŸ–ΌοΈ Processing image for QA...")
        # Gradio delivers images as NumPy arrays; BLIP wants a PIL image.
        if isinstance(image, np.ndarray):
            image = Image.fromarray(image)
        print("πŸ–ΌοΈ Generating caption for image...")
        # Inputs cast to fp16 to match the half-precision model weights.
        inputs = processor(images=image, return_tensors="pt", use_fast=True).to(dtype=torch.float16)
        caption_ids = model.generate(**inputs)
        caption = processor.decode(caption_ids[0], skip_special_tokens=True)
        print("πŸ€– Answering question based on caption...")
        answer = doc_qa_pipeline(f"Question: {question}\nContext: {caption}")
        return answer[0]["generated_text"]
    except Exception as e:
        return f"❌ Error processing image: {str(e)}"
# --- Gradio UIs: one interface per modality ---

# Document QA: file upload + free-text question.
# NOTE(review): answer_question_from_document reads .filename and .file
# (FastAPI UploadFile shape), but gr.File supplies a temp-file wrapper —
# confirm the input object actually exposes those attributes at runtime.
doc_interface = gr.Interface(
    fn=answer_question_from_document,
    inputs=[gr.File(label="πŸ“‚ Upload Document"), gr.Textbox(label="πŸ’¬ Ask a Question")],
    outputs="text",
    title="πŸ“„ AI Document Question Answering"
)
# Image QA: image upload + free-text question.
img_interface = gr.Interface(
    fn=answer_question_from_image,
    inputs=[gr.Image(label="πŸ–ΌοΈ Upload Image"), gr.Textbox(label="πŸ’¬ Ask a Question")],
    outputs="text",
    title="πŸ–ΌοΈ AI Image Question Answering"
)
# Combine both UIs into tabs and mount the Gradio app onto FastAPI at "/".
demo = gr.TabbedInterface([doc_interface, img_interface], ["πŸ“„ Document QA", "πŸ–ΌοΈ Image QA"])
app = gr.mount_gradio_app(app, demo, path="/")

# NOTE(review): Gradio is mounted at "/" above, so this route is
# effectively shadowed; and if it ever did match, redirecting "/" to "/"
# would be a redirect loop. Consider removing the route or pointing it at
# a distinct path.
@app.get("/")
def home():
    return RedirectResponse(url="/")
# Run FastAPI (with the mounted Gradio app) when executed directly.
# Port 7860 is the port conventionally exposed by Hugging Face Spaces.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)