Update app.py
app.py CHANGED
@@ -1,25 +1,26 @@
 
-from fastapi import FastAPI, UploadFile
+from fastapi import FastAPI, File, UploadFile
 import pdfplumber
 import docx
 import openpyxl
 from pptx import Presentation
+import easyocr
 from transformers import pipeline
 import gradio as gr
 from fastapi.responses import RedirectResponse
 
-#
+# Initialize FastAPI
 app = FastAPI()
 
-#
-qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-large", tokenizer="google/flan-t5-large", use_fast=True
+# Load AI Model for Question Answering
+qa_pipeline = pipeline("text2text-generation", model="google/flan-t5-large", tokenizer="google/flan-t5-large", use_fast=True)
 
-#
+# Function to truncate text to 450 tokens
 def truncate_text(text, max_tokens=450):
     words = text.split()
     return " ".join(words[:max_tokens])
 
-#
+# Functions to extract text from different file formats
 def extract_text_from_pdf(pdf_file):
     text = ""
     with pdfplumber.open(pdf_file) as pdf:
@@ -48,7 +49,12 @@ def extract_text_from_excel(excel_file):
         text.append(" ".join(map(str, row)))
     return "\n".join(text)
 
-
+def extract_text_from_image(image_file):
+    reader = easyocr.Reader(["en"])
+    result = reader.readtext(image_file)
+    return " ".join([res[1] for res in result])
+
+# Function to answer questions based on document content
 def answer_question_from_document(file, question):
     file_ext = file.name.split(".")[-1].lower()
 
@@ -65,13 +71,26 @@ def answer_question_from_document(file, question):
 
     if not text:
         return "No text extracted from the document."
+
+    truncated_text = truncate_text(text)
+    input_text = f"Question: {question} Context: {truncated_text}"
+    response = qa_pipeline(input_text)
+
+    return response[0]["generated_text"]
 
-
+# Function to answer questions based on image content
+def answer_question_from_image(image, question):
+    image_text = extract_text_from_image(image)
+    if not image_text:
+        return "No text detected in the image."
+
+    truncated_text = truncate_text(image_text)
     input_text = f"Question: {question} Context: {truncated_text}"
     response = qa_pipeline(input_text)
+
     return response[0]["generated_text"]
 
-#
+# Gradio UI for Document & Image QA
 doc_interface = gr.Interface(
     fn=answer_question_from_document,
     inputs=[gr.File(label="Upload Document"), gr.Textbox(label="Ask a Question")],
@@ -79,8 +98,15 @@ doc_interface = gr.Interface(
     title="AI Document Question Answering"
 )
 
-
-
+img_interface = gr.Interface(
+    fn=answer_question_from_image,
+    inputs=[gr.Image(label="Upload Image"), gr.Textbox(label="Ask a Question")],
+    outputs="text",
+    title="AI Image Question Answering"
+)
+
+# Mount Gradio Interfaces
+demo = gr.TabbedInterface([doc_interface, img_interface], ["Document QA", "Image QA"])
 app = gr.mount_gradio_app(app, demo, path="/")
 
 @app.get("/")
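For reference, a standalone sketch of the "Question: ... Context: ..." prompt format that both answer_question_from_document and answer_question_from_image feed to the text2text-generation pipeline. This is not part of the commit; the example context, question, and the smaller flan-t5-base checkpoint are assumptions chosen only to keep the demo light (the app itself loads flan-t5-large).

# Standalone prompt-format sketch (not part of the commit).
from transformers import pipeline

qa = pipeline("text2text-generation", model="google/flan-t5-base")
context = "The invoice total is 1,240 EUR and payment is due on 12 March."
question = "When is the payment due?"
prompt = f"Question: {question} Context: {context}"
print(qa(prompt, max_new_tokens=32)[0]["generated_text"])

To try the updated app outside Hugging Face Spaces, the FastAPI application with the mounted Gradio UI can be served with uvicorn. The filename app.py and port 7860 (the Spaces default) are assumptions; adjust them to your setup.

# Local run sketch (assumes the file is saved as app.py and port 7860 is free).
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="0.0.0.0", port=7860)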