"""from fastapi import FastAPI
from fastapi.responses import RedirectResponse
import gradio as gr
from transformers import pipeline, ViltProcessor, ViltForQuestionAnswering, AutoTokenizer, AutoModelForCausalLM
from PIL import Image
import torch
import fitz  # PyMuPDF for PDF
app = FastAPI()

# ========== Image QA Setup ==========
vqa_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
vqa_model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
def answer_question_from_image(image, question):
    if image is None or not question.strip():
        return "Please upload an image and ask a question."
    inputs = vqa_processor(image, question, return_tensors="pt")
    with torch.no_grad():
        outputs = vqa_model(**inputs)
    predicted_id = outputs.logits.argmax(-1).item()
    return vqa_model.config.id2label[predicted_id]
# ========== Gradio Interfaces ==========

img_interface = gr.Interface(
    fn=answer_question_from_image,
    inputs=[gr.Image(label="Upload Image"), gr.Textbox(label="Ask a Question")],
    outputs="text",
    title="Image Question Answering"
)
# ========== Combine and Mount ==========
demo = gr.TabbedInterface( img_interface , "Image QA")
app = gr.mount_gradio_app(app, demo, path="/")
@app.get("/")
def root():
    return RedirectResponse(url="/") """
from transformers import ViltProcessor, ViltForQuestionAnswering
import torch

# Active version: a plain inference helper with no web layer.
# Load the ViLT visual question answering model once at import time so
# repeated calls reuse the same weights.
vqa_processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
vqa_model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

def answer_question_from_image(image, question):
    """Answer a natural-language question about a PIL image using ViLT."""
    if image is None or not question.strip():
        return "Please upload an image and ask a question."

    # Preprocess the image and tokenize the question into model inputs.
    inputs = vqa_processor(image, question, return_tensors="pt")
    with torch.no_grad():
        outputs = vqa_model(**inputs)

    # ViLT treats VQA as classification over a fixed answer vocabulary:
    # take the highest-scoring answer id and map it back to its label.
    predicted_id = outputs.logits.argmax(-1).item()
    return vqa_model.config.id2label[predicted_id]