import torch
import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

# Load the extractive question-answering model and its tokenizer from the Hugging Face Hub
model = AutoModelForQuestionAnswering.from_pretrained("rahul7star/fastai-rahul-text-model-v02")
tokenizer = AutoTokenizer.from_pretrained("rahul7star/fastai-rahul-text-model-v02")
model.eval()  # inference only; disables dropout
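
# Alternatively, the transformers pipeline API wraps tokenization, inference,
# and span decoding in one call (a sketch, assuming this checkpoint is a
# standard extractive QA model):
#   from transformers import pipeline
#   qa = pipeline("question-answering", model="rahul7star/fastai-rahul-text-model-v02")
#   qa(question="Where is the Eiffel Tower?", context="The Eiffel Tower is in Paris.")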

# Extractive question answering: the model selects an answer span from a
# context passage, so it needs both the question and the context.
def get_answer(question, context):
    # Tokenize the question/context pair
    inputs = tokenizer(question, context, return_tensors="pt", truncation=True)

    # Run the model without tracking gradients
    with torch.no_grad():
        outputs = model(**inputs)

    # Pick the most likely start and end token positions for the answer span
    start_idx = torch.argmax(outputs.start_logits)
    end_idx = torch.argmax(outputs.end_logits)

    # Guard against a degenerate span (end before start)
    if end_idx < start_idx:
        return "No answer found."

    # Decode the selected token span back to text
    answer_tokens = inputs.input_ids[0][start_idx:end_idx + 1]
    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)

    return answer
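
# Quick sanity check (assuming a standard extractive QA checkpoint):
#   get_answer("Where is the Eiffel Tower?", "The Eiffel Tower is in Paris.")
#   should return "Paris" (or "No answer found." if span decoding degenerates).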

# Set up the Gradio interface; get_answer takes two arguments, so the
# interface needs two input fields (live=True is dropped here, since
# re-running the model on every keystroke is needlessly expensive)
interface = gr.Interface(
    fn=get_answer,  # Function to call for inference
    inputs=[
        gr.Textbox(label="Ask a Question"),
        gr.Textbox(label="Context", lines=5, placeholder="Passage to extract the answer from"),
    ],
    outputs=gr.Textbox(label="Answer"),  # Output field for the model's answer
)

# Launch the interface
interface.launch()
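
# Note: launch() serves the app locally; passing share=True creates a
# temporary public link for testing. On Hugging Face Spaces the app is
# served automatically, so no extra arguments are needed.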