rahul7star committed (verified)
Commit ce9be4a · 1 Parent(s): 3c9aeef

Update app.py

Files changed (1)
app.py +30 -13
app.py CHANGED
@@ -1,20 +1,37 @@
+import torch
 import gradio as gr
-from transformers import BertForQuestionAnswering, BertTokenizer, pipeline
-model_name = "rahul7star/Rahul-Context-AI-01"
-# Load your fine-tuned model and tokenizer
-model = BertForQuestionAnswering.from_pretrained(model_name)  # Path to the fine-tuned model
-tokenizer = BertTokenizer.from_pretrained(model_name)  # Path to the tokenizer
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer
 
-# Initialize the custom pipeline with your fine-tuned model
-qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
+# Load the model and tokenizer from Hugging Face
+model = AutoModelForQuestionAnswering.from_pretrained("rahul7star/fastai-rahul-text-model-v02")
+tokenizer = AutoTokenizer.from_pretrained("rahul7star/fastai-rahul-text-model-v02")
 
-# Function to get the answer using the fine-tuned model
-def answer_question(context, question):
-    result = qa_pipeline({'context': context, 'question': question})
-    return result['answer']
+# Function to handle predictions (for question-answering tasks)
+def get_answer(question):
+    # Tokenize the input question
+    inputs = tokenizer(question, return_tensors="pt")
+
+    # Get model prediction (start and end positions for the answer)
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    # Extract start and end positions of the predicted answer
+    start_idx = torch.argmax(outputs.start_logits)
+    end_idx = torch.argmax(outputs.end_logits)
+
+    # Convert the token IDs back to text
+    answer_tokens = inputs.input_ids[0][start_idx:end_idx+1]
+    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
+
+    return answer
 
-# Define the Gradio interface
-interface = gr.Interface(fn=answer_question, inputs=["text", "text"], outputs="text")
+# Set up the Gradio interface
+interface = gr.Interface(
+    fn=get_answer,                              # Function to call for inference
+    inputs=gr.Textbox(label="Ask a Question"),  # Input field for question
+    outputs=gr.Textbox(label="Answer"),         # Output field for the model's answer
+    live=True                                   # Set to True for real-time interaction
+)
 
 # Launch the interface
 interface.launch()
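
For reference, the same checkpoint can also be queried through the Transformers question-answering pipeline with an explicit context passage, as the previous version of app.py did. A minimal sketch of that usage, assuming the checkpoint loads as a question-answering head; the question and context strings below are placeholders, not from the repository:

from transformers import pipeline

# Build an extractive QA pipeline around the fine-tuned checkpoint
qa = pipeline("question-answering", model="rahul7star/fastai-rahul-text-model-v02")

# The pipeline takes both a question and a context passage (placeholder strings)
result = qa(question="What is the demo built with?",
            context="The demo is a Gradio app that wraps a fine-tuned question-answering model.")
print(result["answer"])  # the span of the context predicted as the answer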