rahul7star commited on
Commit
6880433
·
verified ·
1 Parent(s): ccde795

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -0
app.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import BertForQuestionAnswering, BertTokenizer, pipeline
3
+
4
# Identifier/path of the fine-tuned BERT QA checkpoint (model + tokenizer).
_MODEL_PATH = "RahulTextLLM"

# Load the fine-tuned model and its matching tokenizer once at startup.
model = BertForQuestionAnswering.from_pretrained(_MODEL_PATH)
tokenizer = BertTokenizer.from_pretrained(_MODEL_PATH)

# Question-answering pipeline backed by the fine-tuned model above.
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
10
+
11
def answer_question(context, question):
    """Extract the answer to *question* from *context*.

    Both arguments are plain text strings (fed in from the Gradio text
    inputs); the return value is the answer span string produced by the
    module-level ``qa_pipeline``.
    """
    prediction = qa_pipeline({'question': question, 'context': context})
    return prediction['answer']
15
+
16
# Gradio UI: two free-text inputs (context, question) -> one text output.
interface = gr.Interface(
    fn=answer_question,
    inputs=["text", "text"],
    outputs="text",
)

# Start the Gradio web server for this app.
interface.launch()