rahul7star committed
Commit 5f10744 · verified · 1 Parent(s): abf5881

Update app.py

Files changed (1)
  1. app.py +31 -32
app.py CHANGED
@@ -1,37 +1,36 @@
-import torch
 import gradio as gr
-from transformers import AutoModelForQuestionAnswering, AutoTokenizer
 
-# Load the model and tokenizer from Hugging Face
-model = AutoModelForQuestionAnswering.from_pretrained("rahul7star/fastai-rahul-text-model-v02")
-tokenizer = AutoTokenizer.from_pretrained("rahul7star/fastai-rahul-text-model-v02")
 
-# Function to handle predictions (for question-answering tasks)
-def get_answer(question):
-    # Tokenize the input question
-    inputs = tokenizer(question, return_tensors="pt")
-
-    # Get model prediction (start and end positions for the answer)
-    with torch.no_grad():
-        outputs = model(**inputs)
-
-    # Extract start and end positions of the predicted answer
-    start_idx = torch.argmax(outputs.start_logits)
-    end_idx = torch.argmax(outputs.end_logits)
-
-    # Convert the token IDs back to text
-    answer_tokens = inputs.input_ids[0][start_idx:end_idx+1]
-    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
-
-    return answer
 
-# Set up the Gradio interface
-interface = gr.Interface(
-    fn=get_answer,  # Function to call for inference
-    inputs=gr.Textbox(label="Ask a Question"),  # Input field for question
-    outputs=gr.Textbox(label="Answer"),  # Output field for the model's answer
-    live=True  # Set to True for real-time interaction
-)
 
-# Launch the interface
-interface.launch()
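A note on the removed question-answering path: get_answer tokenized only the question, but extractive QA models answer by selecting a span from an accompanying context passage, so inference normally needs both inputs. A minimal sketch of that pattern, using a stock SQuAD checkpoint as a stand-in (whether the repo's own model was ever QA-compatible is not established here):

from transformers import pipeline

# Extractive QA takes a question plus the context passage to search within.
# The checkpoint below is a generic stand-in, not the repo's model.
qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

result = qa(
    question="What serves the model?",
    context="This Space loads a text model and serves it through a Gradio interface.",
)
print(result["answer"], f"(score: {result['score']:.2f})")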
 import gradio as gr
+import torch
+from fastai.text.all import load_learner
+from huggingface_hub import hf_hub_download
 
+# Step 8: Download the model from Hugging Face and load it
+def load_model():
+    try:
+        # Download the model .pth file from Hugging Face
+        model_path = hf_hub_download(
+            repo_id="rahul7star/fastai-rahul-text-model-v02",
+            filename="model.pth"
+        )
+
+        # Load the model using fastai's load_learner
+        learn = load_learner(model_path)
+        print("Model loaded successfully from Hugging Face.")
+        return learn
+    except Exception as e:
+        print(f"Error loading the model: {e}")
+        return None
 
+# Load the model
+learn = load_model()
 
+# Step 9: Define the prediction function for the Gradio interface
+def predict(input_text):
+    try:
+        # Get prediction from the model
+        pred, _, probs = learn.predict(input_text)
+        return f"Prediction: {pred}, Confidence: {probs.max():.2f}"
+    except Exception as e:
+        return f"Error during prediction: {e}"
 
+# Step 10: Create the Gradio interface and launch it
+gr.Interface(fn=predict, inputs="text", outputs="text").launch()
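One assumption worth flagging in the new code: fastai's load_learner reads a pickled Learner written by Learner.export(), so the model.pth downloaded from the Hub must be such an export rather than a raw state_dict. A hypothetical training-side counterpart (sample data, model, and hyperparameters are illustrative only):

from fastai.text.all import *

# Train any text Learner, then export it; load_learner() in app.py can only
# read files written by Learner.export(), not weights saved with
# torch.save(model.state_dict(), ...).
path = untar_data(URLs.IMDB_SAMPLE)
dls = TextDataLoaders.from_csv(path, "texts.csv", text_col="text", label_col="label")
learn = text_classifier_learner(dls, AWD_LSTM, metrics=accuracy)
learn.fine_tune(1)

learn.export("model.pth")  # writes learn.path/model.pth; upload this file to the Hub repo

# Round-trip check: reload the export exactly the way app.py does.
print(load_learner(path/"model.pth").predict("What a great movie!"))

hf_hub_download then hands the Space a cached local path to exactly this exported file, which is what makes the load_model() step above work.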