# Gradio Space: text-classification demo serving a FastAI model
# downloaded from the Hugging Face Hub.
# (Replaced non-code UI residue "Spaces: / Running / Running" that was
# pasted into the file and would break parsing.)
import gradio as gr
import torch  # kept from original; not referenced directly in this file
from fastai.text.all import load_learner
from huggingface_hub import hf_hub_download
# Step 8: Download the model from Hugging Face and load it
def load_model():
    """Download the FastAI model from the Hugging Face Hub and load it.

    Returns:
        The loaded FastAI ``Learner`` on success, or ``None`` if the
        download or deserialization fails (errors are printed, not raised).
    """
    try:
        # Fetch the weights file from the model repo; hf_hub_download
        # caches it locally and returns the local path.
        model_path = hf_hub_download(
            repo_id="rahul7star/fastai-rahul-text-model-v02",
            filename="model.pth",
        )
        # NOTE(review): load_learner normally expects an export.pkl written
        # by Learner.export(); loading a raw .pth may fail — confirm the
        # repo actually stores an exported learner under this filename.
        learn = load_learner(model_path)
        print("Model loaded successfully from Hugging Face.")
        return learn
    except Exception as e:
        # Broad catch is deliberate: the app should still start even if the
        # model cannot be loaded; predict() reports the failure per request.
        print(f"Error loading the model: {e}")
        return None
# Load the model once at import time so every request reuses the same
# Learner instance; may be None if loading failed (handled in predict()).
learn = load_model()
# Step 9: Define the Gradio Interface
def predict(input_text):
    """Classify *input_text* with the globally loaded model.

    Args:
        input_text: Raw text entered by the user in the Gradio textbox.

    Returns:
        A human-readable prediction string, or an error message when the
        model is unavailable or inference fails.
    """
    try:
        # FastAI's Learner.predict returns
        # (decoded_label, label_index, probabilities).
        pred, _, probs = learn.predict(input_text)
        return f"Prediction: {pred}, Confidence: {probs.max():.2f}"
    except Exception as e:
        # Covers both a failed model load (learn is None) and any
        # inference-time error; surfaces the message in the UI.
        return f"Error during prediction: {e}"
# Step 10: Create Gradio Interface
if __name__ == "__main__":
    # Launch only when run as a script (Hugging Face Spaces executes the
    # app file directly), so importing this module does not start a server.
    gr.Interface(fn=predict, inputs="text", outputs="text").launch()