# app.py — MentalBERT embedding demo (priya2k, commit f41a38c)
from transformers import AutoTokenizer, AutoModel
import torch
import os
import gradio as gr
# Load the Hugging Face access token from the environment (required to pull
# the gated "mental/mental-bert-base-uncased" checkpoint).
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("❌ Hugging Face API token not found! Set HF_TOKEN as an environment variable.")

# Load tokenizer and model once at startup.
# NOTE: `use_auth_token` is deprecated in recent transformers releases;
# `token` is the supported keyword and behaves identically.
tokenizer = AutoTokenizer.from_pretrained(
    "mental/mental-bert-base-uncased",
    token=HF_TOKEN,
)
model = AutoModel.from_pretrained(
    "mental/mental-bert-base-uncased",
    token=HF_TOKEN,
    output_hidden_states=True,
)
model.eval()  # disable dropout so inference is deterministic
def infer(text):
    """Return the mean-pooled MentalBERT embedding of *text* as a flat list of floats.

    The input is truncated/padded to at most 512 tokens; padding positions are
    excluded from the average via the attention mask.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    with torch.no_grad():
        model_out = model(**encoded)

    # Token embeddings: (1, seq_len, hidden_size)
    token_embeddings = model_out.last_hidden_state
    # Broadcast the attention mask across the hidden dimension so that
    # padding tokens contribute nothing to the sum.
    attn = encoded["attention_mask"].unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = (token_embeddings * attn).sum(dim=1)
    token_counts = attn.sum(dim=1).clamp(min=1e-9)  # guard against divide-by-zero
    pooled = summed / token_counts
    return pooled.squeeze().tolist()
# Wire the embedding function into a minimal Gradio UI:
# a single text box in, the raw embedding (stringified list) out.
iface = gr.Interface(
    fn=infer,
    inputs=[gr.Textbox(label="text")],
    outputs="text",
)
iface.launch()