import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch
# Load model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained(
    "Kevintu/Engessay_grading_ML")
tokenizer = AutoTokenizer.from_pretrained("KevSun/Engessay_grading_ML")
def grade_essay(text):
    # Tokenize the essay, truncating to the model's expected input length
    encoded_input = tokenizer(
        text, return_tensors='pt', padding=True, truncation=True, max_length=64)

    # Run inference without tracking gradients
    model.eval()
    with torch.no_grad():
        outputs = model(**encoded_input)

    # One regression output per rubric dimension
    predictions = outputs.logits.squeeze()
    item_names = ["cohesion", "syntax", "vocabulary",
                  "phraseology", "grammar", "conventions"]

    # Rescale the raw outputs and round each score to the nearest 0.5
    scaled_scores = 2.25 * predictions.numpy() - 1.25
    rounded_scores = [round(score * 2) / 2 for score in scaled_scores]

    results = {item: f"{score:.1f}" for item,
               score in zip(item_names, rounded_scores)}
    return results
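
# Illustrative direct call (a sketch, not part of the app itself; the sample
# sentence is a placeholder):
#   scores = grade_essay("Technology has changed the way students read and write.")
#   # 'scores' maps each rubric dimension to a formatted score string, with keys
#   # "cohesion", "syntax", "vocabulary", "phraseology", "grammar", "conventions".
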
# Create Gradio interface
demo = gr.Interface(
    fn=grade_essay,
    inputs=gr.Textbox(lines=10, placeholder="Enter essay text here..."),
    outputs=gr.JSON(),
    title="Essay Grading API",
    description="Grades essays on six dimensions of writing quality: cohesion, syntax, vocabulary, phraseology, grammar, and conventions.",
    examples=[
        ["The English Language Learner Insight, Proficiency and Skills Evaluation (ELLIPSE) Corpus is a freely available corpus of ~6,500 ELL writing samples that have been scored for overall holistic language proficiency as well as analytic proficiency scores."]
    ]
)
# For API access
demo.queue()
demo.launch()
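
# Example programmatic access once the Space is running (a sketch; the Space ID
# "<user>/<space-name>" is a placeholder and the default Gradio endpoint name
# "/predict" is assumed):
#
#   from gradio_client import Client
#   client = Client("<user>/<space-name>")
#   result = client.predict("Essay text goes here...", api_name="/predict")
#   print(result)  # dict of the six dimension scores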