import gradio as gr
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Load the grading model and its tokenizer from the same checkpoint
model = AutoModelForSequenceClassification.from_pretrained(
    "KevSun/Engessay_grading_ML")
tokenizer = AutoTokenizer.from_pretrained("KevSun/Engessay_grading_ML")


def grade_essay(text):
    # Tokenize the essay; inputs are truncated to the first 64 tokens
    encoded_input = tokenizer(
        text, return_tensors='pt', padding=True, truncation=True, max_length=64)
    model.eval()
    with torch.no_grad():
        outputs = model(**encoded_input)
    predictions = outputs.logits.squeeze()

    # Rescale the raw outputs and round each score to the nearest half point
    item_names = ["cohesion", "syntax", "vocabulary",
                  "phraseology", "grammar", "conventions"]
    scaled_scores = 2.25 * predictions.numpy() - 1.25
    rounded_scores = [round(score * 2) / 2 for score in scaled_scores]

    # Return one formatted score per writing dimension
    results = {item: f"{score:.1f}"
               for item, score in zip(item_names, rounded_scores)}
    return results
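
# Quick local check (a sketch; actual scores depend on the model's output):
#
#   scores = grade_essay("Writing clearly is a skill that improves with practice.")
#   print(scores)  # dict mapping each dimension to a half-point score string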


# Create Gradio interface
demo = gr.Interface(
    fn=grade_essay,
    inputs=gr.Textbox(lines=10, placeholder="Enter essay text here..."),
    outputs=gr.JSON(),
    title="Essay Grading API",
    description="Grade essays on six dimensions of writing quality",
    examples=[
        ["The English Language Learner Insight, Proficiency and Skills Evaluation (ELLIPSE) Corpus is a freely available corpus of ~6,500 ELL writing samples that have been scored for overall holistic language proficiency as well as analytic proficiency scores."]
    ]
)

# Enable request queuing, then launch the app (serves both the UI and the HTTP API)
demo.queue()
demo.launch()
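
# Calling the running app programmatically (a sketch; assumes gradio_client is
# installed, the app is reachable at this URL, and the default "/predict"
# endpoint name is used -- adjust both for your deployment):
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   scores = client.predict(
#       "Your essay text here...",  # value for the Textbox input
#       api_name="/predict",
#   )
#   print(scores)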