File size: 1,534 Bytes
93643d5
040c521
b0d2a02
e83c60c
d54f118
6b9e813
8a243e5
 
 
b0d2a02
 
 
 
31fb3f9
 
 
6b9e813
d184de8
31fb3f9
6b9e813
 
 
 
b0d2a02
8a243e5
 
b0d2a02
8a243e5
 
 
 
b0d2a02
47a0109
daac94f
47a0109
daac94f
9704577
0686401
 
5071704
93643d5
 
daac94f
0686401
93643d5
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
# Standard library
import json

# Third-party
import gradio
import torch
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from huggingface_hub import cached_download
from optimum.onnxruntime import ORTModelForQuestionAnswering
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer
from transformers import TokenClassificationPipeline
from transformers import pipeline

class OnnxTokenClassificationPipeline(TokenClassificationPipeline):
    """Placeholder for an ONNX-backed token-classification pipeline.

    NOTE(review): the original declaration had no body (a SyntaxError — the
    next statement was unindented) and ``TokenClassificationPipeline`` was
    never imported (the base class import is added at the top of the file).
    The class is unused in this module — confirm whether it can be deleted.
    """

    pass

# CORS Config
# Restrict browser (cross-origin) access to the API to a single allowed
# origin; the commented-out origin below is an alternative deployment.
# NOTE(review): `app` is never mounted or served later in this file —
# gradio_interface.launch() runs its own server. Confirm `app` is used
# elsewhere (e.g. mounted by the hosting platform).
app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://jhuhman.com"], #["https://statosphere-3704059fdd7e.c5v4v4jx6pq5.win"],
    allow_credentials=True,   # allow cookies/auth headers from the origin
    allow_methods=["*"],      # all HTTP methods
    allow_headers=["*"],      # all request headers
)

# Zero-shot classification runs an NLI (sequence-classification) head, so the
# MNLI checkpoint must be loaded with the sequence-classification ORT class.
# The original used ORTModelForQuestionAnswering — the wrong task head for
# this model and for the "zero-shot-classification" pipeline below.
model_name = "xenova/mobilebert-uncased-mnli"
model = ORTModelForSequenceClassification.from_pretrained(model_name)
# Tokenizer is taken from the original (non-ONNX) repo of the same model.
tokenizer = AutoTokenizer.from_pretrained("typeform/mobilebert-uncased-mnli")

# file = cached_download("https://huggingface.co/" + model_name + "")
# sess = InferenceSession(file)

classifier = pipeline(task="zero-shot-classification", model=model, tokenizer=tokenizer)

def zero_shot_classification(data_string):
    """Run zero-shot classification on a JSON-encoded request.

    Parameters
    ----------
    data_string : str
        A JSON object with keys ``sequence`` (str), ``candidate_labels``
        (list of str), ``hypothesis_template`` (str) and ``multi_label``
        (bool) — passed straight through to the pipeline.

    Returns
    -------
    str
        The classifier's result dict serialized back to JSON.

    Raises
    ------
    json.JSONDecodeError
        If ``data_string`` is not valid JSON.
    KeyError
        If a required key is missing from the decoded object.
    """
    # Debug prints of the raw payload removed: they leaked every request's
    # full content to stdout.
    data = json.loads(data_string)
    results = classifier(
        data['sequence'],
        candidate_labels=data['candidate_labels'],
        hypothesis_template=data['hypothesis_template'],
        multi_label=data['multi_label'],
    )
    # Callers expect a JSON string, not a dict.
    return json.dumps(results)

# Expose the classifier through a minimal Gradio UI: one JSON-in text box,
# one JSON-out text box.
gradio_interface = gradio.Interface(
    fn=zero_shot_classification,
    inputs=gradio.Textbox(label="JSON Input"),
    outputs=gradio.Textbox(),
)

# Blocks here serving requests until the process is stopped.
gradio_interface.launch()