Lord-Raven committed
Commit 19a483c · 1 Parent(s): de9282c

Messing with configuration.

Files changed (1)
  1. app.py +2 -21
app.py CHANGED
@@ -37,23 +37,8 @@ tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
  # model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True, provider="CUDAExecutionProvider")
  # tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, model_max_length=512)

- # session_options = onnxruntime.SessionOptions()
- # session_options.log_severity_level = 0
-
- # print(f"ORTModelForSequenceClassification.from_pretrained")
- # model = ORTModelForSequenceClassification.from_pretrained(
- #     "distilbert-base-uncased-finetuned-sst-2-english",
- #     export=True,
- #     provider="CUDAExecutionProvider",
- #     session_options=session_options
- # )
- # print(f"AutoTokenizer.from_pretrained")
- # tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
-
- print(f"pipeline")
  classifier = pipeline(task="zero-shot-classification", model=model_name, tokenizer=tokenizer_name, device="cuda:0")

- print(f"Testing 1")
  def classify(data_string, request: gradio.Request):
      if request:
          if request.headers["origin"] not in ["https://statosphere-3704059fdd7e.c5v4v4jx6pq5.win", "https://crunchatize-77a78ffcc6a6.c5v4v4jx6pq5.win", "https://crunchatize-2-2b4f5b1479a6.c5v4v4jx6pq5.win", "https://tamabotchi-2dba63df3bf1.c5v4v4jx6pq5.win", "https://ravenok-statosphere-backend.hf.space", "https://lord-raven.github.io"]:
@@ -64,7 +49,6 @@ def classify(data_string, request: gradio.Request):
      # else:
      return zero_shot_classification(data)

- print(f"Testing 2")
  @spaces.GPU
  def zero_shot_classification(data):
      results = classifier(data['sequence'], candidate_labels=data['candidate_labels'], hypothesis_template=data['hypothesis_template'], multi_label=data['multi_label'])
@@ -75,7 +59,6 @@ def create_sequences(data):
      # return ['###Given:\n' + data['sequence'] + '\n###End Given\n###Hypothesis:\n' + data['hypothesis_template'].format(label) + "\n###End Hypothesis" for label in data['candidate_labels']]
      return [data['sequence'] + '\n' + data['hypothesis_template'].format(label) for label in data['candidate_labels']]

- print(f"Testing 3")
  # def few_shot_classification(data):
  # sequences = create_sequences(data)
  # print(sequences)
@@ -91,13 +74,11 @@ print(f"Testing 3")
  # response_dict = {'scores': scores, 'labels': labels}
  # print(response_dict)
  # response_string = json.dumps(response_dict)
- # return response_string
-
- print(f"Testing 4")
+ # return response_strin
  gradio_interface = gradio.Interface(
      fn = classify,
      inputs = gradio.Textbox(label="JSON Input"),
      outputs = gradio.Textbox()
  )
- print(f"Testing 5")
+
  gradio_interface.launch()
 
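For context, the JSON string submitted to classify appears to carry the four fields read by zero_shot_classification above. A minimal sketch of such a payload follows; the example values, and the assumption that classify parses data_string with json.loads, are illustrative and not part of this commit:

import json

# Hypothetical payload; keys mirror data['sequence'], data['candidate_labels'],
# data['hypothesis_template'], and data['multi_label'] used in the classifier call.
data_string = json.dumps({
    "sequence": "The knight drew his sword and charged.",
    "candidate_labels": ["combat", "dialogue", "exploration"],
    "hypothesis_template": "This text is about {}.",
    "multi_label": True,
})
# Submitting data_string through the "JSON Input" textbox (or a Gradio client)
# would run the zero-shot pipeline over the sequence against each candidate label,
# provided the request's origin is on the allow-list checked in classify().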