Spaces:
Running
on
Zero
Running
on
Zero
Lord-Raven
committed on
Commit
·
e355e92
1
Parent(s):
de1ced9
Messing with configuration.
Browse files
- app.py +12 -6
- requirements.txt +1 -0
app.py
CHANGED
@@ -36,16 +36,18 @@ print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
|
|
36 |
# model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True, provider="CUDAExecutionProvider")
|
37 |
# tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, model_max_length=512)
|
38 |
|
39 |
-
model = ORTModelForSequenceClassification.from_pretrained(
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
)
|
44 |
|
45 |
-
tokenizer = AutoTokenizer.from_pretrained("philschmid/tiny-bert-sst2-distilled")
|
46 |
|
47 |
# classifier = pipeline(task="zero-shot-classification", model=model, tokenizer=tokenizer, device="cuda:0")
|
48 |
|
|
|
|
|
49 |
def classify(data_string, request: gradio.Request):
|
50 |
if request:
|
51 |
if request.headers["origin"] not in ["https://statosphere-3704059fdd7e.c5v4v4jx6pq5.win", "https://crunchatize-77a78ffcc6a6.c5v4v4jx6pq5.win", "https://crunchatize-2-2b4f5b1479a6.c5v4v4jx6pq5.win", "https://tamabotchi-2dba63df3bf1.c5v4v4jx6pq5.win", "https://ravenok-statosphere-backend.hf.space", "https://lord-raven.github.io"]:
|
@@ -56,6 +58,7 @@ def classify(data_string, request: gradio.Request):
|
|
56 |
# else:
|
57 |
return zero_shot_classification(data)
|
58 |
|
|
|
59 |
# @spaces.GPU()
|
60 |
def zero_shot_classification(data):
|
61 |
results = []
|
@@ -67,6 +70,7 @@ def create_sequences(data):
|
|
67 |
# return ['###Given:\n' + data['sequence'] + '\n###End Given\n###Hypothesis:\n' + data['hypothesis_template'].format(label) + "\n###End Hypothesis" for label in data['candidate_labels']]
|
68 |
return [data['sequence'] + '\n' + data['hypothesis_template'].format(label) for label in data['candidate_labels']]
|
69 |
|
|
|
70 |
# def few_shot_classification(data):
|
71 |
# sequences = create_sequences(data)
|
72 |
# print(sequences)
|
@@ -84,9 +88,11 @@ def create_sequences(data):
|
|
84 |
# response_string = json.dumps(response_dict)
|
85 |
# return response_string
|
86 |
|
|
|
87 |
gradio_interface = gradio.Interface(
|
88 |
fn = classify,
|
89 |
inputs = gradio.Textbox(label="JSON Input"),
|
90 |
outputs = gradio.Textbox()
|
91 |
)
|
|
|
92 |
gradio_interface.launch()
|
|
|
36 |
# model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True, provider="CUDAExecutionProvider")
|
37 |
# tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, model_max_length=512)
|
38 |
|
39 |
+
# model = ORTModelForSequenceClassification.from_pretrained(
|
40 |
+
# "philschmid/tiny-bert-sst2-distilled",
|
41 |
+
# export=True,
|
42 |
+
# provider="CUDAExecutionProvider",
|
43 |
+
# )
|
44 |
|
45 |
+
# tokenizer = AutoTokenizer.from_pretrained("philschmid/tiny-bert-sst2-distilled")
|
46 |
|
47 |
# classifier = pipeline(task="zero-shot-classification", model=model, tokenizer=tokenizer, device="cuda:0")
|
48 |
|
49 |
+
print(f"Testing 1")
|
50 |
+
|
51 |
def classify(data_string, request: gradio.Request):
|
52 |
if request:
|
53 |
if request.headers["origin"] not in ["https://statosphere-3704059fdd7e.c5v4v4jx6pq5.win", "https://crunchatize-77a78ffcc6a6.c5v4v4jx6pq5.win", "https://crunchatize-2-2b4f5b1479a6.c5v4v4jx6pq5.win", "https://tamabotchi-2dba63df3bf1.c5v4v4jx6pq5.win", "https://ravenok-statosphere-backend.hf.space", "https://lord-raven.github.io"]:
|
|
|
58 |
# else:
|
59 |
return zero_shot_classification(data)
|
60 |
|
61 |
+
print(f"Testing 2")
|
62 |
# @spaces.GPU()
|
63 |
def zero_shot_classification(data):
|
64 |
results = []
|
|
|
70 |
# return ['###Given:\n' + data['sequence'] + '\n###End Given\n###Hypothesis:\n' + data['hypothesis_template'].format(label) + "\n###End Hypothesis" for label in data['candidate_labels']]
|
71 |
return [data['sequence'] + '\n' + data['hypothesis_template'].format(label) for label in data['candidate_labels']]
|
72 |
|
73 |
+
print(f"Testing 3")
|
74 |
# def few_shot_classification(data):
|
75 |
# sequences = create_sequences(data)
|
76 |
# print(sequences)
|
|
|
88 |
# response_string = json.dumps(response_dict)
|
89 |
# return response_string
|
90 |
|
91 |
+
print(f"Testing 4")
|
92 |
gradio_interface = gradio.Interface(
|
93 |
fn = classify,
|
94 |
inputs = gradio.Textbox(label="JSON Input"),
|
95 |
outputs = gradio.Textbox()
|
96 |
)
|
97 |
+
print(f"Testing 5")
|
98 |
gradio_interface.launch()
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
fastapi==0.88.0
|
2 |
huggingface_hub==0.23.5
|
3 |
json5==0.9.25
|
|
|
1 |
+
torch==2.4.0
|
2 |
fastapi==0.88.0
|
3 |
huggingface_hub==0.23.5
|
4 |
json5==0.9.25
|