Commit e9d5a08 by Lord-Raven
Parent(s): 19a483c

Messing with configuration.

Files changed (2):
  1. app.py +3 -2
  2. requirements.txt +1 -1
app.py CHANGED
@@ -20,8 +20,8 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-print(f"Is CUDA available: {torch.cuda.is_available()}")
-print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
+# print(f"Is CUDA available: {torch.cuda.is_available()}")
+# print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 
 # "xenova/mobilebert-uncased-mnli" "typeform/mobilebert-uncased-mnli" Fast but small--same as bundled in Statosphere
 # "xenova/deberta-v3-base-tasksource-nli" Not impressed
@@ -37,6 +37,7 @@ tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
 # model = ORTModelForSequenceClassification.from_pretrained(model_name, export=True, provider="CUDAExecutionProvider")
 # tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, model_max_length=512)
 
+# classifier = pipeline(task="zero-shot-classification", model=model_name, tokenizer=tokenizer_name)
 classifier = pipeline(task="zero-shot-classification", model=model_name, tokenizer=tokenizer_name, device="cuda:0")
 
 def classify(data_string, request: gradio.Request):
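The second hunk leaves a commented-out CPU pipeline next to the hard-coded "cuda:0" one. A minimal sketch of picking the device at runtime instead, assuming the torch and transformers imports already present in app.py (model_name is assumed to match the tokenizer checkpoint visible in the hunk header; only tokenizer_name appears in the diff):

import torch
from transformers import pipeline

# Assumed to mirror tokenizer_name; the actual model_name is not shown in this diff.
model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
tokenizer_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"

# Fall back to CPU when no GPU is visible instead of hard-coding "cuda:0".
device = "cuda:0" if torch.cuda.is_available() else "cpu"
classifier = pipeline(
    task="zero-shot-classification",
    model=model_name,
    tokenizer=tokenizer_name,
    device=device,
)

A single construction like this works on both GPU Spaces and CPU-only hardware, which is what the commented-out line appears to be toggling between by hand.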
requirements.txt CHANGED
@@ -3,5 +3,5 @@ fastapi==0.88.0
 huggingface_hub==0.26.0
 json5==0.9.25
 numpy
-optimum[onnxruntime-gpu]==1.24.0
+optimum[onnxruntime]==1.24.0
 transformers==4.36
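The switch from the onnxruntime-gpu extra to the plain onnxruntime extra lines up with the commented-out ORT lines in app.py. A minimal sketch of that ONNX Runtime path on the CPU provider, assuming the same checkpoint (an illustration of the pattern, not the commit's code):

from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer, pipeline

model_name = "MoritzLaurer/deberta-v3-base-zeroshot-v2.0"
# export=True converts the checkpoint to ONNX on load; CPUExecutionProvider
# is the provider available with the non-GPU onnxruntime extra.
model = ORTModelForSequenceClassification.from_pretrained(
    model_name, export=True, provider="CPUExecutionProvider"
)
tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=512)
classifier = pipeline(task="zero-shot-classification", model=model, tokenizer=tokenizer)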