* upgrade version of llm guard
- prompt.py +0 -4
- requirements.txt +2 -2
prompt.py
CHANGED
@@ -6,7 +6,6 @@ from typing import Dict, List
 import streamlit as st
 from llm_guard.input_scanners import get_scanner_by_name
 from llm_guard.input_scanners.anonymize import default_entity_types
-from llm_guard.input_scanners.prompt_injection import ALL_MODELS as PI_ALL_MODELS
 from llm_guard.vault import Vault
 from streamlit_tags import st_tags
 
@@ -361,9 +360,6 @@ def get_scanner(scanner_name: str, vault: Vault, settings: Dict):
     if scanner_name == "Anonymize":
         settings["vault"] = vault
 
-    if scanner_name == "PromptInjection":
-        settings["models"] = PI_ALL_MODELS
-
     if scanner_name in ["Anonymize", "BanTopics", "Code", "PromptInjection", "Toxicity"]:
         settings["use_onnx"] = True
 
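After this change, the settings built in prompt.py no longer pass an explicit model list to the PromptInjection scanner. A minimal sketch of the resulting get_scanner helper is below; the final get_scanner_by_name(scanner_name, settings) call is an assumption about how the factory is invoked elsewhere in prompt.py, since only the settings-building lines appear in the diff.

from typing import Dict

from llm_guard.input_scanners import get_scanner_by_name
from llm_guard.vault import Vault


def get_scanner(scanner_name: str, vault: Vault, settings: Dict):
    # Anonymize stores redacted values in a shared Vault.
    if scanner_name == "Anonymize":
        settings["vault"] = vault

    # PromptInjection no longer receives settings["models"]; the library's
    # default model is used instead.
    if scanner_name in ["Anonymize", "BanTopics", "Code", "PromptInjection", "Toxicity"]:
        settings["use_onnx"] = True

    # Assumed factory call; only the settings assembly above is shown in the diff.
    return get_scanner_by_name(scanner_name, settings)

Called as, for example, get_scanner("Toxicity", vault, {}), the settings dict is mutated in place before it reaches the factory.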
requirements.txt
CHANGED
@@ -1,5 +1,5 @@
-llm-guard==0.3.
-llm-guard[onnxruntime]==0.3.
+llm-guard==0.3.3
+llm-guard[onnxruntime]==0.3.3
 pandas==2.1.3
 streamlit==1.28.2
 streamlit-tags==1.2.8
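A quick way to confirm the pinned upgrade took effect in a given environment, using only the standard library (a hypothetical check, not part of the repository):

from importlib.metadata import version

# Both requirements lines pin the same distribution (the second just adds the
# onnxruntime extra), so a single check covers them.
assert version("llm-guard") == "0.3.3"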