jeffhaines committed
Commit bc4b1cb · Parent(s): 3915f8f

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -9,7 +9,7 @@ from transformers import AutoModelForSequenceClassification, AutoTokenizer
 st.title('Ethical Judgment Classifier')
 st.write('This app uses a pre-trained Distilbert model fine-tuned on the Commonsense Ethics dataset from the Aligning AI With Shared Human Values project (for more information, see https://github.com/hendrycks/ethics). It judges whether a given action of scenario is wrong or not wrong and uses transformers-interpret (https://pypi.org/project/transformers-interpret/) to show how the words in your scenario affected the model\'s judgment.')
 
-loaded_model = DistilBertForSequenceClassification.from_pretrained('commonsense_ethics')
+loaded_model = DistilBertForSequenceClassification.from_pretrained('distilbert')
 model_name = 'distilbert-base-uncased'
 tokenizer = DistilBertTokenizerFast.from_pretrained(model_name)
 cls_explainer = SequenceClassificationExplainer(loaded_model, tokenizer)
@@ -20,7 +20,7 @@ text = st.text_input('Enter a scenario or action.')
 
 if text:
     answer = clf(text)
-    label = 'wrong' if answer[0]['label'] == 'LABEL_1' else 'not wrong'
+    label = 'wrong' if answer[0]['label'] == 'LABEL_0' else 'not wrong'
     st.write(f'This action is {label} (confidence level {answer[0]["score"]*100:.2f}%).')
     attributions = cls_explainer(text)
     df = pd.DataFrame(attributions[1:-1])
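
For context, the hunks above show only a slice of app.py: `clf`, `pd`, and whatever renders the attribution table are defined elsewhere in the file. A minimal sketch of how those pieces would typically be wired together is below; apart from the 'distilbert' path taken from the new line in the diff, the imports, the text-classification pipeline, and the final `st.table` call are assumptions, not the Space's actual code.

# Hypothetical reconstruction of the surrounding app.py, for orientation only.
import pandas as pd
import streamlit as st
from transformers import (
    DistilBertForSequenceClassification,
    DistilBertTokenizerFast,
    pipeline,
)
from transformers_interpret import SequenceClassificationExplainer

st.title('Ethical Judgment Classifier')

# Fine-tuned weights path as written in the committed line above.
loaded_model = DistilBertForSequenceClassification.from_pretrained('distilbert')
model_name = 'distilbert-base-uncased'
tokenizer = DistilBertTokenizerFast.from_pretrained(model_name)

# `clf` is only called in the diff; a text-classification pipeline built from
# the same model and tokenizer is the assumed definition.
clf = pipeline('text-classification', model=loaded_model, tokenizer=tokenizer)
cls_explainer = SequenceClassificationExplainer(loaded_model, tokenizer)

text = st.text_input('Enter a scenario or action.')

if text:
    answer = clf(text)
    label = 'wrong' if answer[0]['label'] == 'LABEL_0' else 'not wrong'
    st.write(f'This action is {label} (confidence level {answer[0]["score"]*100:.2f}%).')
    # Word-level attributions; drop the [CLS]/[SEP] entries at either end.
    attributions = cls_explainer(text)
    df = pd.DataFrame(attributions[1:-1])
    st.table(df)  # assumed display step; the diff cuts off after the DataFrame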
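
The second hunk flips which pipeline label is read as 'wrong' (LABEL_1 to LABEL_0). Which integer id means which class depends entirely on how the labels were encoded when the model was fine-tuned, so the mapping is worth confirming on the loaded checkpoint rather than assuming it. A small check along those lines, using the same assumed 'distilbert' path as above:

from transformers import DistilBertForSequenceClassification

model = DistilBertForSequenceClassification.from_pretrained('distilbert')
# Prints something like {0: 'LABEL_0', 1: 'LABEL_1'} unless human-readable
# names were stored at fine-tuning time; the id-to-meaning association has
# to come from how the training labels were encoded.
print(model.config.id2label)

If readable names (e.g. 'wrong' / 'not wrong') are saved into config.id2label before the model is pushed, the Streamlit code can compare against those names directly instead of the generic LABEL_* strings.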