Commit fbc7734
1 Parent(s): 2412ff0
Delete commonsense_judgments.py
commonsense_judgments.py +0 -30
DELETED
@@ -1,30 +0,0 @@
-from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification
-import torch
-import pandas as pd
-import numpy as np
-import streamlit as st
-from transformers import pipeline
-from transformers_interpret import SequenceClassificationExplainer
-from transformers import AutoModelForSequenceClassification, AutoTokenizer
-
-st.title('Ethics Classifier')
-st.write('This app uses a pre-trained Distilbert model fine-tuned on the Commonsense Ethics dataset from the Aligning AI With Shared Human Values project (https://github.com/hendrycks/ethics). It judges whether a given action or scenario is wrong or not wrong and shows how the words in the scenario affected the judgment.')
-
-loaded_model = DistilBertForSequenceClassification.from_pretrained('commonsense_ethics')
-model_name = 'distilbert-base-uncased'
-tokenizer = DistilBertTokenizerFast.from_pretrained(model_name)
-cls_explainer = SequenceClassificationExplainer(loaded_model, tokenizer)
-
-clf = pipeline("text-classification", model=loaded_model, tokenizer=tokenizer)
-
-text = st.text_input('Enter a scenario or action.')
-
-if text:
-    answer = clf(text)
-    label = 'wrong' if answer[0]['label'] == 'LABEL_1' else 'not wrong'
-    st.write(f'This action is {label} (confidence level {answer[0]["score"]*100:.2f}%).')
-    attributions = cls_explainer(text)
-    df = pd.DataFrame(attributions[1:-1])
-    df.rename(columns={0: 'Token', 1: 'Contribution'}, inplace=True)
-    st.write(df.style.hide(axis='index'))
-    st.write(cls_explainer.visualize())
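
For reference, the classification and attribution flow that this deleted script implemented can be reproduced outside Streamlit. Below is a minimal sketch, assuming the fine-tuned checkpoint directory 'commonsense_ethics' from the diff above is available locally; the example scenario string is hypothetical.

# Minimal sketch of the deleted app's inference and attribution steps,
# run outside Streamlit. Assumes the 'commonsense_ethics' checkpoint
# from the diff above exists locally.
from transformers import DistilBertForSequenceClassification, DistilBertTokenizerFast, pipeline
from transformers_interpret import SequenceClassificationExplainer

model = DistilBertForSequenceClassification.from_pretrained('commonsense_ethics')
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')

clf = pipeline('text-classification', model=model, tokenizer=tokenizer)
text = 'I borrowed money and never paid it back.'  # hypothetical example input

result = clf(text)[0]
# The script mapped LABEL_1 -> 'wrong' and LABEL_0 -> 'not wrong'.
label = 'wrong' if result['label'] == 'LABEL_1' else 'not wrong'
print(f"{label} (confidence {result['score'] * 100:.2f}%)")

# Word attributions come back as (token, score) pairs; the script dropped
# the first and last entries ([CLS] and [SEP]) before displaying them.
explainer = SequenceClassificationExplainer(model, tokenizer)
attributions = explainer(text)
for token, score in attributions[1:-1]:
    print(f"{token}\t{score:+.3f}")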