Rob Caamano committed
Commit 1167df7 · unverified · 1 Parent(s): c518343
Files changed (1)
  1. app.py +11 -11
app.py CHANGED
@@ -11,7 +11,6 @@ demo = """I'm so proud of myself for accomplishing my goals today. #motivation #
 
 text = st.text_area("Input text", demo, height=250)
 
-# Add a drop-down menu for model selection
 model_options = {
     "DistilBERT Base Uncased (SST-2)": "distilbert-base-uncased-finetuned-sst-2-english",
     "Fine-tuned Toxicity Model": "RobCaamano/toxicity_distilbert",
@@ -23,7 +22,6 @@ mod_name = model_options[selected_model]
 tokenizer = AutoTokenizer.from_pretrained(mod_name)
 model = AutoModelForSequenceClassification.from_pretrained(mod_name)
 
-# Update the id2label mapping for the fine-tuned model
 if selected_model == "Fine-tuned Toxicity Model":
     toxicity_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
     model.config.id2label = {i: toxicity_classes[i] for i in range(model.config.num_labels)}
@@ -40,17 +38,19 @@ if st.button("Submit", type="primary"):
 
     tweet_portion = text[:50] + "..." if len(text) > 50 else text
 
-    # Create and display the table
     if selected_model == "Fine-tuned Toxicity Model":
         column_name = "Highest Toxicity Class"
     else:
         column_name = "Prediction"
 
-    df = pd.DataFrame(
-        {
-            "Tweet (portion)": [tweet_portion],
-            column_name: [label],
-            "Probability": [probability],
-        }
-    )
-    st.table(df)
+    if probability < 0.1:
+        st.write("This tweet is not toxic.")
+    else:
+        df = pd.DataFrame(
+            {
+                "Tweet (portion)": [tweet_portion],
+                column_name: [label],
+                "Probability": [probability],
+            }
+        )
+        st.table(df)
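Note on context this diff does not show: the changed block reads label and probability, which app.py computes earlier in the Submit handler. Below is a minimal sketch of how those values could be produced. The model name, toxicity class list, and id2label remapping are taken from the hunks above; the sigmoid scoring and argmax selection are assumptions about the unshown code, not the app's confirmed logic.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Model setup mirrors the diff above: fine-tuned toxicity model with remapped labels.
mod_name = "RobCaamano/toxicity_distilbert"
tokenizer = AutoTokenizer.from_pretrained(mod_name)
model = AutoModelForSequenceClassification.from_pretrained(mod_name)
toxicity_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"]
model.config.id2label = {i: toxicity_classes[i] for i in range(model.config.num_labels)}

text = "I'm so proud of myself for accomplishing my goals today. #motivation"
inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits

# Assumption: per-class sigmoid scores for the multi-label head; the displayed
# label/probability are the top-scoring class and its score.
probs = torch.sigmoid(logits)[0]
probability = probs.max().item()
label = model.config.id2label[int(probs.argmax())]

# The behavior this commit introduces: skip the table when the top score is below 0.1.
tweet_portion = text[:50] + "..." if len(text) > 50 else text
if probability < 0.1:
    print("This tweet is not toxic.")         # st.write(...) in the Streamlit app
else:
    print(tweet_portion, label, probability)  # rendered via pd.DataFrame and st.table in the app

In the app itself, the else branch builds a pandas DataFrame and renders it with st.table, exactly as the added lines of the last hunk show.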