k-code committed on
Commit 6fe8523 · 1 Parent(s): 9ca6304
Files changed (1)
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+ import torch
+
+ # Load from local checkpoint
+ # or whatever your checkpoint number is
+ model_id = "/Users/kennyho/Dev/k-code-experiments/political-bert-classifier/src/results/checkpoint-2391"
+ tokenizer = AutoTokenizer.from_pretrained(
+     'bert-base-uncased')  # Original tokenizer
+ model = AutoModelForSequenceClassification.from_pretrained(model_id)
+
+
+ def predict(text):
+     # Tokenize and predict
+     inputs = tokenizer(text,
+                        truncation=True,
+                        padding=True,
+                        max_length=64,
+                        return_tensors="pt")
+
+     with torch.no_grad():
+         outputs = model(**inputs)
+     probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
+     prediction = probs.argmax(-1).item()
+     confidence = probs[0][prediction].item()
+
+     # Map the predicted class index to a human-readable label
+     label_map = {0: 'Left', 1: 'Right', 2: 'Centrist'}
+     return f"{label_map[prediction]} (Confidence: {confidence:.2%})"
+
+
+ # Create the interface
+ demo = gr.Interface(
+     fn=predict,
+     inputs=gr.Textbox(lines=4, placeholder="Enter text to analyze..."),
+     outputs="text",
+     title="Political Text Classifier",
+     description="Classify political text as Left, Right, or Centrist"
+ )
+
+ demo.launch()
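
Once app.py is running (python app.py locally, or on the Space), the interface can also be exercised programmatically. A minimal sketch using gradio_client, assuming the default local URL and the auto-generated "/predict" endpoint name that gr.Interface assigns:

from gradio_client import Client

# Connect to the running Gradio app (default local address; adjust for a deployed Space)
client = Client("http://127.0.0.1:7860/")

# gr.Interface exposes the wrapped function under api_name="/predict" by default
result = client.predict(
    "Example text to classify.",  # text input
    api_name="/predict",
)
print(result)  # a label string such as "Left (Confidence: 87.34%)" (illustrative output)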