Oscar Wang committed
Commit f0ef3c4 · verified · 1 parent: 34531ec

Update app.py

Files changed (1)
  1. app.py +76 -33
app.py CHANGED
@@ -8,50 +8,93 @@ model_options = {
     "GoalZero/aidetection-ada-v0.1": "GoalZero/aidetection-ada-v0.1"
 }
 
-# Initialize tokenizer and model with the default model
-default_model = model_options["GoalZero/aidetection-ada-v0.2"]
-tokenizer = RobertaTokenizer.from_pretrained(default_model)
-model = RobertaForSequenceClassification.from_pretrained(default_model)
+# Initialize global variables for model and tokenizer
+model = None
+tokenizer = None
+
+def load_model(model_name):
+    """Helper function to load model and tokenizer"""
+    try:
+        return (
+            RobertaForSequenceClassification.from_pretrained(model_name),
+            RobertaTokenizer.from_pretrained(model_name)
+        )
+    except Exception as e:
+        raise Exception(f"Failed to load model {model_name}: {str(e)}")
+
+# Load default model
+try:
+    default_model = "GoalZero/aidetection-ada-v0.2"
+    model, tokenizer = load_model(default_model)
+except Exception as e:
+    print(f"Error loading default model: {str(e)}")
 
-# Define the prediction function
 def classify_text(text, model_choice):
-    global model, tokenizer  # Access the global model and tokenizer variables
-
-    # Check if the model needs to be changed
-    if model_choice != model.name_or_path:
-        model = RobertaForSequenceClassification.from_pretrained(model_choice)
-        tokenizer = RobertaTokenizer.from_pretrained(model_choice)
-
-    # Remove periods and new lines from the input text
-    cleaned_text = text.replace('.', '').replace('\n', ' ')
-
-    # Tokenize the cleaned input text
-    inputs = tokenizer(cleaned_text, return_tensors='pt', padding=True, truncation=True, max_length=128)
-
-    # Get the model's prediction
-    with torch.no_grad():
-        outputs = model(**inputs)
-
-    # Apply softmax to get probabilities
-    probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
-
-    # Get the probability of the class '1'
-    prob_1 = probabilities[0][1].item()
-
-    return {"Probability of being AI": prob_1}
+    global model, tokenizer
+
+    try:
+        # Check if we need to change the model
+        if model is None or model_choice != model.name_or_path:
+            model, tokenizer = load_model(model_choice)
+
+        # Clean the input text
+        cleaned_text = text.replace('.', '').replace('\n', ' ')
+
+        # Tokenize the cleaned input text
+        inputs = tokenizer(
+            cleaned_text,
+            return_tensors='pt',
+            padding=True,
+            truncation=True,
+            max_length=128
+        )
+
+        # Get the model's prediction
+        with torch.no_grad():
+            outputs = model(**inputs)
+
+        # Apply softmax to get probabilities
+        probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
+
+        # Get the probability of class '1'
+        prob_1 = probabilities[0][1].item()
+
+        return {
+            "AI Probability": round(prob_1 * 100, 2),
+            "Model used": model_choice
+        }
+
+    except Exception as e:
+        return {
+            "error": f"An error occurred: {str(e)}",
+            "Model used": model_choice
+        }
 
 # Create the Gradio interface
 iface = gr.Interface(
     fn=classify_text,
     inputs=[
-        gr.Textbox(lines=2, placeholder="Enter text here..."),
-        gr.Dropdown(choices=list(model_options.keys()), value=default_model, label="Select Model")
+        gr.Textbox(
+            lines=2,
+            placeholder="Enter text here...",
+            label="Input Text"
+        ),
+        gr.Dropdown(
+            choices=list(model_options.keys()),
+            value="GoalZero/aidetection-ada-v0.2",
+            label="Select Model Version"
+        )
     ],
-    outputs="json",
-    title="GoalZero Ada Model Selector",
+    outputs=gr.JSON(label="Results"),
+    title="GoalZero Ada AI Detection",
     description="Enter text to get the probability of it being AI-written. Select a model version to use.",
+    examples=[
+        ["WWII demonstrated the importance of alliances in global conflicts. The Axis and Allied powers were formed as countries sought to protect their interests and expand their influence. This lesson underscores the potential for future global conflicts to involve complex alliances, similar to the Cold War era’s NATO and Warsaw Pact alignments.", "GoalZero/aidetection-ada-v0.2"],
+        ["Eustace was a thorough gentleman. There was candor in his quack, and affability in his waddle; and underneath his snowy down beat a pure and sympathetic heart. In short, he was a most exemplary duck.", "GoalZero/aidetection-ada-v0.1"]
+    ]
 )
 
 # Launch the app
 if __name__ == "__main__":
-    iface.launch(share=True)
+    iface.launch(share=True)
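
For reference, a minimal sketch of how the updated classify_text could be exercised outside the Gradio UI, assuming app.py and its dependencies (gradio, torch, transformers) are installed and the GoalZero checkpoints can be downloaded from the Hub; the printed probability is illustrative only:

    # Sketch: call the prediction function directly from a Python shell
    from app import classify_text  # importing app.py loads the default v0.2 checkpoint

    result = classify_text(
        "There was candor in his quack, and affability in his waddle.",
        "GoalZero/aidetection-ada-v0.1",  # switches the global model to v0.1
    )
    print(result)  # e.g. {'AI Probability': 4.2, 'Model used': 'GoalZero/aidetection-ada-v0.1'}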