Canstralian committed
Commit 9bc591d · verified · 1 Parent(s): b38a095

Update app.py

Files changed (1): app.py +40 -36
app.py CHANGED
@@ -2,14 +2,6 @@ import streamlit as st
  from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoModelForSeq2SeqLM
  import torch
 
- # Sidebar for user input
- st.sidebar.header("Model Configuration")
- model_choice = st.sidebar.selectbox("Select a model", [
-     "CyberAttackDetection",
-     "text2shellcommands",
-     "pentest_ai"
- ])
-
  # Define the model names
  model_mapping = {
      "CyberAttackDetection": "Canstralian/CyberAttackDetection",
@@ -17,10 +9,6 @@ model_mapping = {
      "pentest_ai": "Canstralian/pentest_ai"
  }
 
- model_name = model_mapping.get(model_choice, "Canstralian/CyberAttackDetection")
-
- # Load model and tokenizer on demand
- @st.cache_resource
  def load_model(model_name):
      try:
          # Fallback to a known model for debugging
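Note on the hunk above: the removed @st.cache_resource decorator is what kept the tokenizer and model in memory across Streamlit reruns, so after this commit every widget interaction reloads the weights. The decorator would still work on the new module-level load_model. A minimal sketch of a cached loader; the body of load_model is elided from this diff, so the from_pretrained calls below are illustrative assumptions, not the committed code:

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification

@st.cache_resource  # cache one (tokenizer, model) pair per model_name across reruns
def load_model(model_name):
    try:
        # Illustrative body; the committed implementation is not shown in this hunk
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForSequenceClassification.from_pretrained(model_name)
        return tokenizer, model
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None, None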
@@ -39,32 +27,48 @@ def load_model(model_name):
          st.error(f"Error loading model: {e}")
          return None, None
 
- # Load the model and tokenizer
- tokenizer, model = load_model(model_name)
-
- # Input text box in the main panel
- st.title(f"{model_choice} Model")
- user_input = st.text_area("Enter text:")
-
- # Make prediction if user input is provided
- if user_input and model and tokenizer:
-     if model_choice == "text2shellcommands":
-         # For text2shellcommands model, generate shell commands
-         inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True)
-         with torch.no_grad():
-             outputs = model.generate(**inputs)
-         generated_command = tokenizer.decode(outputs[0], skip_special_tokens=True)
-         st.write(f"Generated Shell Command: {generated_command}")
-
-     else:
-         # For CyberAttackDetection and pentest_ai models, perform classification
-         inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True)
-         with torch.no_grad():
-             outputs = model(**inputs)
-         logits = outputs.logits
-         predicted_class = torch.argmax(logits, dim=-1).item()
-         st.write(f"Predicted Class: {predicted_class}")
-         st.write(f"Logits: {logits}")
-
- else:
-     st.info("Please enter some text for prediction.")
+ def validate_input(user_input):
+     if not user_input:
+         st.error("Please enter some text for prediction.")
+         return False
+     return True
+
+ def make_prediction(model, tokenizer, user_input):
+     try:
+         inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True)
+         with torch.no_grad():
+             outputs = model(**inputs)
+         return outputs
+     except Exception as e:
+         st.error(f"Error making prediction: {e}")
+         return None
+
+ def main():
+     st.sidebar.header("Model Configuration")
+     model_choice = st.sidebar.selectbox("Select a model", [
+         "CyberAttackDetection",
+         "text2shellcommands",
+         "pentest_ai"
+     ])
+
+     model_name = model_mapping.get(model_choice, "Canstralian/CyberAttackDetection")
+
+     tokenizer, model = load_model(model_name)
+
+     st.title(f"{model_choice} Model")
+     user_input = st.text_area("Enter text:")
+
+     if validate_input(user_input) and model is not None and tokenizer is not None:
+         outputs = make_prediction(model, tokenizer, user_input)
+         if outputs is not None:
+             if model_choice == "text2shellcommands":
+                 generated_command = tokenizer.decode(outputs[0], skip_special_tokens=True)
+                 st.write(f"Generated Shell Command: {generated_command}")
+             else:
+                 logits = outputs.logits
+                 predicted_class = torch.argmax(logits, dim=-1).item()
+                 st.write(f"Predicted Class: {predicted_class}")
+                 st.write(f"Logits: {logits}")
+
+ if __name__ == "__main__":
+     main()
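A caveat on the refactor: make_prediction now always runs a plain forward pass (model(**inputs)), yet the text2shellcommands branch in main() still decodes outputs[0] as generated token ids, which matched the old behavior only because the old code called model.generate for that model. A hedged sketch of a generation-aware variant, keeping the committed names; the is_seq2seq parameter is an assumed addition for illustration, not part of this commit:

import streamlit as st
import torch

def make_prediction(model, tokenizer, user_input, is_seq2seq=False):
    try:
        inputs = tokenizer(user_input, return_tensors="pt", padding=True, truncation=True)
        with torch.no_grad():
            if is_seq2seq:
                # Seq2seq models (loaded via AutoModelForSeq2SeqLM) need generate()
                # to produce token ids that tokenizer.decode can turn into text
                return model.generate(**inputs)
            # Classification models return an output object exposing .logits
            return model(**inputs)
    except Exception as e:
        st.error(f"Error making prediction: {e}")
        return None

# The call site in main() would then select the path explicitly, e.g.:
# outputs = make_prediction(model, tokenizer, user_input,
#                           is_seq2seq=(model_choice == "text2shellcommands"))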