S-Dreamer committed on
Commit
43350c0
·
verified ·
1 Parent(s): 30f1a7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -18
app.py CHANGED
import streamlit as st
from llama_cpp import Llama


@st.cache_resource
def load_model():
    """Instantiate the local GGUF model once; cached so Streamlit reruns reuse it."""
    model_kwargs = dict(
        model_path="cybertron-v4-qw7B-MGS-IQ2_M.gguf",
        n_ctx=2048,
        n_threads=8,
        n_gpu_layers=20,
    )
    return Llama(**model_kwargs)


llm = load_model()

st.title("Cybertron Chat")

prompt = st.text_input("Ask a question:")

# Only run inference once the user has typed something.
if prompt:
    with st.spinner("Generating response..."):
        chat_messages = [{"role": "user", "content": prompt}]
        response = llm.create_chat_completion(
            messages=chat_messages,
            temperature=0.7,
            max_tokens=256,
        )
        st.write(response["choices"][0]["message"]["content"])
 
import streamlit as st
from llama_cpp import Llama

st.set_page_config(page_title="Cybertron Chat Interface", layout="wide")
st.title("🧠 Cybertron Chat: Generalist vs Specialist Mode")
st.markdown("""
Welcome to the Cybertron Chat Interface. Choose between:

- 🤖 **Generalist Mode** for creative, imaginative, multi-purpose reasoning
- 🛡️ **Specialist Mode** for precise, tactical cyber security and pentesting insights
""")


@st.cache_resource
def load_model(repo_id: str, filename: str) -> Llama:
    """Download (if needed) and load a GGUF model from the Hugging Face Hub.

    Streamlit re-executes this whole script on every widget interaction;
    st.cache_resource keeps one loaded model instance per (repo_id, filename)
    so the multi-gigabyte GGUF is not re-fetched/re-loaded on each rerun.
    """
    return Llama.from_pretrained(repo_id=repo_id, filename=filename)


# Sidebar toggle for mode
model_choice = st.sidebar.radio("Choose Mode:", ["Generalist 🤖", "Specialist 🛡️"], help="Switch between general reasoning and focused cybersec models")

# Display model tips and select the model + example prompts for the chosen mode
if model_choice == "Generalist 🤖":
    st.sidebar.markdown("""
**Best for:**
- Creative writing
- Brainstorming tools or strategies
- Simulating conversations
""")
    llm = load_model(
        repo_id="bartowski/cybertron-v4-qw7B-MGS-GGUF",
        filename="cybertron-v4-qw7B-MGS-IQ2_M.gguf",
    )
    example_prompts = [
        "Simulate a hacker group planning a phishing campaign",
        "Explain how to exploit a misconfigured NGINX server",
        "Write a Python script that scrapes threat intel feeds",
    ]
else:
    st.sidebar.markdown("""
**Best for:**
- Penetration testing guidance
- Red/blue team planning
- Shell scripting and command-line tasks
""")
    llm = load_model(
        repo_id="TheBloke/una-cybertron-7B-v2-GGUF",
        filename="una-cybertron-7b-v2-bf16.Q2_K.gguf",
    )
    example_prompts = [
        "List enumeration commands for Active Directory",
        "Simulate a post-exploitation persistence technique",
        "Generate a Bash reverse shell with obfuscation",
    ]

# Prompt input and example selector
col1, col2 = st.columns([2, 1])

with col1:
    user_input = st.text_area("\U0001F4AC Enter your query below:", height=150)
with col2:
    st.markdown("**\U0001F4D6 Prompt Examples:**")
    selected_example = st.selectbox("Try an example:", ["-- Select an example --"] + example_prompts)
    # A chosen example overrides whatever is typed in the text area.
    if selected_example != "-- Select an example --":
        user_input = selected_example

# Run inference
if st.button("Submit", use_container_width=True):
    if not user_input.strip():
        # Guard: don't send an empty prompt to the model.
        st.warning("Please enter a query or pick an example first.")
    else:
        with st.spinner("Generating response..."):
            # echo=False: return only the completion, not the prompt itself,
            # so the "Response" panel shows just the model's answer.
            output = llm(user_input, max_tokens=512, echo=False)
        st.markdown("---")
        st.markdown("**\U0001F4C4 Response:**")
        st.code(output["choices"][0]["text"].strip())