import streamlit as st
from llama_cpp import Llama

# Page chrome: wide layout, title, and a landing blurb above the controls.
st.set_page_config(page_title="Cybertron Chat Interface", layout="wide")
st.title("🧠 Cybertron Chat: Generalist vs Specialist Mode")

_INTRO = """
Welcome to the Cybertron Chat Interface. Choose between:

- 🤖 **Generalist Mode** for creative, imaginative, multi-purpose reasoning
- 🛡️ **Specialist Mode** for precise, tactical cyber security and pentesting insights
"""
st.markdown(_INTRO)

# Sidebar toggle for mode — the selected label drives which model loads below.
_MODES = ["Generalist 🤖", "Specialist 🛡️"]
model_choice = st.sidebar.radio(
    "Choose Mode:",
    _MODES,
    help="Switch between general reasoning and focused cybersec models",
)

# Display model tips and load the chosen model.
@st.cache_resource
def _load_model(repo_id: str, filename: str) -> Llama:
    """Download (if needed) and load a GGUF model, cached for the session.

    Without caching, ``Llama.from_pretrained`` re-downloads/re-loads the
    multi-gigabyte model on EVERY Streamlit rerun (i.e. every widget
    interaction). ``st.cache_resource`` keeps one live ``Llama`` instance
    per (repo_id, filename) pair across reruns.
    """
    return Llama.from_pretrained(repo_id=repo_id, filename=filename)


if model_choice == "Generalist 🤖":
    st.sidebar.markdown("""
    **Best for:**
    - Creative writing
    - Brainstorming tools or strategies
    - Simulating conversations
    """)
    llm = _load_model(
        "bartowski/cybertron-v4-qw7B-MGS-GGUF",
        "cybertron-v4-qw7B-MGS-IQ2_M.gguf",
    )
    example_prompts = [
        "Simulate a hacker group planning a phishing campaign",
        "Explain how to exploit a misconfigured NGINX server",
        "Write a Python script that scrapes threat intel feeds"
    ]
else:
    st.sidebar.markdown("""
    **Best for:**
    - Penetration testing guidance
    - Red/blue team planning
    - Shell scripting and command-line tasks
    """)
    llm = _load_model(
        "TheBloke/una-cybertron-7B-v2-GGUF",
        "una-cybertron-7b-v2-bf16.Q2_K.gguf",
    )
    example_prompts = [
        "List enumeration commands for Active Directory",
        "Simulate a post-exploitation persistence technique",
        "Generate a Bash reverse shell with obfuscation"
    ]

# Prompt input and example selector, laid out 2:1 side by side.
_PLACEHOLDER = "-- Select an example --"

col1, col2 = st.columns([2, 1])

with col1:
    user_input = st.text_area("\U0001F4AC Enter your query below:", height=150)
with col2:
    st.markdown("**\U0001F4D6 Prompt Examples:**")
    selected_example = st.selectbox(
        "Try an example:", [_PLACEHOLDER] + example_prompts
    )
    # A chosen example replaces whatever was typed in the text area.
    if selected_example != _PLACEHOLDER:
        user_input = selected_example

# Run inference on the current prompt and render the completion.
if st.button("Submit", use_container_width=True):
    if not user_input.strip():
        # Guard: don't invoke the model on an empty/whitespace-only prompt.
        st.warning("Please enter a query or pick an example prompt first.")
    else:
        with st.spinner("Generating response..."):
            # NOTE(review): echo=True makes llama-cpp return the prompt
            # prepended to the completion, so the "Response" box shows the
            # user's prompt as well — confirm this is intended.
            output = llm(user_input, max_tokens=512, echo=True)
        st.markdown("---")
        st.markdown("**\U0001F4C4 Response:**")
        st.code(output["choices"][0]["text"].strip())