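# Cybertron Chat Interface: a Streamlit front end that switches between a
# generalist and a security-focused GGUF model, both served locally through
# llama-cpp-python. Assumed setup (a sketch; versions are not pinned in the
# original source):
#
#   pip install streamlit llama-cpp-python huggingface_hub
#   streamlit run app.py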
import streamlit as st
from llama_cpp import Llama

st.set_page_config(page_title="Cybertron Chat Interface", layout="wide")
st.title("🧠 Cybertron Chat: Generalist vs Specialist Mode")

st.markdown("""
Welcome to the Cybertron Chat Interface. Choose between:

- 🤖 **Generalist Mode** for creative, imaginative, multi-purpose reasoning
- 🛡️ **Specialist Mode** for precise, tactical cyber security and pentesting insights
""")

# Sidebar toggle for mode
model_choice = st.sidebar.radio(
    "Choose Mode:",
    ["Generalist 🤖", "Specialist 🛡️"],
    help="Switch between general reasoning and focused cybersec models",
)

# Load each model once per process instead of on every Streamlit rerun;
# without caching, the multi-gigabyte GGUF would be re-read from disk on
# each widget interaction.
@st.cache_resource
def load_model(repo_id: str, filename: str) -> Llama:
    return Llama.from_pretrained(repo_id=repo_id, filename=filename)


# Display model tips and load the selected model
if model_choice == "Generalist 🤖":
    st.sidebar.markdown("""
**Best for:**
- Creative writing
- Brainstorming tools or strategies
- Simulating conversations
""")
    llm = load_model(
        repo_id="bartowski/cybertron-v4-qw7B-MGS-GGUF",
        filename="cybertron-v4-qw7B-MGS-IQ2_M.gguf",
    )
    example_prompts = [
        "Simulate a hacker group planning a phishing campaign",
        "Explain how to exploit a misconfigured NGINX server",
        "Write a Python script that scrapes threat intel feeds",
    ]
else:
    st.sidebar.markdown("""
**Best for:**
- Penetration testing guidance
- Red/blue team planning
- Shell scripting and command-line tasks
""")
    llm = load_model(
        repo_id="TheBloke/una-cybertron-7B-v2-GGUF",
        filename="una-cybertron-7b-v2-bf16.Q2_K.gguf",
    )
    example_prompts = [
        "List enumeration commands for Active Directory",
        "Simulate a post-exploitation persistence technique",
        "Generate a Bash reverse shell with obfuscation",
    ]
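
# Note: both repos host chat-tuned models, and llama-cpp-python also exposes
# llm.create_chat_completion(messages=[...]), which applies the chat template
# from the GGUF metadata when one is present. That may format prompts better
# than the raw completion call used below; left as-is to match the original.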

# Prompt input and example selector
col1, col2 = st.columns([2, 1])
with col1:
    user_input = st.text_area("💬 Enter your query below:", height=150)
with col2:
    st.markdown("**📖 Prompt Examples:**")
    selected_example = st.selectbox(
        "Try an example:",
        ["-- Select an example --"] + example_prompts,
    )
    if selected_example != "-- Select an example --":
        user_input = selected_example
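# Note: selecting an example simply replaces whatever was typed for this run;
# a stickier pattern (not used in the original) would store the choice in
# st.session_state and feed it back as the text_area's value.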

# Run inference
if st.button("Submit", use_container_width=True):
    if not user_input.strip():
        st.warning("Please enter a query first.")
    else:
        with st.spinner("Generating response..."):
            # echo=False keeps the prompt out of the displayed completion
            output = llm(user_input, max_tokens=512, echo=False)
        st.markdown("---")
        st.markdown("**📄 Response:**")
        st.code(output["choices"][0]["text"].strip())
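
# Optional: stream tokens as they arrive instead of blocking on the full
# completion. A minimal sketch, assuming llama-cpp-python's stream=True
# chunk iterator and a Streamlit version that provides st.write_stream;
# this is not part of the original app.
#
# def token_stream(prompt: str):
#     for chunk in llm(prompt, max_tokens=512, stream=True):
#         yield chunk["choices"][0]["text"]
#
# st.write_stream(token_stream(user_input))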