import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFacePipeline
from langchain.agents import create_react_agent, AgentExecutor, Tool
from langchain.memory import ConversationBufferMemory

# Mock lead data
LEADS = [
    {"name": "John Doe", "email": "[email protected]", "company": "TechCorp"},
]

# Set up the open-source LLM
@st.cache_resource
def load_model():
    model_name = "google/flan-t5-large"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    pipe = pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=512
    )
    return HuggingFacePipeline(pipeline=pipe)
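
# load_model is cached by st.cache_resource, so the model is loaded once per process
# and reused across Streamlit reruns instead of being reloaded on every interaction.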
local_llm = load_model()

# Define the tools for the agent
def send_email(email_details: str) -> str:
    # A ReAct tool receives its Action Input as a single string.
    # For demo purposes, we just display the email details instead of sending anything.
    st.write("Email sent with the following details:")
    st.write(email_details)
    return "Email sent successfully"

tools = [
    Tool(
        name="Send Email",
        func=send_email,
        description="Useful for sending emails to leads"
    )
]

# Define the prompt
prompt = PromptTemplate.from_template(
    """You are an AI CyberSecurity Program Advisor. Your goal is to engage with leads and get them to book a video call for a sales meeting. You have access to a list of leads and can send emails.
You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: [Insert your final response here]
Begin!
Question: {input}
Thought: Let's approach this step-by-step:
{agent_scratchpad}"""
)
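
# create_react_agent fills {tools} and {tool_names} from the tools list;
# {agent_scratchpad} accumulates the Thought/Action/Observation trace at runtime.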
# Create the ReAct agent
agent = create_react_agent(
    llm=local_llm,
    tools=tools,
    prompt=prompt
)

# Create the agent executor
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    verbose=True,
    memory=ConversationBufferMemory(),
    handle_parsing_errors=True  # small models often emit malformed ReAct output
)

# Streamlit interface
st.title("AI CyberSecurity Program Advisor Demo")
st.write("This demo showcases an AI agent that can engage with leads and attempt to book video calls for sales meetings.")

# Start a conversation with a predefined lead
lead = LEADS[0]
initial_message = f"Hello {lead['name']}, I'd like to discuss our cybersecurity program with {lead['company']}. Are you available for a quick video call?"

if st.button("Start Conversation"):
    with st.spinner("AI is generating a response..."):
        response = agent_executor.invoke({"input": initial_message})
    st.write("AI Response:")
    st.write(response["output"])

st.sidebar.title("About")
st.sidebar.info("This is a demo of an AI CyberSecurity Program Advisor using an open-source LLM and LangChain. It's designed to engage with leads and attempt to book video calls for sales meetings.")