import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFacePipeline
from langchain.agents import create_react_agent, AgentExecutor, Tool
from langchain.memory import ConversationBufferMemory
# Mock lead data
LEADS = [
{"name": "John Doe", "email": "[email protected]", "company": "TechCorp"},
]
# Set up the open-source LLM
@st.cache_resource
def load_model():
    model_name = "google/flan-t5-large"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    pipe = pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=512
    )
    return HuggingFacePipeline(pipeline=pipe)

local_llm = load_model()
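# Optional sanity check (assumes the model weights downloaded successfully and
# that a plain string prompt is enough to exercise the pipeline); uncomment to
# verify the wrapped LLM responds before wiring it into the agent:
# st.write(local_llm.invoke("Answer in one sentence: what is phishing?"))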
# Define the tools for the agent
def send_email(email_details):
    """Simulate sending an email.

    The ReAct agent passes a single string as the action input, so the tool
    expects "to_email | subject | body".
    """
    to_email, subject, body = (part.strip() for part in email_details.split("|", 2))
    # For demo purposes, we'll just print the email details
    st.write(f"Email sent to: {to_email}")
    st.write(f"Subject: {subject}")
    st.write(f"Body: {body}")
    return "Email sent successfully"
tools = [
    Tool(
        name="Send Email",
        func=send_email,
        description="Send an email to a lead. Input must be a single string: 'to_email | subject | body'."
    )
]
# Define the prompt
prompt = PromptTemplate.from_template(
"""You are an AI CyberSecurity Program Advisor. Your goal is to engage with leads and get them to book a video call for an in-person sales meeting. You have access to a list of leads and can send emails.
You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original question
Begin!
Question: {input}
Thought: Let's approach this step-by-step:
{agent_scratchpad}"""
)
# Create the ReAct agent
agent = create_react_agent(
    llm=local_llm,
    tools=tools,
    prompt=prompt
)
# Create the agent executor
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    verbose=True,
    memory=ConversationBufferMemory(),
    handle_parsing_errors=True  # small seq2seq models often break the ReAct format
)
# Streamlit interface
st.title("AI CyberSecurity Program Advisor Demo")
st.write("This demo showcases an AI agent that can engage with leads and attempt to book video calls for sales meetings.")
# Start a conversation with a predefined lead
lead = LEADS[0]
initial_message = f"Hello {lead['name']}, I'd like to discuss our cybersecurity program with {lead['company']}. Are you available for a quick video call?"
if st.button("Start Conversation"):
    with st.spinner("AI is generating a response..."):
        response = agent_executor.invoke({"input": initial_message})
    st.write("AI Response:")
    st.write(response)  # Display the full response dictionary
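# To show only the agent's final answer instead of the whole result dictionary,
# you could render the "output" key that AgentExecutor returns:
#     st.write(response["output"])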
st.sidebar.title("About")
st.sidebar.info("This is a demo of an AI CyberSecurity Program Advisor using an open-source LLM and LangChain. It's designed to engage with leads and attempt to book video calls for sales meetings.")