import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFacePipeline
from langchain.agents import create_react_agent, AgentExecutor, Tool
from langchain.memory import ConversationBufferMemory

# Mock lead data standing in for a real CRM backend.
LEADS = [
    {"name": "John Doe", "email": "john@example.com", "company": "TechCorp"},
    {"name": "Jane Smith", "email": "jane@example.com", "company": "InnoSoft"},
    {"name": "Bob Johnson", "email": "bob@example.com", "company": "DataTech"},
]


@st.cache_resource
def load_model():
    """Load FLAN-T5 once per Streamlit session and wrap it for LangChain.

    Returns:
        HuggingFacePipeline: a LangChain-compatible LLM backed by a local
        Hugging Face text2text-generation pipeline.
    """
    model_name = "google/flan-t5-large"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    pipe = pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        max_length=512,
    )
    return HuggingFacePipeline(pipeline=pipe)


local_llm = load_model()


def search_leads(query):
    """Return every lead whose name contains *query* (case-insensitive)."""
    return [lead for lead in LEADS if query.lower() in lead["name"].lower()]


def send_email(to_email, subject, body):
    """Pretend to send an email by echoing its fields to the Streamlit page."""
    st.write(f"Email sent to: {to_email}")
    st.write(f"Subject: {subject}")
    st.write(f"Body: {body}")
    return "Email sent successfully"


def _send_email_tool(tool_input):
    """Adapter for the agent: a ReAct Action Input is a single string.

    The original code registered ``send_email`` (three positional
    parameters) directly as the tool func, which would raise ``TypeError``
    when the agent invoked it with one string.  Expected input format:
    ``to_email|subject|body``; missing fields default to empty strings.
    """
    parts = [piece.strip() for piece in tool_input.split("|", 2)]
    while len(parts) < 3:
        parts.append("")
    return send_email(*parts)


tools = [
    Tool(
        name="Search Leads",
        func=search_leads,
        description="Useful for searching leads by name",
    ),
    Tool(
        name="Send Email",
        func=_send_email_tool,
        description=(
            "Useful for sending emails to leads. "
            "Input format: to_email|subject|body"
        ),
    ),
]

# Standard ReAct prompt.  NOTE: a ReAct agent only supplies the template
# variables {tools}, {tool_names}, {input} and {agent_scratchpad}; the
# original {final_answer} placeholder (plus an inline '#' comment embedded
# in the string) would have raised a missing-variable error at runtime.
prompt = PromptTemplate.from_template(
    """You are an AI CyberSecurity Program Advisor. Your goal is to engage with leads and get them to book a video call for an in-person sales meeting. You have access to a list of leads and can send emails.

You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question

Begin!

Question: {input}
Thought: Let's approach this step-by-step: {agent_scratchpad}"""
)

# create_react_agent accepts no ``output_key`` keyword; the executor always
# reports its answer under the "output" key of the invoke() result.
agent = create_react_agent(local_llm, tools, prompt)

# ``output_keys`` is a read-only property on AgentExecutor, not a
# constructor argument, so it must not be passed here.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    memory=ConversationBufferMemory(),
    # Small local models frequently emit malformed ReAct output; retry
    # instead of crashing the Streamlit app.
    handle_parsing_errors=True,
)

# --- Streamlit interface ---
st.title("AI CyberSecurity Program Advisor Demo")
st.write(
    "This demo showcases an AI agent that can engage with leads and attempt "
    "to book video calls for in-person sales meetings."
)

lead_name = st.text_input("Enter a lead's name to engage with:")

if lead_name:
    lead_info = search_leads(lead_name)
    if not lead_info:
        st.write(f"No lead found with the name {lead_name}")
    else:
        lead = lead_info[0]
        st.write(
            f"Lead found: {lead['name']} "
            f"(Email: {lead['email']}, Company: {lead['company']})"
        )
        initial_message = (
            f"Hello {lead['name']}, I'd like to discuss our cybersecurity "
            f"program with {lead['company']}. Are you available for a quick "
            f"video call?"
        )
        if st.button("Engage with Lead"):
            with st.spinner("AI is generating a response..."):
                # .run() is deprecated; invoke() returns a dict whose
                # "output" entry holds the agent's final answer.
                result = agent_executor.invoke({"input": initial_message})
            st.write("AI Response:")
            st.write(result["output"])

st.sidebar.title("About")
st.sidebar.info(
    "This is a demo of an AI CyberSecurity Program Advisor using an "
    "open-source LLM and LangChain. It's designed to engage with leads and "
    "attempt to book video calls for sales meetings."
)

# To run this script, use: streamlit run your_script_name.py