# email_ai / email_ai.py
# (HuggingFace repo page header — author: prithvirajpawar, commit 46c47f0 "changed intro_msg")
# -*- coding: utf-8 -*-
# Import all the required Libraries
import os
from pathlib import Path
import pandas as pd
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.schema.runnable import RunnablePassthrough
# from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
import google.generativeai as genai
import os
from dotenv import load_dotenv
# import chromadb
# Load environment variables
# Pull configuration from the environment; a local .env file is honored
# via python-dotenv. Only the Gemini key is needed here.
load_dotenv()
gemini_api_key = os.environ.get("GEMINI_API_KEY")
# File-based fallback (disabled):
# gemini_api_key = open("gemini_api_key.txt", "r").read().strip()

# Greeting shown to the user when the chat starts.
intro_message = (
    'Hello! 😊 ask me questions about Enron emails. \n\n'
    "Sample questions you can ask are 'What is the period of emails?', "
    "'Who are the people exchanging emails?', 'What are the decisions made?', "
    "'What does Enron deals in?' etc. \n\n"
    'You can ask these questions by tapping and holding the mic.'
)
def initialize_conversation(model="gemini-1.5-flash-latest",
                            persist_directory="chroma_langchain_db1",
                            k=50):
    """Build the chat LLM and the Chroma retriever for answering email queries.

    Args:
        model: Gemini chat model name passed to ChatGoogleGenerativeAI.
        persist_directory: Path of the persisted Chroma vector store on disk.
        k: Number of documents the retriever returns per query.

    Returns:
        A ``(llm, chroma_retriever)`` tuple ready to be fed to
        ``get_chat_model_completions``.
    """
    llm = ChatGoogleGenerativeAI(model=model, api_key=gemini_api_key)
    # Default HuggingFace sentence-transformer embeddings; NOTE(review): this
    # must be the same embedding model used when the Chroma store was built.
    embeddings_model = HuggingFaceEmbeddings()
    # Re-open the persisted vector store rather than re-indexing.
    db = Chroma(persist_directory=persist_directory,
                embedding_function=embeddings_model)
    chroma_retriever = db.as_retriever(search_kwargs={"k": k})
    return llm, chroma_retriever
# initialize_conversation()
# def get_template_prompt(query):
#     return docs, prompt
# """You are an email assistant answering queries on a knowledge base of email threads provided in the context:
# RAG prompt for the chain in get_chat_model_completions: {context} is filled
# with the retrieved email documents, {question} with the user's query.
template = """Answer the question based only on the following context:
{context}
provide citations consisting of date of email and from whom the email was sent.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
Question: {question}
"""
def get_chat_model_completions(llm, chroma_retriever, query):
    """Answer ``query`` with a retrieval-augmented chain over the email store.

    Args:
        llm: Chat model (e.g. the ChatGoogleGenerativeAI instance returned by
            ``initialize_conversation``).
        chroma_retriever: Retriever supplying the email documents for {context}.
        query: User question as a plain string.

    Returns:
        The LLM response message; the answer text is in ``response.content``.
    """
    prompt = ChatPromptTemplate.from_template(template)
    # LCEL pipeline: the retriever fills {context} from the vector store while
    # RunnablePassthrough forwards the raw query into {question}.
    chain = (
        {"context": chroma_retriever, "question": RunnablePassthrough()}
        | prompt
        | llm
    )
    return chain.invoke(query)
# response=get_chat_model_completions(initialize_conversation(), query)
# print(response.response)
if __name__ == "__main__":
    # Smoke test when run as a script: build the LLM and retriever
    # (the returned tuple is intentionally discarded).
    initialize_conversation()