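"""PDF Buddy: a Streamlit app for chatting with your PDFs.

Uploaded PDFs are split into overlapping text chunks, embedded with
hkunlp/instructor-xl, indexed in a FAISS vector store, and queried through a
LangChain ConversationalRetrievalChain backed by google/flan-t5-xxl on the
Hugging Face Hub.
"""
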
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import HuggingFaceHub
import langchain
langchain.verbose = False
from htmlTemplates import css, bot_template, user_template
from dotenv import load_dotenv
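
# Setup notes (assumptions, not part of the original file):
# - HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable,
#   which load_dotenv() below can pick up from a local .env file, e.g.:
#       HUGGINGFACEHUB_API_TOKEN=hf_...
# - htmlTemplates.py must sit next to this file and define css, bot_template,
#   and user_template, where the templates contain a {{MSG}} placeholder.
#   A minimal sketch that satisfies the import:
#       css = "<style>.chat-message { padding: 1rem; }</style>"
#       user_template = '<div class="chat-message user">{{MSG}}</div>'
#       bot_template = '<div class="chat-message bot">{{MSG}}</div>'
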
# Set the Streamlit page configuration and CSS styles
st.set_page_config(page_title="PDF Buddy", page_icon=":coffee:")
st.markdown(
    """
    <style>
    body {
        background-color: #fce6ef;
    }
    </style>
    """,
    unsafe_allow_html=True
)
st.write(css, unsafe_allow_html=True)
st.header("PDF Buddy :coffee:")


def get_pdf_text(pdf_docs):
    # Concatenate the text of every page across all uploaded PDFs
    text = ""
    for pdf in pdf_docs:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            text += page.extract_text()
    return text


def get_text_chunks(text):
    # Split the raw text into 1000-character chunks with 200 characters of overlap
    text_splitter = CharacterTextSplitter(
        separator="\n",
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len
    )
    chunks = text_splitter.split_text(text)
    return chunks


@st.cache_resource
def load_embeddings():
    # Cache the Instructor embedding model so it is loaded only once per session
    model_name = "hkunlp/instructor-xl"
    model_kwargs = {'device': 'cpu'}
    embeddings = HuggingFaceInstructEmbeddings(
        model_name=model_name, model_kwargs=model_kwargs)
    return embeddings


embeddings = load_embeddings()


def get_vectorstore(text_chunks):
    # Embed the chunks and index them in an in-memory FAISS store
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore


@st.cache_resource
def load_llm():
    llm = HuggingFaceHub(repo_id="google/flan-t5-xxl",
                         model_kwargs={"temperature": 0.5, "max_length": 218})
    return llm


# Load the model and store it as a global variable
llm = load_llm()


def get_conversation_chain(vectorstore):
    # Buffer memory keeps the full chat history so follow-up questions have context
    memory = ConversationBufferMemory(
        memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory
    )
    return conversation_chain


def handle_userinput(user_question):
    response = st.session_state.conversation({'question': user_question})
    st.session_state.chat_history = response['chat_history']
    # Messages alternate user/bot, so even indices are the user's turns
    for i, message in enumerate(st.session_state.chat_history):
        if i % 2 == 0:
            st.write(user_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)
        else:
            st.write(bot_template.replace(
                "{{MSG}}", message.content), unsafe_allow_html=True)


def main():
    load_dotenv()
    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    user_question = st.text_input("Ask a question about your documents:")
    if user_question:
        # Guard against questions asked before any PDFs have been processed
        if st.session_state.conversation is None:
            st.warning("Please upload and process your PDFs first.")
        else:
            handle_userinput(user_question)

    with st.sidebar:
        st.subheader("Your documents")
        pdf_docs = st.file_uploader(
            "Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
        if st.button("Process"):
            with st.spinner("Processing"):
                # get pdf text
                raw_text = get_pdf_text(pdf_docs)
                # get the text chunks
                text_chunks = get_text_chunks(raw_text)
                # create vector store
                vectorstore = get_vectorstore(text_chunks)
                # create conversation chain
                st.session_state.conversation = get_conversation_chain(
                    vectorstore)


if __name__ == '__main__':
    main()
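
# To run locally (assuming this file is saved as app.py):
#     streamlit run app.py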