Technocoloredgeek committed on
Commit
e496e5b
·
verified ·
1 Parent(s): 82dcbba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -12
app.py CHANGED
@@ -5,8 +5,8 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
5
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
6
  from langchain_community.vectorstores import Qdrant
7
  from langchain.prompts import ChatPromptTemplate
8
- from langchain_core.output_parsers import StrOutputParser
9
- from langchain_core.runnables import RunnablePassthrough
10
  from qdrant_client import QdrantClient
11
  from operator import itemgetter
12
 
@@ -19,7 +19,6 @@ pdf_links = [
19
  "https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf"
20
  ]
21
 
22
- # Load and process PDFs
23
  @st.cache_resource
24
  def load_and_process_pdfs(pdf_links):
25
  documents = []
@@ -36,7 +35,6 @@ def load_and_process_pdfs(pdf_links):
36
 
37
  return text_splitter.split_documents(documents)
38
 
39
- # Set up Qdrant and embeddings
40
  @st.cache_resource
41
  def setup_vectorstore():
42
  qdrant_client = QdrantClient(":memory:")
@@ -49,15 +47,11 @@ def setup_vectorstore():
49
  embeddings=embeddings
50
  )
51
 
52
- # Check if collection exists, if not, create and populate it
53
- collections = qdrant_client.get_collections().collections
54
- if not any(collection.name == COLLECTION_NAME for collection in collections):
55
- documents = load_and_process_pdfs(pdf_links)
56
- vector_store.add_documents(documents)
57
 
58
  return vector_store
59
 
60
- # Create RAG pipeline
61
  @st.cache_resource
62
  def create_rag_pipeline(vector_store):
63
  retriever = vector_store.as_retriever()
@@ -85,7 +79,7 @@ def create_rag_pipeline(vector_store):
85
  """
86
 
87
  prompt = ChatPromptTemplate.from_template(template)
88
- primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0)
89
 
90
  retrieval_augmented_qa_chain = (
91
  {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
@@ -95,7 +89,6 @@ def create_rag_pipeline(vector_store):
95
 
96
  return retrieval_augmented_qa_chain
97
 
98
- # Streamlit UI
99
  st.title("Ask About AI Ethics!")
100
 
101
  vector_store = setup_vectorstore()
 
5
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
6
  from langchain_community.vectorstores import Qdrant
7
  from langchain.prompts import ChatPromptTemplate
8
+ from langchain.schema.output_parser import StrOutputParser
9
+ from langchain.schema.runnable import RunnablePassthrough
10
  from qdrant_client import QdrantClient
11
  from operator import itemgetter
12
 
 
19
  "https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf"
20
  ]
21
 
 
22
  @st.cache_resource
23
  def load_and_process_pdfs(pdf_links):
24
  documents = []
 
35
 
36
  return text_splitter.split_documents(documents)
37
 
 
38
  @st.cache_resource
39
  def setup_vectorstore():
40
  qdrant_client = QdrantClient(":memory:")
 
47
  embeddings=embeddings
48
  )
49
 
50
+ documents = load_and_process_pdfs(pdf_links)
51
+ vector_store.add_documents(documents)
 
 
 
52
 
53
  return vector_store
54
 
 
55
  @st.cache_resource
56
  def create_rag_pipeline(vector_store):
57
  retriever = vector_store.as_retriever()
 
79
  """
80
 
81
  prompt = ChatPromptTemplate.from_template(template)
82
+ primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0) # Note: Changed from gpt-4o-mini to gpt-4
83
 
84
  retrieval_augmented_qa_chain = (
85
  {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
 
89
 
90
  return retrieval_augmented_qa_chain
91
 
 
92
  st.title("Ask About AI Ethics!")
93
 
94
  vector_store = setup_vectorstore()