Technocoloredgeek committed on
Commit 3aa6e0c · verified · 1 Parent(s): 9d308e6

Update app.py

Files changed (1)
  1. app.py +41 -1
app.py CHANGED
@@ -8,6 +8,7 @@ from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from qdrant_client import QdrantClient
+ from operator import itemgetter

# Set up API keys
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
@@ -71,4 +72,43 @@ def create_rag_pipeline(vector_store):
5. If the context doesn't contain sufficient information to fully answer the question, state this clearly and say,'I don't know'.
6. Do not introduce any information not present in the context.
7. If asked for an opinion or recommendation, base it strictly on insights from the context.
- 8. Use
+ 8. Use a confident, authoritative tone while maintaining accuracy.
+ 9. If you cannot provide a clear answer to the question, reply with "I don't know".
+
+ Question:
+ {question}
+
+ Context:
+ {context}
+
+ Answer:
+ """
+
+     prompt = ChatPromptTemplate.from_template(template)
+     primary_qa_llm = ChatOpenAI(model_name="gpt-4", temperature=0)
+
+     retrieval_augmented_qa_chain = (
+         {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
+         | RunnablePassthrough.assign(context=itemgetter("context"))
+         | {"response": prompt | primary_qa_llm, "context": itemgetter("context")}
+     )
+
+     return retrieval_augmented_qa_chain
+
+ # Streamlit UI
+ st.title("Ask About AI Ethics!")
+
+ vector_store = setup_vectorstore()
+ rag_pipeline = create_rag_pipeline(vector_store)
+
+ user_query = st.text_input("Enter your question about AI Ethics:")
+
+ if user_query:
+     with st.spinner("Generating response..."):
+         result = rag_pipeline.invoke({"question": user_query})
+
+     st.write("Response:")
+     st.write(result["response"].content)
+
+     st.write("Context Used:")
+     st.write(result["context"])
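
Note that the new chain references a `retriever` that is not defined anywhere in this hunk; it is presumably built from `vector_store` earlier in `create_rag_pipeline`. Below is a minimal, self-contained sketch of the LCEL pattern the commit adds, with hypothetical stand-ins (`fake_retriever`, `fake_model`) in place of the Qdrant retriever and ChatOpenAI, so it runs without API keys. It is an illustration of the chain's shape, not the app's actual code:

from operator import itemgetter

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# Hypothetical stand-in for the app's retriever: returns canned "documents".
fake_retriever = RunnableLambda(lambda question: [f"doc about: {question}"])

# Hypothetical stand-in for ChatOpenAI: echoes the formatted prompt back.
fake_model = RunnableLambda(lambda prompt_value: prompt_value.to_string())

prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)

chain = (
    # Fan out: retrieve context from the question, pass the question through.
    {"context": itemgetter("question") | fake_retriever,
     "question": itemgetter("question")}
    # Mirrors the committed chain's shape; re-assigning "context" to itself
    # is effectively a pass-through step here.
    | RunnablePassthrough.assign(context=itemgetter("context"))
    # Fan out again: generate the response and carry the raw context along,
    # which is what lets the Streamlit UI show "Context Used" afterwards.
    | {"response": prompt | fake_model, "context": itemgetter("context")}
)

result = chain.invoke({"question": "What is AI ethics?"})
print(result["response"])  # a plain string here; ChatOpenAI returns a message, hence .content in app.py
print(result["context"])   # the retrieved documents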
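
The diff also does not show `setup_vectorstore`. Given the `qdrant_client` import, a typical Qdrant-backed setup could look like the sketch below; the texts, collection name, and `FakeEmbeddings` stand-in (used so it runs offline) are all assumptions, not taken from the commit:

from langchain_community.embeddings import FakeEmbeddings  # hypothetical stand-in for real embeddings
from langchain_community.vectorstores import Qdrant

# Hypothetical sketch: an in-memory Qdrant collection built from a few texts.
vector_store = Qdrant.from_texts(
    ["AI ethics concerns fairness.", "Transparency builds trust."],
    FakeEmbeddings(size=32),
    location=":memory:",
    collection_name="demo",
)

# The committed chain's `retriever` is presumably obtained like this:
retriever = vector_store.as_retriever()
print(retriever.invoke("fairness"))  # -> list of matching Documents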