Tamil Eniyan
committed on
Commit
·
39d1fa0
1
Parent(s):
ab64022
Updated app to use Firebase
Browse files
app.py
CHANGED
@@ -1,32 +1,11 @@
|
|
1 |
-
import os
|
2 |
-
import json
|
3 |
import streamlit as st
|
4 |
-
import firebase_admin
|
5 |
-
from firebase_admin import credentials, firestore
|
6 |
import faiss
|
7 |
import numpy as np
|
8 |
import pickle
|
|
|
|
|
9 |
from sentence_transformers import SentenceTransformer
|
10 |
-
from transformers import pipeline
|
11 |
-
|
12 |
-
# -----------------------------
|
13 |
-
# Firebase Initialization
|
14 |
-
# -----------------------------
|
15 |
-
@st.cache_resource
|
16 |
-
def init_firestore():
|
17 |
-
# Load the service account JSON from the environment variable
|
18 |
-
firebase_creds_json = os.environ.get("FIREBASE_SERVICE_ACCOUNT")
|
19 |
-
if not firebase_creds_json:
|
20 |
-
st.error("Firebase service account credentials not found in environment variables!")
|
21 |
-
return None
|
22 |
-
service_account_info = json.loads(firebase_creds_json)
|
23 |
-
cred = credentials.Certificate(service_account_info)
|
24 |
-
# Initialize the Firebase app only once
|
25 |
-
if not firebase_admin._apps:
|
26 |
-
firebase_admin.initialize_app(cred)
|
27 |
-
return firestore.client()
|
28 |
-
|
29 |
-
db = init_firestore()
|
30 |
|
31 |
# ========================
|
32 |
# File Names & Model Names
|
@@ -65,21 +44,22 @@ def load_curated_qa_pairs():
|
|
65 |
try:
|
66 |
with open(CURATED_QA_FILE, "r", encoding="utf-8") as f:
|
67 |
return json.load(f)
|
68 |
-
except
|
69 |
-
st.error(f"Error loading curated Q/A pairs: {e}")
|
70 |
return []
|
71 |
|
72 |
# ========================================
|
73 |
# Chatbot Interface & Conversation Handling
|
74 |
# ========================================
|
|
|
75 |
def display_conversation():
|
76 |
"""Displays conversation history in a structured chat format."""
|
77 |
-
for
|
|
|
78 |
with st.chat_message(role):
|
79 |
st.write(message)
|
80 |
|
81 |
def add_to_conversation(role, message):
|
82 |
-
"""Adds a message to
|
83 |
st.session_state.conversation_history.append((role, message))
|
84 |
|
85 |
# Initialize conversation history
|
@@ -104,25 +84,15 @@ user_query = st.chat_input("Ask a question about the document...")
|
|
104 |
if user_query:
|
105 |
add_to_conversation("user", user_query)
|
106 |
|
107 |
-
# Check for
|
108 |
answer = None
|
109 |
for pair in curated_qa_pairs:
|
110 |
if user_query.lower() in pair["question"].lower():
|
111 |
answer = pair["answer"]
|
112 |
break
|
113 |
-
|
114 |
-
# If no curated answer is found, save the question to Firebase and process it.
|
115 |
if not answer:
|
116 |
-
|
117 |
-
if db is not None:
|
118 |
-
db.collection("llmquestions").add({
|
119 |
-
"question": user_query,
|
120 |
-
"timestamp": firestore.SERVER_TIMESTAMP
|
121 |
-
})
|
122 |
-
except Exception as e:
|
123 |
-
st.error(f"Error saving question to Firebase: {e}")
|
124 |
-
|
125 |
-
# Retrieve relevant context from FAISS index
|
126 |
query_embedding = embed_model.encode([user_query]).astype("float32")
|
127 |
distances, indices = index.search(query_embedding, 3)
|
128 |
pdf_context = "\n".join(chunks[idx] for idx in indices[0])
|
@@ -132,4 +102,4 @@ if user_query:
|
|
132 |
answer = response.get("answer", "I couldn't find an answer to that.")
|
133 |
|
134 |
add_to_conversation("assistant", answer)
|
135 |
-
st.rerun()
|
|
|
|
|
|
|
1 |
import streamlit as st
|
|
|
|
|
2 |
import faiss
|
3 |
import numpy as np
|
4 |
import pickle
|
5 |
+
import json
|
6 |
+
import torch
|
7 |
from sentence_transformers import SentenceTransformer
|
8 |
+
from transformers import pipeline, RagTokenizer, RagRetriever, RagSequenceForGeneration
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
9 |
|
10 |
# ========================
|
11 |
# File Names & Model Names
|
|
|
44 |
try:
|
45 |
with open(CURATED_QA_FILE, "r", encoding="utf-8") as f:
|
46 |
return json.load(f)
|
47 |
+
except:
|
|
|
48 |
return []
|
49 |
|
50 |
# ========================================
|
51 |
# Chatbot Interface & Conversation Handling
|
52 |
# ========================================
|
53 |
+
|
54 |
def display_conversation():
|
55 |
"""Displays conversation history in a structured chat format."""
|
56 |
+
for entry in st.session_state.conversation_history:
|
57 |
+
role, message = entry
|
58 |
with st.chat_message(role):
|
59 |
st.write(message)
|
60 |
|
61 |
def add_to_conversation(role, message):
|
62 |
+
"""Adds a message to conversation history."""
|
63 |
st.session_state.conversation_history.append((role, message))
|
64 |
|
65 |
# Initialize conversation history
|
|
|
84 |
if user_query:
|
85 |
add_to_conversation("user", user_query)
|
86 |
|
87 |
+
# Check for curated Q/A pair
|
88 |
answer = None
|
89 |
for pair in curated_qa_pairs:
|
90 |
if user_query.lower() in pair["question"].lower():
|
91 |
answer = pair["answer"]
|
92 |
break
|
93 |
+
|
|
|
94 |
if not answer:
|
95 |
+
# Retrieve relevant context
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
96 |
query_embedding = embed_model.encode([user_query]).astype("float32")
|
97 |
distances, indices = index.search(query_embedding, 3)
|
98 |
pdf_context = "\n".join(chunks[idx] for idx in indices[0])
|
|
|
102 |
answer = response.get("answer", "I couldn't find an answer to that.")
|
103 |
|
104 |
add_to_conversation("assistant", answer)
|
105 |
+
st.rerun()
|