import streamlit as st
import torch
from rag import FinancialChatbot
import warnings
from transformers import logging
# Suppress warnings
warnings.filterwarnings("ignore")
logging.set_verbosity_error()
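# Common workaround (assumed here) for Streamlit's file watcher erroring when it inspects torch.classes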
torch.classes.__path__ = []
# Use session state to persist the chatbot instance
if "chatbot" not in st.session_state:
st.session_state.chatbot = FinancialChatbot()
def fetch_answer_from_backend(query):
    """Calls the backend and returns a tuple of (answer, confidence)."""
    return st.session_state.chatbot.get_answer(query)  # Use the session-state chatbot instance
# Initialize Session State for Chat History
if "chat_history" not in st.session_state:
st.session_state.chat_history = []
if "loading" not in st.session_state:
st.session_state.loading = False
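# Each chat_history entry is a dict with "question", "answer", and "confidence" keys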
# Layout and Title
st.title("Financial RAG Chat Assistant")
# Display Chat History
for chat in st.session_state.chat_history:
    # User's query, right-aligned (inline styling is a minimal stand-in for the original markup)
    st.markdown(
        f"""
        <div style="text-align: right; margin: 4px 0;">
            {chat['question']}
        </div>
        """,
        unsafe_allow_html=True,
    )
    # Assistant's label and response, left-aligned
    if chat["answer"] is not None:
        st.markdown(
            f"""
            <div style="text-align: left; margin: 4px 0;">
                <b>Assistant</b><br>
                {chat['answer']}
            </div>
            """,
            unsafe_allow_html=True,
        )
        # Confidence score, shown below the answer
        if chat["confidence"] is not None:
            st.markdown(
                f"<small>Confidence: {chat['confidence'] * 100:.1f}%</small>",
                unsafe_allow_html=True,
            )
    st.divider()  # Visual divider between Q&A pairs
# User Input (Always at the Bottom)
user_input = st.chat_input("Ask a financial question...")
# If user inputs a question
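# st.chat_input returns None on reruns where nothing was submitted, so this block only runs on a new message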
if user_input:
    # Add the question to chat history with placeholders, then set the loading flag
    st.session_state.chat_history.append({
        "question": user_input,
        "answer": None,       # Placeholder for the answer
        "confidence": None,   # Placeholder for the confidence score
    })
    st.session_state.loading = True
    st.rerun()  # Refresh to display the question immediately
# If loading, call the backend and fill in the last question's answer
if st.session_state.loading:
    with st.spinner("Fetching answer..."):
        # Get the most recent question
        last_question = st.session_state.chat_history[-1]["question"]
        # Backend call
        answer, confidence = fetch_answer_from_backend(last_question)
        # Update the last chat history item with the answer and confidence
        st.session_state.chat_history[-1]["answer"] = answer
        st.session_state.chat_history[-1]["confidence"] = confidence
    # Stop loading and refresh to show the answer
    st.session_state.loading = False
    st.rerun()