import streamlit as st
import pandas as pd
import os
import traceback
from dotenv import load_dotenv
from llama_index.readers.file.paged_csv.base import PagedCSVReader
from llama_index.core import Settings, VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.faiss import FaissVectorStore
from llama_index.core.ingestion import IngestionPipeline
from langchain_community.vectorstores import FAISS as LangChainFAISS
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_core.documents import Document
import faiss
import tempfile

# Load environment variables from a .env file, if present
load_dotenv()

# Check OpenAI API Key and stop early if it is missing
if not os.getenv("OPENAI_API_KEY"):
    st.error("⚠️ OpenAI API Key is missing! Please check your .env file or environment variables.")
    st.stop()

# Global settings for LlamaIndex
EMBED_DIMENSION = 512
Settings.llm = OpenAI(model="gpt-4o")
Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small", dimensions=EMBED_DIMENSION)

# Streamlit app
st.title("Chat with CSV Files - LangChain vs LlamaIndex")

# File uploader
uploaded_file = st.file_uploader("Upload a CSV file", type=["csv"])

if uploaded_file:
    try:
        # Read and preview CSV data using pandas
        data = pd.read_csv(uploaded_file)
        st.write("Preview of uploaded data:")
        st.dataframe(data)

        # Save the uploaded file to a temporary location; write through the
        # open handle so this also works on Windows, where a file opened by
        # NamedTemporaryFile cannot be reopened by name while still open
        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv", mode="w", encoding="utf-8") as temp_file:
            temp_file_path = temp_file.name
            data.to_csv(temp_file, index=False)
            temp_file.flush()

        # Debugging: verify the temporary file (display partial content)
        st.write("Temporary file path:", temp_file_path)
        with open(temp_file_path, "r", encoding="utf-8") as f:
            content = f.read()
        st.write("Partial file content (first 500 characters):")
        st.text(content[:500])

        # Tabs for LangChain and LlamaIndex
        tab1, tab2 = st.tabs(["LangChain", "LlamaIndex"])

        # ✅ LangChain Processing
        with tab1:
            st.subheader("LangChain Query")
            try:
                # ✅ Convert CSV rows into LangChain Document objects,
                # one document per row, formatted as "column: value" lines
                st.write("Processing CSV with a custom loader...")
                documents = []
                for _, row in data.iterrows():
                    row_text = "\n".join(f"{col}: {row[col]}" for col in data.columns)
                    documents.append(Document(page_content=row_text))

                # Print a sample document
                if documents:
                    st.write("Sample processed document (LangChain):")
                    st.text(documents[0].page_content)

                # ✅ Create FAISS VectorStore; the embedding model must produce
                # vectors of exactly EMBED_DIMENSION, or FAISS will reject them
                langchain_index = faiss.IndexFlatL2(EMBED_DIMENSION)
                langchain_vector_store = LangChainFAISS(
                    embedding_function=OpenAIEmbeddings(
                        model="text-embedding-3-small", dimensions=EMBED_DIMENSION
                    ),
                    index=langchain_index,
                    docstore=InMemoryDocstore(),
                    index_to_docstore_id={},
                )

                # ✅ Add properly formatted documents to FAISS
                langchain_vector_store.add_documents(documents)
                st.write("Documents successfully added to FAISS VectorStore.")

                # ✅ Build the RAG chain invoked below (the prompt wording is
                # illustrative; tune it to your data)
                retriever = langchain_vector_store.as_retriever()
                prompt = ChatPromptTemplate.from_messages([
                    ("system",
                     "You are an assistant answering questions about a CSV file. "
                     "Use the retrieved context to answer. If you don't know, say so.\n\n{context}"),
                    ("human", "{input}"),
                ])
                combine_docs_chain = create_stuff_documents_chain(ChatOpenAI(model="gpt-4o"), prompt)
                langchain_rag_chain = create_retrieval_chain(retriever, combine_docs_chain)

                # ✅ Query Processing
                query = st.text_input("Ask a question about your data (LangChain):")
                if query:
                    try:
                        st.write("Processing your question...")
                        answer = langchain_rag_chain.invoke({"input": query})
                        st.write(f"**Answer:** {answer['answer']}")
                    except Exception as e:
                        error_message = traceback.format_exc()
                        st.error(f"Error processing query: {e}")
                        st.text(error_message)
            except Exception as e:
                error_message = traceback.format_exc()
                st.error(f"Error processing with LangChain: {e}")
                st.text(error_message)
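        # ✅ LlamaIndex Processing
        # A minimal sketch wiring together the PagedCSVReader, FaissVectorStore,
        # IngestionPipeline, and VectorStoreIndex imports above; it assumes the
        # pipeline's default transformations embed the documents via
        # Settings.embed_model, and similarity_top_k=2 is an arbitrary choice.
        with tab2:
            st.subheader("LlamaIndex Query")
            try:
                from pathlib import Path

                # Load the CSV row-by-row: PagedCSVReader yields one Document
                # per row, formatted as "column: value" lines
                st.write("Processing CSV with PagedCSVReader...")
                csv_reader = PagedCSVReader()
                llama_documents = csv_reader.load_data(file=Path(temp_file_path))

                if llama_documents:
                    st.write("Sample processed document (LlamaIndex):")
                    st.text(llama_documents[0].text)

                # Embed the documents into a FAISS-backed vector store
                llama_faiss_index = faiss.IndexFlatL2(EMBED_DIMENSION)
                llama_vector_store = FaissVectorStore(faiss_index=llama_faiss_index)
                pipeline = IngestionPipeline(vector_store=llama_vector_store, documents=llama_documents)
                nodes = pipeline.run()

                # Build an index and query engine over the ingested nodes
                llama_index_obj = VectorStoreIndex(nodes)
                query_engine = llama_index_obj.as_query_engine(similarity_top_k=2)

                # Query Processing
                llama_query = st.text_input("Ask a question about your data (LlamaIndex):")
                if llama_query:
                    try:
                        st.write("Processing your question...")
                        response = query_engine.query(llama_query)
                        st.write(f"**Answer:** {response.response}")
                    except Exception as e:
                        error_message = traceback.format_exc()
                        st.error(f"Error processing query: {e}")
                        st.text(error_message)
            except Exception as e:
                error_message = traceback.format_exc()
                st.error(f"Error processing with LlamaIndex: {e}")
                st.text(error_message)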
    except Exception as e:
        error_message = traceback.format_exc()
        st.error(f"Error reading uploaded file: {e}")
        st.text(error_message)