from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_pinecone import PineconeVectorStore
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from pinecone import Pinecone, ServerlessSpec
import asyncio
from langchain_community.document_loaders.sitemap import SitemapLoader


def get_website_data(sitemap_url):
    # SitemapLoader needs a running event loop; create one explicitly so this
    # also works in environments (e.g. Streamlit) that don't provide one.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    loader = SitemapLoader(sitemap_url)
    docs = loader.load()
    return docs


def split_data(docs):
    # Split the loaded pages into overlapping chunks suitable for embedding
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    docs_chunks = text_splitter.split_documents(docs)
    return docs_chunks


def create_embeddings():
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    return embeddings


def push_to_pinecone(pinecone_apikey, pinecone_index_name, embeddings, docs):
    # Initialize Pinecone with the v3 client
    pc = Pinecone(api_key=pinecone_apikey)

    # Check whether the index exists; if not, create it
    existing_indexes = [index_info["name"] for index_info in pc.list_indexes()]
    if pinecone_index_name not in existing_indexes:
        # Adjust dimension to match your embedding model;
        # 384 is the output dimension of all-MiniLM-L6-v2.
        pc.create_index(
            name=pinecone_index_name,
            dimension=384,
            metric="cosine",
            # The v3 client requires a spec; adjust cloud/region to your setup.
            spec=ServerlessSpec(cloud="aws", region="us-east-1"),
        )

    # Get the index
    index = pc.Index(pinecone_index_name)

    # Create the vector store and add the document chunks
    vector_store = PineconeVectorStore(index=index, embedding=embeddings)
    vector_store.add_documents(documents=docs)

    return vector_store


def pull_from_pinecone(pinecone_apikey, pinecone_index_name, embeddings):
    # Initialize Pinecone with the v3 client
    pc = Pinecone(api_key=pinecone_apikey)

    # Get the index
    index = pc.Index(pinecone_index_name)

    # Create a vector store backed by the existing index
    vector_store = PineconeVectorStore(index=index, embedding=embeddings)
    return vector_store


def get_similar_docs(vector_store, query, k=2):
    # Return the k documents most similar to the query
    similar_docs = vector_store.similarity_search(query, k=k)
    return similar_docs
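

# Minimal usage sketch showing how the helpers above chain together.
# Assumptions (not part of the original module): the sitemap URL, the index
# name, the example query, and the PINECONE_API_KEY environment variable are
# placeholders to be replaced with your own values.
if __name__ == "__main__":
    import os

    api_key = os.environ["PINECONE_API_KEY"]          # assumed env var
    sitemap_url = "https://example.com/sitemap.xml"   # hypothetical sitemap
    index_name = "website-index"                      # hypothetical index name

    # Load the site, chunk it, and embed/push the chunks to Pinecone
    docs = get_website_data(sitemap_url)
    chunks = split_data(docs)
    embeddings = create_embeddings()
    store = push_to_pinecone(api_key, index_name, embeddings, chunks)

    # Query the index for the most relevant chunks
    results = get_similar_docs(store, "What does this site cover?", k=2)
    for doc in results:
        print(doc.page_content[:200])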