# Helpers for loading a website from its sitemap, splitting it into chunks,
# and pushing/pulling embeddings through a Pinecone index.
import asyncio

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders.sitemap import SitemapLoader
from langchain_community.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone, ServerlessSpec


def get_website_data(sitemap_url):
    """Fetch every page listed in the sitemap and return it as LangChain documents."""
    # SitemapLoader scrapes pages asynchronously; giving it a fresh event loop
    # avoids "no running event loop" errors in environments such as Streamlit
    # that don't provide one on the current thread.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loader = SitemapLoader(sitemap_url)
    docs = loader.load()
    return docs


def split_data(docs):
    """Split documents into overlapping chunks sized for embedding."""
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1000,
        chunk_overlap=200,
        length_function=len,
    )
    docs_chunks = text_splitter.split_documents(docs)
    return docs_chunks


def create_embeddings():
    """Return a sentence-transformers embedding model (384-dimensional vectors)."""
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    return embeddings


def push_to_pinecone(pinecone_apikey, pinecone_index_name, embeddings, docs):
    """Create the index if it doesn't exist, then upsert the document chunks."""
    pc = Pinecone(api_key=pinecone_apikey)

    existing_indexes = [index_info["name"] for index_info in pc.list_indexes()]
    if pinecone_index_name not in existing_indexes:
        # The dimension must match the embedding model (all-MiniLM-L6-v2 -> 384).
        # Recent Pinecone clients require a spec when creating an index; the
        # serverless cloud/region here are assumptions, adjust them to your project.
        pc.create_index(
            name=pinecone_index_name,
            dimension=384,
            metric="cosine",
            spec=ServerlessSpec(cloud="aws", region="us-east-1"),
        )

    index = pc.Index(pinecone_index_name)
    vector_store = PineconeVectorStore(index=index, embedding=embeddings)
    vector_store.add_documents(documents=docs)
    return vector_store


def pull_from_pinecone(pinecone_apikey, pinecone_index_name, embeddings):
    """Connect to an existing index and return it wrapped as a vector store."""
    pc = Pinecone(api_key=pinecone_apikey)
    index = pc.Index(pinecone_index_name)
    vector_store = PineconeVectorStore(index=index, embedding=embeddings)
    return vector_store


def get_similar_docs(vector_store, query, k=2):
    """Return the k chunks most similar to the query."""
    similar_docs = vector_store.similarity_search(query, k=k)
    return similar_docs
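

# A minimal sketch of how the helpers above compose into an ingest-and-query
# flow. The sitemap URL, API key, and index name are placeholders, not values
# taken from this project.
if __name__ == "__main__":
    raw_docs = get_website_data("https://example.com/sitemap.xml")
    chunks = split_data(raw_docs)
    embeddings = create_embeddings()
    push_to_pinecone("YOUR_PINECONE_API_KEY", "demo-index", embeddings, chunks)
    store = pull_from_pinecone("YOUR_PINECONE_API_KEY", "demo-index", embeddings)
    for doc in get_similar_docs(store, "example query", k=2):
        print(doc.page_content[:200])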