"""Configure cached OpenAI embeddings and an in-memory LLM response cache.

Module-level side effects:
  - loads environment variables (e.g. OPENAI_API_KEY) from a local .env file
  - installs a process-wide LLM cache via `set_llm_cache`

Exports:
  core_embeddings  -- the raw OpenAI embedding model
  cached_embedder  -- disk-cached wrapper around `core_embeddings`
  chat_model       -- the chat LLM whose responses hit the in-memory cache
"""

from dotenv import load_dotenv
from langchain.embeddings import CacheBackedEmbeddings
from langchain.storage import LocalFileStore
from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache
from langchain_openai import ChatOpenAI
from langchain_openai.embeddings import OpenAIEmbeddings

# Pull OPENAI_API_KEY (and any other settings) from a .env file if present.
load_dotenv()

# Base embedding model; every call is billed, so it is wrapped in a cache below.
core_embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

# Persist embedding vectors on disk so repeated texts are embedded only once.
# Namespacing by model name keeps vectors from different models from colliding.
store = LocalFileStore("./cache/")
cached_embedder = CacheBackedEmbeddings.from_bytes_store(
    core_embeddings,
    store,
    namespace=core_embeddings.model,
)

chat_model = ChatOpenAI(model="gpt-4o")

# Cache chat-model responses in memory: an identical prompt returns the stored
# reply instead of making a new API call (cache is lost when the process exits).
set_llm_cache(InMemoryCache())