import os
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from google.colab import userdata
class GeminiLLM:
    def __init__(self):
        # Read the Gemini API key from the environment
        self.ACCESS_TOKEN = os.getenv('GOOGLE_GEMINI_TOKEN')
        self.model_name = "gemini-pro"
    def getEmbeddingsModel(self):
        # Sentence-transformers model used to embed documents and queries
        self.embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
        return self.embeddings
    def getRetriever(self, documents):
        # Build a Chroma vector store from the documents; persist_directory
        # writes the index to local disk rather than keeping it in memory only
        vectorstore = Chroma.from_documents(
            documents=documents,
            embedding=self.embeddings,
            persist_directory="chroma_db_dir",
            collection_name="sermon_lab_ai"
        )
        # Return the top 3 most similar chunks per query
        retriever = vectorstore.as_retriever(
            search_kwargs={"k": 3}
        )
        return (retriever, vectorstore)
    def getLLM(self, documents):  # documents is accepted but not used here
        if os.getenv('GOOGLE_GEMINI_TOKEN') is None:
            raise ValueError("GOOGLE_GEMINI_TOKEN environment variable not set")
        # Pass the token explicitly: ChatGoogleGenerativeAI otherwise looks
        # for GOOGLE_API_KEY, not GOOGLE_GEMINI_TOKEN
        self.llm = ChatGoogleGenerativeAI(
            model=self.model_name,
            google_api_key=self.ACCESS_TOKEN,
            temperature=0.7,
            top_k=40,
            top_p=1
        )
        return self.llm
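
# --- Usage sketch (added for illustration; not part of the original class) ---
# A minimal, hypothetical example of wiring the class together. It assumes
# GOOGLE_GEMINI_TOKEN is set in the environment and that the sample Document
# below stands in for real documents produced elsewhere by a loader/splitter.
if __name__ == "__main__":
    from langchain_core.documents import Document

    docs = [Document(page_content="Sample sermon text for indexing.")]

    gemini = GeminiLLM()
    gemini.getEmbeddingsModel()                       # must run before getRetriever
    retriever, vectorstore = gemini.getRetriever(docs)
    llm = gemini.getLLM(docs)

    # Retrieve the most relevant chunk and ask the model about it
    context = retriever.get_relevant_documents("sermon")
    print(llm.invoke(f"Summarize: {context[0].page_content}").content)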