Phoenix21 committed
Commit 5cda1d3 · verified · 1 Parent(s): 739f62e

Update pipeline.py

Files changed (1)
  1. pipeline.py (+5 -4)
pipeline.py CHANGED
@@ -27,13 +27,13 @@ from langchain.chains import RetrievalQA, LLMChain
 from langchain.prompts import PromptTemplate
 from langchain.docstore.document import Document
 from langchain_core.caches import BaseCache
-from langchain_core.callbacks import CallbackManager
+# from langchain_core.callbacks import CallbackManager
 # from langchain.callbacks.base import BaseCallbacks # Updated import
 # from langchain.callbacks.manager import CallbackManager
 # from langchain.callbacks import StdOutCallbackHandler
 
 # Custom chain imports
-from groq_client import GroqClient
+# from groq_client import GroqClient
 from classification_chain import get_classification_chain
 from refusal_chain import get_refusal_chain
 from tailor_chain import get_tailor_chain
@@ -208,13 +208,15 @@ class NoCache(BaseCache):
     def clear(self):
         pass
 
+# Rebuild the ChatGroq model after defining NoCache
+ChatGroq.model_rebuild()
 # Initialize ChatGroq with cache
 try:
     fallback_groq_api_key = os.environ.get("GROQ_API_KEY_FALLBACK", os.environ.get("GROQ_API_KEY"))
     if not fallback_groq_api_key:
         logger.warning("No Groq API key found for fallback LLM")
     groq_fallback_llm = ChatGroq(
-        model=GROQ_MODELS["default"],
+        model="default",  # Replace with your actual model name if different
         temperature=0.7,
         groq_api_key=fallback_groq_api_key,
         max_tokens=2048,
@@ -223,7 +225,6 @@ try:
 except Exception as e:
     logger.error(f"Failed to initialize fallback Groq LLM: {e}")
     raise RuntimeError("ChatGroq initialization failed.") from e
-
 # -------------------------------------------------------
 # Rate-limit & Cache
 # -------------------------------------------------------
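For context, the change hinges on a pydantic quirk: ChatGroq holds its cache/callback types as forward references, so BaseCache must be importable and ChatGroq.model_rebuild() must run before the fallback LLM is constructed. The following is a minimal, self-contained sketch of the pattern this commit lands on, reconstructed from the diff alone: the lookup/update stubs and the cache=NoCache() argument are assumptions (only clear() appears in the hunk), and the model id is a stand-in for the commit's "default" placeholder.

# Sketch reconstructed from the diff; not the full pipeline.py.
import os
import logging

from langchain_core.caches import BaseCache
from langchain_groq import ChatGroq

logger = logging.getLogger(__name__)

class NoCache(BaseCache):
    """LLM cache that never stores anything, so every call hits the API."""
    def lookup(self, prompt, llm_string):
        return None
    def update(self, prompt, llm_string, return_val):
        pass
    def clear(self, **kwargs):
        pass

# ChatGroq references BaseCache lazily; rebuilding the pydantic model after the
# cache type exists avoids the "class not fully defined" error at construction time.
ChatGroq.model_rebuild()

fallback_groq_api_key = os.environ.get("GROQ_API_KEY_FALLBACK", os.environ.get("GROQ_API_KEY"))
if not fallback_groq_api_key:
    logger.warning("No Groq API key found for fallback LLM")

groq_fallback_llm = ChatGroq(
    model="llama-3.1-8b-instant",  # hypothetical model id; the commit leaves the placeholder "default"
    temperature=0.7,
    groq_api_key=fallback_groq_api_key,
    max_tokens=2048,
    cache=NoCache(),  # assumption: the no-op cache is passed explicitly rather than set globally
)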