from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from langchain_groq import ChatGroq
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()

# Initialize FastAPI app
app = FastAPI()

# Create a request model
class SearchQuery(BaseModel):
    query: str

# Initialize LangChain with Groq
llm = ChatGroq(
    temperature=0.7,
    model_name="mixtral-8x7b-32768",  # You can also use "llama2-70b-4096"
)

prompt_template = PromptTemplate(
    input_variables=["query"],
    template="Please provide a detailed response to the following query: {query}"
)

chain = LLMChain(llm=llm, prompt=prompt_template)

@app.post("/search")
async def process_search(search_query: SearchQuery):
    try:
        # Process the query using LangChain
        response = chain.run(query=search_query.query)
        return {
            "status": "success",
            "response": response
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.get("/")
async def root():
    return {"message": "Search API is running"}
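
# --- Usage sketch (not part of the original service code) ---
# A minimal way to exercise the two endpoints locally with FastAPI's TestClient,
# assuming GROQ_API_KEY is available in the environment or a .env file.
# The example query string is illustrative only.
if __name__ == "__main__":
    from fastapi.testclient import TestClient

    client = TestClient(app)

    # Health check: should return {"message": "Search API is running"}
    print(client.get("/").json())

    # Search request: returns {"status": "success", "response": ...} on success
    result = client.post("/search", json={"query": "What is LangChain?"})
    print(result.json())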