# Minimal search API: FastAPI service that forwards a user query to a
# Groq-hosted LLM via LangChain and returns the model's response.
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from langchain_groq import ChatGroq
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
import os
# Load environment variables (e.g. GROQ_API_KEY, read implicitly by ChatGroq)
# from a local .env file, if present.
load_dotenv()
# Initialize FastAPI app
app = FastAPI()
# Create a request model
class SearchQuery(BaseModel):
    """Request body for POST /search: the free-text query to answer."""
    # The user's natural-language query; forwarded verbatim to the LLM prompt.
    query: str
# Initialize LangChain with Groq
llm = ChatGroq(
    temperature=0.7,
    # NOTE(review): "mixtral-8x7b-32768" has been decommissioned by Groq —
    # confirm it is still served, or switch to a currently supported model id.
    model_name="mixtral-8x7b-32768", # You can also use "llama2-70b-4096"
)
# Single-variable prompt template that wraps the raw user query.
prompt_template = PromptTemplate(
    input_variables=["query"],
    template="Please provide a detailed response to the following query: {query}"
)
# NOTE(review): LLMChain is deprecated in recent LangChain releases (the
# `prompt | llm` runnable composition is preferred); kept as-is to match
# the file's current imports.
chain = LLMChain(llm=llm, prompt=prompt_template)
@app.post("/search")
async def process_search(search_query: SearchQuery):
    """Run the user's query through the LLM chain and return its answer.

    Returns a JSON object with a "status" flag and the model "response".
    Any failure from the chain/provider is surfaced as an HTTP 500 whose
    detail carries the underlying error text.
    """
    try:
        # Keep the try body minimal: only the chain invocation can raise here.
        answer = chain.run(query=search_query.query)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
    return {
        "status": "success",
        "response": answer
    }
@app.get("/")
async def root():
    """Health-check endpoint confirming the service is up."""
    return {"message": "Search API is running"}