# intelliSOC — app.py
# (Hugging Face Spaces page header removed from this scrape; content below is the raw app.py.)
# Standard library
import os

# Third-party
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq

# FastAPI application serving the /search LLM endpoint and a health check.
app = FastAPI()
from typing import Optional


class SearchQuery(BaseModel):
    """Request body for POST /search.

    Attributes:
        query: The user's search/question text (required).
        context: Optional extra context to steer the model; when omitted,
            the endpoint substitutes a default persona string.
    """

    query: str
    # Optional — None signals "use the endpoint's default context".
    context: Optional[str] = None
# Initialize LangChain with Groq
llm = ChatGroq( temperature=0.7, model_name="mixtral-8x7b-32768", groq_api_key="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" # Replace
with your actual Groq API key
)
# Prompt template encoding the "elite cybersecurity expert" persona and the
# response-formatting rules; it is filled with the caller's context and query.
prompt_template = PromptTemplate(
    input_variables=["query", "context"],
    template="""
Context: You are an elite cybersecurity AI with comprehensive mastery of all
domains, including network security, cloud security, threat intelligence,
cryptography, and incident response. Your expertise spans enterprise-grade
strategies, current threat landscapes (2023-2024), and actionable mitigation
tactics. Prioritize concise, technical, and ROI-driven insights.

Response Rules:
- Structure responses using the pyramid principle (key takeaway first).
- Maximum 500 words per response.
- Use technical terminology appropriately (e.g., OWASP Top 10, MITRE ATT&CK,
  NIST references).
- Include critical data points:
  - CVE IDs for vulnerabilities.
  - CVSS scores where applicable.
  - Latest compliance standards (e.g., ISO 27001:2022, NIST CSF 2.0).
- Format complex concepts clearly:
  → Security through obscurity → Zero-trust architecture

Source Integration:
- Cite only authoritative sources (e.g., CISA alerts, RFCs, vendor advisories).
- Include timestamps for exploit disclosures.
- Flag conflicting industry perspectives where relevant.

Context: {context}
Query: {query}

Provide a concise, actionable, and enterprise-focused response based on your
expertise and the provided context.
""",
)

# Chain binding the Groq LLM to the prompt template above.
chain = LLMChain(llm=llm, prompt=prompt_template)


@app.post("/search")
async def process_search(search_query: SearchQuery):
    """Run the user's query (plus optional context) through the LLM chain.

    Returns a JSON object {"status": "success", "response": <model text>}.
    Raises HTTP 500 with the underlying error message on any chain failure.
    """
    try:
        # Fall back to a generic expert persona when no context is supplied.
        context = search_query.context or "You are a cybersecurity expert."
        response = chain.run(query=search_query.query, context=context)
        return {"status": "success", "response": response}
    except Exception as e:
        # Surface chain/LLM errors to the client as a 500 with detail.
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/")
async def root():
    """Health-check endpoint confirming the API is up."""
    return {"message": "Search API is running"}