Update app.py

app.py CHANGED
@@ -3,6 +3,7 @@ from pydantic import BaseModel
 from langchain_groq import ChatGroq
 from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
+import httpx
 import os
 
 # Initialize FastAPI app
@@ -20,40 +21,29 @@ llm = ChatGroq(
     groq_api_key="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"  # Replace with your actual Groq API key
 )
 
-# Define the prompt template with cybersecurity expertise
-
 # Define the prompt template with elite cybersecurity expertise
 prompt_template = PromptTemplate(
     input_variables=["query", "context"],
     template="""
     Context:
     You are an elite cybersecurity AI with comprehensive mastery of all domains, including network security, cloud security, threat intelligence, cryptography, and incident response. Your expertise spans enterprise-grade strategies, current threat landscapes (2023-2024), and actionable mitigation tactics. Prioritize concise, technical, and ROI-driven insights.
-
     Response Rules:
-    - Structure responses using the pyramid principle (key takeaway first).
     - Maximum 500 words per response.
     - Use technical terminology appropriately (e.g., OWASP Top 10, MITRE ATT&CK, NIST references).
     - Include critical data points:
       - CVE IDs for vulnerabilities.
       - CVSS scores where applicable.
       - Latest compliance standards (e.g., ISO 27001:2022, NIST CSF 2.0).
-    - Format complex concepts clearly:
-      → Security through obscurity
-      → Zero-trust architecture
-
-    Source Integration:
-    - Cite only authoritative sources (e.g., CISA alerts, RFCs, vendor advisories).
-    - Include timestamps for exploit disclosures.
-    - Flag conflicting industry perspectives where relevant.
-
     Context: {context}
     Query: {query}
-
    Provide a concise, actionable, and enterprise-focused response** based on your expertise and the provided context.
     """
 )
 chain = LLMChain(llm=llm, prompt=prompt_template)
 
+# URL of the external API
+EXTERNAL_API_URL = "https://api.example.com/process-input"  # Replace with the actual URL
+
 @app.post("/search")
 async def process_search(search_query: SearchQuery):
     try:
@@ -63,6 +53,14 @@ async def process_search(search_query: SearchQuery):
         # Process the query using LangChain with context
         response = chain.run(query=search_query.query, context=context)
 
+        # Send the user input to the external API (fire-and-forget)
+        async with httpx.AsyncClient() as client:
+            await client.post(
+                EXTERNAL_API_URL,
+                json={"input": search_query.query},  # Send the user input
+                timeout=5  # Set a timeout to avoid hanging
+            )
+
         return {
             "status": "success",
             "response": response
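
A note on the added block: the POST to EXTERNAL_API_URL is awaited inside the /search handler, so despite the "fire-and-forget" comment it holds up the response for up to the 5-second timeout, and any httpx error propagates out of the awaited call. A minimal sketch of a genuinely non-blocking dispatch, reusing the diff's httpx dependency and EXTERNAL_API_URL placeholder (the helper name forward_input is hypothetical, not part of the committed file), would schedule the call as a background task:

import asyncio
import httpx

EXTERNAL_API_URL = "https://api.example.com/process-input"  # placeholder, as in the diff

async def forward_input(query: str) -> None:
    # Best-effort delivery: errors are swallowed so the /search response
    # is neither delayed nor failed by the external call.
    try:
        async with httpx.AsyncClient() as client:
            await client.post(EXTERNAL_API_URL, json={"input": query}, timeout=5)
    except httpx.HTTPError:
        pass

# Inside process_search, instead of awaiting the POST inline:
#     asyncio.create_task(forward_input(search_query.query))

asyncio.create_task detaches delivery from the response path, at the cost of giving up any delivery guarantee to the external endpoint.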
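For reference, the /search route shown above takes a SearchQuery body whose query field is passed both to the LLMChain and to the external API. A hypothetical client call (the host and port are assumptions, not part of the diff) looks like:

import httpx

resp = httpx.post(
    "http://localhost:7860/search",  # assumed address where the Space's FastAPI app is served
    json={"query": "Summarize the exploitability of CVE-2024-3094"},
    timeout=30,
)
print(resp.json())  # expected shape: {"status": "success", "response": "..."}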
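The groq_api_key in the unchanged context lines is hardcoded next to a "Replace with your actual Groq API key" comment. Since app.py already imports os, a sketch of supplying the key from the environment instead (the GROQ_API_KEY variable name and the model are assumptions; the diff does not show how the rest of ChatGroq is configured) would be:

import os
from langchain_groq import ChatGroq

llm = ChatGroq(
    model="mixtral-8x7b-32768",               # illustrative; the actual model is set elsewhere in app.py
    groq_api_key=os.environ["GROQ_API_KEY"],  # read from the Space's secrets/environment
)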