Muhammad2003 commited on
Commit
1f891e5
·
verified ·
1 Parent(s): 3ecfd54

Upload 45 files

Browse files
Files changed (45) hide show
  1. AI_core/.env +1 -0
  2. AI_core/Legal_comprehension.ipynb +0 -0
  3. AI_core/__init__.py +3 -0
  4. AI_core/__pycache__/__init__.cpython-312.pyc +0 -0
  5. AI_core/__pycache__/config.cpython-312.pyc +0 -0
  6. AI_core/__pycache__/main.cpython-312.pyc +0 -0
  7. AI_core/agent/__init__.py +6 -0
  8. AI_core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
  9. AI_core/agent/__pycache__/agent_setup.cpython-312.pyc +0 -0
  10. AI_core/agent/agent_setup.py +40 -0
  11. AI_core/config.py +28 -0
  12. AI_core/main.py +25 -0
  13. AI_core/models/__pycache__/input_schemas.cpython-312.pyc +0 -0
  14. AI_core/models/input_schemas.py +11 -0
  15. AI_core/requirement.txt +10 -0
  16. AI_core/res.md +46 -0
  17. AI_core/tools/__init__.py +37 -0
  18. AI_core/tools/__pycache__/__init__.cpython-312.pyc +0 -0
  19. AI_core/tools/__pycache__/element_extraction_tool.cpython-312.pyc +0 -0
  20. AI_core/tools/__pycache__/evidence_analysis_tool.cpython-312.pyc +0 -0
  21. AI_core/tools/__pycache__/legal_qa_tool.cpython-312.pyc +0 -0
  22. AI_core/tools/__pycache__/report_generation_tool.cpython-312.pyc +0 -0
  23. AI_core/tools/__pycache__/summarization_tool.cpython-312.pyc +0 -0
  24. AI_core/tools/element_extraction_tool.py +62 -0
  25. AI_core/tools/evidence_analysis_tool.py +53 -0
  26. AI_core/tools/legal_qa_tool.py +68 -0
  27. AI_core/tools/report_generation_tool.py +74 -0
  28. AI_core/tools/summarization_tool.py +65 -0
  29. backend/__init__.py +3 -0
  30. backend/__pycache__/__init__.cpython-312.pyc +0 -0
  31. backend/__pycache__/__init__.cpython-38.pyc +0 -0
  32. backend/__pycache__/config.cpython-312.pyc +0 -0
  33. backend/config.py +28 -0
  34. backend/endpoint/__init__.py +3 -0
  35. backend/endpoint/__pycache__/__init__.cpython-312.pyc +0 -0
  36. backend/endpoint/__pycache__/__init__.cpython-38.pyc +0 -0
  37. backend/endpoint/__pycache__/api.cpython-312.pyc +0 -0
  38. backend/endpoint/__pycache__/models.cpython-312.pyc +0 -0
  39. backend/endpoint/__pycache__/server.cpython-312.pyc +0 -0
  40. backend/endpoint/__pycache__/server.cpython-38.pyc +0 -0
  41. backend/endpoint/api.py +125 -0
  42. backend/endpoint/models.py +27 -0
  43. backend/endpoint/server.py +38 -0
  44. backend/requirement.txt +6 -0
  45. run_server.py +7 -0
AI_core/.env ADDED
@@ -0,0 +1 @@
 
 
1
+ GROQ_API_KEY = "<REDACTED — a live Groq API key was committed here; revoke it immediately, remove .env from version control, and add .env to .gitignore>"
AI_core/Legal_comprehension.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AI_core/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ """
2
+ AI Core package initialization.
3
+ """
AI_core/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (219 Bytes). View file
 
AI_core/__pycache__/config.cpython-312.pyc ADDED
Binary file (933 Bytes). View file
 
AI_core/__pycache__/main.cpython-312.pyc ADDED
Binary file (1.15 kB). View file
 
AI_core/agent/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """
2
+ Agent package initialization.
3
+ """
4
+ from .agent_setup import agent_executor
5
+
6
+ __all__ = ['agent_executor']
AI_core/agent/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (309 Bytes). View file
 
AI_core/agent/__pycache__/agent_setup.cpython-312.pyc ADDED
Binary file (1.46 kB). View file
 
AI_core/agent/agent_setup.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Setup for the legal assistant agent.

Builds a tool-calling agent from the shared LLM configuration and the tool
registry, and exposes a ready-to-use ``agent_executor`` for the rest of the
package (imported by ``AI_core.agent.__init__``).
"""
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.prompts import ChatPromptTemplate

from AI_core.config import AGENT_LLM
from AI_core.tools import tools

# Create the main legal assistant agent.
# The system prompt enumerates the specialized tools so the model knows when
# to route a request to each one.
system_prompt = """
You are a helpful assistant that can use multiple tools to answer questions.
You have access to multiple specialized tools:

1. Document Summarization - For summarizing legal documents
2. Case Report Generation - For creating comprehensive legal case reports
3. Evidence Analysis - For analyzing legal evidence using advanced research capabilities
(Use this tool when you need updated information regarding laws and public information)
4. Legal Q&A - For answering legal questions using a knowledge base
5. Legal Element Extraction - For extracting specific elements from legal texts
"""

# Prompt layout: system instructions, the user's request ("{input}"), then the
# "agent_scratchpad" placeholder where intermediate tool calls and tool
# results are injected on each agent step.
agent_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        ("human", "{input}"),
        ("placeholder", "{agent_scratchpad}"),
    ]
)

agent = create_tool_calling_agent(AGENT_LLM, tools, agent_prompt)

# Create agent executor.
# max_iterations=5 bounds the tool-call loop; handle_parsing_errors lets the
# executor recover from malformed model output instead of raising.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=5
)
AI_core/config.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Configuration settings for the Legal AI Assistant.

Central place for the LLM clients, embedding model, text splitter and
external API endpoints shared by the rest of the package.
"""
from dotenv import load_dotenv
load_dotenv()  # by default looks for .env in current directory

import os  # NOTE(review): appears unused in this module — confirm before removing
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain_groq import ChatGroq

# LLM configurations.
# ChatGroq reads GROQ_API_KEY from the environment (populated by load_dotenv
# above); temperature=0 keeps outputs deterministic for legal use.
LLM = ChatGroq(temperature=0, model_name="compound-beta")
AGENT_LLM = ChatGroq(temperature=0, model_name="deepseek-r1-distill-llama-70b")

# Embedding configuration
EMBEDDINGS = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Text splitting configuration:
# 1500-char chunks with 150-char overlap so context carries across chunk edges.
TEXT_SPLITTER = RecursiveCharacterTextSplitter(
    chunk_size=1500,
    chunk_overlap=150,
)

# API endpoints
DEEPSEARCH_API_URL = 'https://deepsearch.jina.ai/v1/chat/completions'
AI_core/main.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Main entry point for the Legal AI Assistant.
"""
import asyncio
from typing import Dict, Any
from AI_core.agent import agent_executor


async def process_legal_request(user_query: str) -> str:
    """
    Process a user's legal request using the multiagent system.

    Args:
        user_query (str): The user's legal question or request

    Returns:
        str: The agent's answer, or a human-readable error message if
        anything goes wrong while the request is processed.
    """
    try:
        # The executor expects its templated variable under "input" and
        # returns a mapping whose "output" key holds the final answer.
        result = await agent_executor.ainvoke({"input": user_query})
        return result["output"]
    except Exception as exc:
        # Degrade to an error string instead of propagating, so callers
        # (e.g. API endpoints) always receive a displayable response.
        return f"Error processing your request: {str(exc)}"
AI_core/models/__pycache__/input_schemas.cpython-312.pyc ADDED
Binary file (974 Bytes). View file
 
AI_core/models/input_schemas.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
"""
Pydantic models for input validation.
"""
from pydantic import BaseModel, Field

class ReportGenerationInput(BaseModel):
    """Input schema for the report generation tool.

    The per-field descriptions double as instructions to the LLM when the
    tool is invoked with this args_schema.
    """
    # Name of the legal case, e.g. "NFIB v. Sebelius"
    case_name: str = Field(description="Name of the legal case")
    case_facts: str = Field(description="Key facts of the case")
    legal_issues: str = Field(description="Legal issues identified in the case")
    applicable_laws: str = Field(description="Laws and regulations applicable to this case")
AI_core/requirement.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain>=0.1.0
2
+ langchain-google-genai>=0.0.5
3
+ google-generativeai>=0.3.0
4
+ langchain-community>=0.0.16
5
+ faiss-cpu>=1.7.4
6
+ pypdf>=3.17.0
7
+ sentence-transformers>=2.5.0
8
+ pydantic>=2.5.0
9
+ requests>=2.31.0
10
+ huggingface-hub>=0.20.0
11
+ langchain-groq>=0.1.0
12
+ python-dotenv>=1.0.0
AI_core/res.md ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The Affordable Care Act's (ACA) individual mandate has been a lightning rod for legal challenges, primarily questioning the scope of federal power over individual healthcare decisions. The central issue revolves around whether the government can compel individuals to purchase health insurance and the legal justifications for such a mandate.
2
+
3
+ Initially, opponents of the ACA argued that the individual mandate exceeded Congress's authority under the Commerce Clause of the U.S. Constitution.[^1] This clause grants Congress the power to regulate interstate commerce, but critics contended that it does not extend to compelling individuals to participate in commerce by purchasing health insurance. Several lawsuits were filed challenging the mandate on these grounds, arguing that it represented an unprecedented expansion of federal power. For instance, in *Virginia ex rel. Cuccinelli v. Sebelius*, the plaintiffs argued that the Commerce Clause could not be used to regulate inactivity, i.e., the decision not to purchase health insurance. This argument posited that the Commerce Clause only applies to individuals and entities engaged in an "activity," and because the plaintiffs maintained they were not engaging in any activity, the Commerce Clause did not apply.[^2]
4
+
5
+ However, in the landmark case of *National Federation of Independent Business (NFIB) v. Sebelius* (2012), the Supreme Court rejected the Commerce Clause argument. Chief Justice John Roberts, writing for the majority, held that the individual mandate could not be justified under the Commerce Clause because it sought to regulate inactivity rather than activity. The Court reasoned that the power to regulate commerce presupposes the existence of commerce to regulate. Mandating the purchase of health insurance, the Court said, was not a permissible regulation of existing commercial activity.
6
+
7
+ Despite rejecting the Commerce Clause justification, the Supreme Court upheld the individual mandate as a valid exercise of Congress's taxing power. The Court reasoned that the mandate could be interpreted as a tax on those who choose not to purchase health insurance, and that Congress has broad authority to levy taxes under Article I, Section 8 of the Constitution. This decision was pivotal, as it shifted the legal basis for the mandate from the Commerce Clause to the taxing power. The Court emphasized that while Congress could not compel individuals to purchase insurance under the Commerce Clause, it could impose a tax on those who choose not to do so.[^3]
8
+
9
+ A seismic shift in the legal landscape occurred with the passage of the Tax Cuts and Jobs Act of 2017. This legislation reduced the tax penalty associated with the individual mandate to zero, effectively nullifying its financial impact. While the mandate itself remained on the books, its enforceability was fundamentally undermined. This 'zeroing out' of the penalty triggered a new wave of legal challenges, this time focusing on the severability of the mandate from the rest of the ACA.[^4]
10
+
11
+ Following the 'zeroing out,' the legal battleground shifted to the doctrine of severability. This doctrine addresses the question of whether, if one provision of a statute is found unconstitutional, the remaining provisions can stand independently. Opponents of the ACA argued that because the individual mandate was now essentially unenforceable, it was no longer constitutional, and that the rest of the ACA could not survive without it. They contended that the mandate was an integral part of the ACA, essential to its functioning, and that Congress would not have enacted the ACA without the mandate in place. This argument was central to the case of *Texas v. United States*, where the plaintiffs argued that the ACA was inextricably linked to the individual mandate, and that the entire law should be struck down.[^5]
12
+
13
+ Conversely, those defending the ACA argued that the mandate was severable from the rest of the law. They emphasized that Congress's decision to leave the ACA intact when it zeroed out the penalty indicated that the mandate was not so essential that the entire law had to fall with it. They also pointed to the continued stability of the insurance markets after the penalty was reduced to zero as evidence that the ACA could function effectively without the mandate.[^6] This position was supported by numerous legal scholars who argued that the ACA's other provisions, such as the guaranteed issue and community rating requirements, could operate independently of the mandate.
14
+
15
+ However, these post-'zeroing out' legal challenges encountered a significant obstacle: standing. Standing is a legal doctrine that requires plaintiffs to demonstrate a concrete and particularized injury in order to bring a lawsuit. In *California v. Texas* (2021), the Supreme Court ultimately rejected the challenges to the ACA on standing grounds. The Court held that the plaintiffs, a group of states and individuals, had failed to demonstrate that they had suffered any actual harm as a result of the individual mandate now that the penalty was zero. Chief Justice Roberts, again writing for the majority, reasoned that because individuals were no longer required to pay a penalty for failing to obtain health insurance, they could not show any direct injury caused by the mandate itself. This ruling effectively prevented the Court from reaching the merits of the severability arguments, leaving the ACA intact.
16
+
17
+ It is also important to acknowledge the dissenting voices in these legal battles. In *NFIB v. Sebelius*, Justices Scalia, Kennedy, Thomas, and Alito dissented, arguing that the individual mandate was unconstitutional under both the Commerce Clause and the Necessary and Proper Clause. They asserted that the mandate exceeded Congress's enumerated powers and could not be justified as a valid exercise of federal authority. Their dissenting opinion reflected a fundamental disagreement with the majority's interpretation of the Constitution and the scope of federal power.
18
+
19
+ Furthermore, even after the Supreme Court's decision in *California v. Texas*, differing opinions persisted among the lower courts regarding the severability of the individual mandate. For example, the Fifth Circuit Court of Appeals had previously expressed skepticism about the ACA's viability without the mandate, creating a split of authority among the circuit courts. This divergence of opinion underscored the ongoing legal uncertainty surrounding the ACA and the individual mandate.
20
+
21
+ The following table summarizes the legal arguments, constitutional bases, outcomes, and courts involved in the challenges to the ACA individual mandate:
22
+
23
+ | Legal Argument | Constitutional Basis | Outcome | Court |
24
+ | --- | --- | --- | --- |
25
+ | Exceeds Congress's power to regulate commerce | Commerce Clause | Rejected | Supreme Court (*NFIB v. Sebelius*) |
26
+ | Unconstitutional as a mandate, but valid as a tax | Taxing Power | Upheld | Supreme Court (*NFIB v. Sebelius*) |
27
+ | Individual mandate is inseverable from the ACA after penalty zeroed out | Severability Doctrine | Challenges rejected on standing grounds | Supreme Court (*California v. Texas*)[^7] |
28
+
29
+
30
+ The legal saga surrounding the ACA's individual mandate reveals a complex interplay of constitutional principles, political considerations, and practical implications. While the Supreme Court has twice upheld the ACA, the legal challenges have exposed deep divisions over the role of the federal government in healthcare and the limits of congressional power. The 'zeroing out' of the penalty and the subsequent focus on severability demonstrate how legislative actions can reshape the legal landscape and prompt new rounds of litigation. The ultimate resolution of these challenges underscores the importance of standing as a gatekeeping mechanism in federal courts, preventing abstract legal questions from being decided without a concrete injury. The ACA's journey through the courts serves as a potent reminder of the enduring tension between individual liberty and the collective good, and the ongoing debate over the appropriate balance between the two. The fact that the ACA remains, despite numerous attempts to dismantle it, suggests a certain resilience, perhaps even a recognition of its necessity in the current American healthcare landscape. Or perhaps it simply highlights the inertia of large, complex systems, and the difficulty of unwinding them once they are in place.
31
+
32
+
33
+
34
+ [^1]: States that challenged the ACA argued that the individual mandate was an overreach of Congress s commerce clause powers the government s well recognized but [The Constitutionality of the Affordable Care Act: An Update](https://journalofethics.ama-assn.org/article/constitutionality-affordable-care-act-update/2012-11)
35
+
36
+ [^2]: But while the Congress that passed the ACA said the mandate was important for the reformed insurance market to function the Congress that zeroed out the penalty decided to keep the other provisions in place Long standing legal principles say that Congress not the court gets to make that decision as even a brief from past litigants against the ACA noted [ACA Survives Legal Challenge, Protecting Coverage for Tens of](https://cbpp.org/research/health/suit-challenging-aca-legally-suspect-but-threatens-loss-of-coverage-for-millions)
37
+
38
+ [^3]: individual mandate penalty was zeroed out by tax legislation in 2017 While that legislation discussed further below did not repeal the [A Review of the Affordable Care Act at 10 Years, Part 1 - Mintz](https://mintz.com/insights-center/viewpoints/2146/2020-04-07-review-affordable-care-act-10-years-part-1-individual)
39
+
40
+ [^4]: Is the individual mandate severable from the ACA The challengers argued that even though there is no enforceable penalty now the text of the ACA indicates that the individual mandate is inextricably tied to its functioning Some of the justices appeared to agree with this assessment noting that in the 2012 case the ACA s defenders contended that the mandate was essential for ensuring successful operation of the ACA [While the Supreme Court Appears Likely to Uphold the Affordable](https://nashp.org/while-the-supreme-court-appears-likely-to-uphold-the-affordable-care-act-states-still-face-uncertainty)
41
+
42
+ [^5]: The state challengers main argument is that because the Supreme Court s 2012 decision centered on whether the mandate was a valid exercise of Congress taxing power with the mandate no longer generating federal revenue it is now unconstitutional Also the challengers argued that the specific language used in the text of the mandate obligates individuals to purchase coverage despite the fact there is no longer a penalty for not buying health insurance [While the Supreme Court Appears Likely to Uphold the Affordable](https://nashp.org/while-the-supreme-court-appears-likely-to-uphold-the-affordable-care-act-states-still-face-uncertainty)
43
+
44
+ [^6]: Sebelius upheld the constitutionality of the ACA based on Congress taxing power now that there is no revenue associated with the mandate penalty it can no longer be considered a tax and consequently the individual mandate is unconstitutional The plaintiffs also argue that because the individual mandate is so crucial to the ACA the entire law should be ruled unconstitutional [While the Supreme Court Appears Likely to Uphold the Affordable](https://nashp.org/while-the-supreme-court-appears-likely-to-uphold-the-affordable-care-act-states-still-face-uncertainty)
45
+
46
+ [^7]: At issue here is the assertion that the Commerce Clause can only reach individuals and entities engaged in an activity and because the plaintiffs maintain [Obamacare Is Unconstitutional - Cato Institute](https://cato.org/policy-report/march/april-2011/obamacare-unconstitutional)
AI_core/tools/__init__.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Legal Assistant tools initialization.

Instantiates each tool once and assembles the ``tools`` list that the agent
in ``AI_core.agent.agent_setup`` consumes.
"""
from langchain.tools import Tool

from AI_core.tools.summarization_tool import SummarizationTool
from AI_core.tools.report_generation_tool import ReportGenerationTool
from AI_core.tools.evidence_analysis_tool import EvidenceAnalysisTool
from AI_core.tools.legal_qa_tool import LegalQATool
from AI_core.tools.element_extraction_tool import ElementExtractionTool

# Create tool instances
summarization_tool = SummarizationTool()
report_generation_tool = ReportGenerationTool()
evidence_analysis_tool = EvidenceAnalysisTool()
legal_qa_tool = LegalQATool()
element_extraction_tool = ElementExtractionTool()

# Create tools list for the agent.
# NOTE(review): the summarization and QA tools are re-wrapped with
# Tool.from_function around their private ``_run`` methods (duplicating name
# and description), while the other three are passed as BaseTool instances
# directly. Confirm the wrapping is intentional — it bypasses the BaseTool
# layer (callbacks, args_schema) for those two tools.
tools = [
    Tool.from_function(
        func=summarization_tool._run,
        name="document_summarization_tool",
        description="Summarizes legal documents. Input should be a file path to a PDF or text document."
    ),
    report_generation_tool,
    evidence_analysis_tool,
    Tool.from_function(
        func=legal_qa_tool._run,
        name="legal_qa_tool",
        description="Answers legal questions. Input should be a clear legal question."
    ),
    element_extraction_tool
]

__all__ = ['tools', 'summarization_tool', 'report_generation_tool',
           'evidence_analysis_tool', 'legal_qa_tool', 'element_extraction_tool']
AI_core/tools/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (1.4 kB). View file
 
AI_core/tools/__pycache__/element_extraction_tool.cpython-312.pyc ADDED
Binary file (3.37 kB). View file
 
AI_core/tools/__pycache__/evidence_analysis_tool.cpython-312.pyc ADDED
Binary file (2.25 kB). View file
 
AI_core/tools/__pycache__/legal_qa_tool.cpython-312.pyc ADDED
Binary file (2.7 kB). View file
 
AI_core/tools/__pycache__/report_generation_tool.cpython-312.pyc ADDED
Binary file (3.03 kB). View file
 
AI_core/tools/__pycache__/summarization_tool.cpython-312.pyc ADDED
Binary file (2.92 kB). View file
 
AI_core/tools/element_extraction_tool.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Tool for extracting specific legal elements from texts.
"""
from langchain.tools import BaseTool
from langchain.prompts import ChatPromptTemplate

from AI_core.config import AGENT_LLM

class ElementExtractionTool(BaseTool):
    """Tool to extract specific legal elements from legal texts."""
    name: str = "legal_element_extraction_tool"
    description: str = "Extracts specific legal elements from legal texts such as contracts, judgments, or legal briefs."

    def _run(self, query: str) -> str:
        """
        Extract specific legal elements from texts.

        Args:
            query: Legal text to extract elements from

        Returns:
            str: Extracted legal elements formatted as a readable list, or
            an error message if extraction fails.
        """
        # JSON schema handed to the LLM via structured output; "parties" is
        # the only element required in every document type.
        schema = {
            "title": "Extractor",
            "description": "Extract relevant legal elements.",
            "type": "object",
            "properties": {
                "parties": {"type": "array", "items": {"type": "string"}, "description": "The parties involved in the legal document"},
                "dates": {"type": "array", "items": {"type": "string"}, "description": "Important dates mentioned in the document"},
                "obligations": {"type": "array", "items": {"type": "string"}, "description": "Legal obligations specified in the document"},
                "jurisdiction": {"type": "string", "description": "The legal jurisdiction that applies"},
                "legal_citations": {"type": "array", "items": {"type": "string"}, "description": "Citations of laws, regulations, or precedents"},
                "monetary_values": {"type": "array", "items": {"type": "string"}, "description": "Monetary amounts mentioned in the document"}
            },
            "required": ["parties"]
        }

        # Create extraction chain.
        # BUG FIX: the original passed SystemMessage/HumanMessage *instances*
        # to from_messages. Literal message objects are not treated as
        # templates by ChatPromptTemplate, so "{query}" was never
        # interpolated and the legal text never reached the model.
        # (role, template-string) tuples are templated correctly.
        extraction_prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a legal element extraction expert. Extract the requested information from the provided legal text."),
            ("human", "Extract the following information from this legal text: {query}")
        ])
        extraction_chain = extraction_prompt | AGENT_LLM.with_structured_output(schema=schema)

        # Run extraction
        try:
            result = extraction_chain.invoke({"query": query})
            # Format result for better readability
            formatted_result = "Extracted Legal Elements:\n\n"
            for key, value in result.items():
                if isinstance(value, list):
                    formatted_result += f"{key.capitalize()}:\n"
                    for item in value:
                        formatted_result += f"- {item}\n"
                else:
                    formatted_result += f"{key.capitalize()}: {value}\n"
            return formatted_result
        except Exception as e:
            return f"Error extracting elements: {str(e)}"
AI_core/tools/evidence_analysis_tool.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Tool for analyzing legal evidence using DEEPresearch.
"""
import requests
import json
from langchain.tools import BaseTool

from AI_core.config import DEEPSEARCH_API_URL

class EvidenceAnalysisTool(BaseTool):
    """Tool to analyze legal evidence using DEEPresearch."""
    name: str = "evidence_analysis_tool"
    description: str = "Analyzes legal evidence using DEEPresearch and builds relationships between entities. Input should be a legal query about evidence or case analysis."

    def _run(self, query: str) -> str:
        """
        Analyze legal evidence using DEEPresearch.

        Args:
            query: Legal query about evidence or case analysis

        Returns:
            str: Analysis results from DEEPresearch, or an error message if
            the request fails.
        """
        # NOTE(review): no Authorization header is sent — confirm the
        # DeepSearch endpoint is usable without an API key.
        headers = {
            'Content-Type': 'application/json'
        }

        data = {
            "model": "jina-deepsearch-v1",
            "messages": [
                {
                    "role": "system",
                    "content": "You are a legal evidence analyst. Your task is to analyze legal evidence, extract key entities, and establish relationships between them."
                },
                {
                    "role": "user",
                    "content": query
                }
            ],
            "stream": False,
            "reasoning_effort": "low",
            "max_attempts": 2,
            "no_direct_answer": False
        }

        try:
            # In production, use actual API call.
            # FIX: requests.post without a timeout can hang the agent forever;
            # deep-research calls are slow, so allow a generous 120 s.
            response = requests.post(
                DEEPSEARCH_API_URL, headers=headers, json=data, timeout=120
            )
            # FIX: surface HTTP errors (4xx/5xx) explicitly instead of
            # failing later with a confusing KeyError on 'choices'.
            response.raise_for_status()
            result = response.json()['choices'][0]['message']['content']
            return result
        except Exception as e:
            # Best-effort: the agent gets a readable error string rather
            # than an exception, matching the other tools' behavior.
            return f"Error analyzing evidence with DEEPresearch: {str(e)}"
AI_core/tools/legal_qa_tool.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Tool for answering legal questions using a knowledge base.
"""
from langchain.tools import BaseTool
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory

from AI_core.config import LLM

class LegalQATool(BaseTool):
    """Tool to answer legal questions using a knowledge base."""
    name: str = "legal_qa_tool"
    description: str = "Answers legal questions using a knowledge base of laws and regulations."
    # Declared as a class-level field so pydantic/BaseTool accepts the
    # attribute; the real instance is created in __init__.
    memory: ConversationBufferMemory = None

    def __init__(self):
        """Initialize the legal QA tool with conversation memory."""
        super().__init__()
        # Initialize memory in the constructor
        self.memory = ConversationBufferMemory(
            memory_key="chat_history",
            return_messages=True
        )

    def _run(self, query: str) -> str:
        """
        Answer legal questions using a knowledge base.

        Args:
            query: Legal question to answer

        Returns:
            str: Answer to the legal question
        """
        # In production environment:
        # 1. Load vector store with legal documents
        # 2. Create retriever from vector store
        # 3. Create ConversationalRetrievalChain

        template = """
        You are a legal assistant specializing in answering legal questions.

        Use your knowledge of laws and regulations to provide an accurate and helpful answer to the question.

        Question: {question}

        Provide a clear, concise answer citing relevant laws or precedents when appropriate.
        Include a disclaimer that your answer is not legal advice.
        """

        prompt = PromptTemplate(
            template=template,
            input_variables=["question"]
        )

        qa_chain = LLMChain(
            llm=LLM,
            prompt=prompt
        )

        response = qa_chain.run(question=query)

        # Update conversation memory.
        # NOTE(review): the history is recorded here but never fed back into
        # the prompt (the template has no chat_history variable), so answers
        # are effectively stateless — confirm whether memory should be wired
        # into the chain.
        self.memory.chat_memory.add_user_message(query)
        self.memory.chat_memory.add_ai_message(response)

        return response
AI_core/tools/report_generation_tool.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Tool for generating legal case reports.
"""
from typing import Type
from langchain.tools import BaseTool
from langchain.prompts import PromptTemplate

from AI_core.config import LLM
from AI_core.models.input_schemas import ReportGenerationInput

class ReportGenerationTool(BaseTool):
    """Tool to generate comprehensive legal case reports."""
    name: str = "case_report_generation_tool"
    # NOTE(review): the "SINGLE JSON STRING" wording looks like a leftover
    # from a ReAct-style agent; with args_schema set, tool-calling agents
    # pass structured kwargs instead — confirm which contract is intended.
    description: str = "Generates comprehensive legal case reports based on provided case information, input to this tool must be a SINGLE JSON STRING"
    args_schema: Type[ReportGenerationInput] = ReportGenerationInput

    def _run(self, case_name: str, case_facts: str, legal_issues: str, applicable_laws: str) -> str:
        """
        Generate a comprehensive legal case report.

        Args:
            case_name: Name of the legal case
            case_facts: Key facts of the case
            legal_issues: Legal issues identified in the case
            applicable_laws: Laws and regulations applicable to this case

        Returns:
            str: Formatted legal case report
        """
        # Markdown skeleton the LLM fills in: the three supplied sections are
        # interpolated verbatim; the remaining sections are generated.
        report_template = """
        You are a legal professional drafting a formal case report.

        Create a comprehensive legal case report with the following structure:

        # CASE REPORT: {case_name}

        ## EXECUTIVE SUMMARY
        Provide a brief overview of the case, its significance, and the outcome (if known).

        ## CASE FACTS
        {case_facts}

        ## LEGAL ISSUES
        {legal_issues}

        ## APPLICABLE LAWS AND REGULATIONS
        {applicable_laws}

        ## LEGAL ANALYSIS
        Analyze how the applicable laws relate to the facts and issues of this case. Include relevant legal precedents if appropriate.

        ## POTENTIAL ARGUMENTS
        Outline possible arguments for both sides of the case.

        ## CONCLUSION
        Provide a concluding assessment of the case's legal position, potential outcomes, and recommendations.
        """

        report_prompt = PromptTemplate(
            template=report_template,
            input_variables=["case_name", "case_facts", "legal_issues", "applicable_laws"]
        )

        # LCEL pipe: prompt -> chat model; the model returns a message object.
        report_chain = report_prompt | LLM

        output = report_chain.invoke(
            {
                "case_name": case_name,
                "case_facts": case_facts,
                "legal_issues": legal_issues,
                "applicable_laws": applicable_laws
            }
        )
        # .content unwraps the text from the chat model's message object.
        return output.content
AI_core/tools/summarization_tool.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tool for summarizing legal documents.
3
+ """
4
+ from langchain.tools import BaseTool
5
+ from langchain.chains import LLMChain, MapReduceDocumentsChain
6
+ from langchain.prompts import PromptTemplate
7
+ from langchain.document_loaders import PyPDFLoader
8
+
9
+ from AI_core.config import LLM, TEXT_SPLITTER
10
+
11
class SummarizationTool(BaseTool):
    """Tool to summarize legal documents using a map-reduce approach."""
    name: str = "document_summarization_tool"
    description: str = "Summarizes legal documents. Input should be a file path to a PDF or text document."

    def _run(self, file_path: str) -> str:
        """
        Run the document summarization process.

        Args:
            file_path: Path to the document to summarize (.pdf or plain text)

        Returns:
            str: Summarized content

        Raises:
            OSError: If the file cannot be opened.
        """
        # Load and chunk the document. A plain-text file must be converted
        # into Document objects: TEXT_SPLITTER.split_documents() operates on
        # Documents, so wrapping the raw string in a list (as before) breaks.
        if file_path.endswith('.pdf'):
            loader = PyPDFLoader(file_path)
            docs = TEXT_SPLITTER.split_documents(loader.load())
        else:
            with open(file_path, 'r', encoding='utf-8') as f:
                docs = TEXT_SPLITTER.create_documents([f.read()])

        # Map step - summarize each chunk independently.
        map_template = """
        You are a legal expert summarizing complex legal documents.
        Summarize the following text in a concise and accurate manner, preserving key legal points:

        {text}
        """
        map_prompt = PromptTemplate(template=map_template, input_variables=["text"])
        map_chain = LLMChain(llm=LLM, prompt=map_prompt, output_key="summary")

        # Reduce step - combine the per-chunk summaries into one overview.
        reduce_template = """
        You are a legal expert creating a comprehensive summary from multiple text segments.
        Combine these summaries into a cohesive overview of the entire document, organized by key legal themes and points:

        {summaries}
        """
        reduce_prompt = PromptTemplate(template=reduce_template, input_variables=["summaries"])
        reduce_chain = LLMChain(llm=LLM, prompt=reduce_prompt)

        # Run the map-reduce explicitly. The previous MapReduceDocumentsChain
        # setup was invalid: `reduce_documents_chain` must be a
        # ReduceDocumentsChain (wrapping a combine-documents chain), not a
        # bare LLMChain, so construction raised a validation error before any
        # summarization could run.
        chunk_summaries = [map_chain.run(text=doc.page_content) for doc in docs]
        return reduce_chain.run(summaries="\n\n".join(chunk_summaries))
backend/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ """
2
+ Backend package initialization.
3
+ """
backend/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (219 Bytes). View file
 
backend/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (208 Bytes). View file
 
backend/__pycache__/config.cpython-312.pyc ADDED
Binary file (868 Bytes). View file
 
backend/config.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Backend configuration settings.
"""
import os

# --- Server configuration ---------------------------------------------------
HOST = os.environ.get("HOST", "0.0.0.0")
PORT = int(os.environ.get("PORT", "8000"))
DEBUG = os.environ.get("DEBUG", "False").lower() == "true"

# --- API configuration ------------------------------------------------------
API_VERSION = "v1"
API_PREFIX = "/api/" + API_VERSION

# --- Logging ----------------------------------------------------------------
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")

# --- CORS -------------------------------------------------------------------
ALLOWED_ORIGINS = [
    "http://localhost:3000",
    "http://127.0.0.1:3000",
    "http://localhost:8080",
    "http://127.0.0.1:8080",
    "*",  # For development - remove in production
]

# --- Rate limiting ----------------------------------------------------------
RATE_LIMIT = "100/minute"
backend/endpoint/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ """
2
+ Endpoint package initialization.
3
+ """
backend/endpoint/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (229 Bytes). View file
 
backend/endpoint/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (218 Bytes). View file
 
backend/endpoint/__pycache__/api.cpython-312.pyc ADDED
Binary file (4.62 kB). View file
 
backend/endpoint/__pycache__/models.cpython-312.pyc ADDED
Binary file (2.27 kB). View file
 
backend/endpoint/__pycache__/server.cpython-312.pyc ADDED
Binary file (1.46 kB). View file
 
backend/endpoint/__pycache__/server.cpython-38.pyc ADDED
Binary file (1.03 kB). View file
 
backend/endpoint/api.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
API endpoints for the Legal Assistant service.
"""
import sys
import time
import uuid
import logging
from fastapi import FastAPI, HTTPException, Depends, Request, BackgroundTasks
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import asyncio
from typing import Dict, Optional

# Add AI_core to path so we can import it
# NOTE(review): `sys` is already imported above, so the re-import below is
# redundant (harmless, but could be dropped).
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))

# Import the process_legal_request function from AI_core
from AI_core.main import process_legal_request

# Import models and config
from .models import QueryRequest, QueryResponse, HealthResponse
from ..config import API_PREFIX, API_VERSION, ALLOWED_ORIGINS
25
+
26
# Setup logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Create FastAPI app
app = FastAPI(
    title="Legal Assistant API",
    description="API for interacting with the Legal AI Assistant",
    version=API_VERSION,
)

# Add CORS middleware
# NOTE(review): ALLOWED_ORIGINS contains "*" while allow_credentials=True;
# the CORS spec forbids a wildcard origin on credentialed requests, so
# browsers will reject them — pin explicit origins before production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Session storage to maintain context (process-local dict: lost on restart,
# not shared across workers).
# In production, use a persistent storage solution
sessions: Dict[str, Dict] = {}
52
+
53
@app.get(f"{API_PREFIX}/health", response_model=HealthResponse)
async def health_check():
    """
    Health check endpoint to verify the API is running correctly.

    Returns a static "ok" payload with the API version and per-component
    status flags.
    """
    component_status = {
        "api": "healthy",
        "agent": "healthy",
        "tools": "healthy",
    }
    return HealthResponse(
        status="ok",
        version=API_VERSION,
        components=component_status,
    )
67
+
68
@app.post(f"{API_PREFIX}/query", response_model=QueryResponse)
async def query(request: QueryRequest):
    """
    Process a legal query and return a response from the Legal AI Assistant.

    Generates a session id when the client does not supply one, forwards the
    query to the AI core, records the exchange in the session store, and
    returns the response with timing metadata.
    """
    # Generate session ID if not provided
    session_id = request.session_id or str(uuid.uuid4())

    # Log request (query truncated to keep log lines short)
    logger.info(f"Processing query for session {session_id}: {request.query[:50]}...")

    # Measure processing time
    start_time = time.time()

    try:
        # Process the query using the AI core
        response = await process_legal_request(request.query)

        # Calculate processing time
        processing_time = time.time() - start_time

        # Fix: record the exchange so the /sessions endpoints are usable.
        # Previously nothing ever wrote to `sessions`, so
        # GET /sessions/{session_id} could only return 404.
        session = sessions.setdefault(session_id, {"session_id": session_id, "history": []})
        session["history"].append({
            "query": request.query,
            "response": response,
            "processing_time": processing_time,
        })

        # Return the response
        return QueryResponse(
            response=response,
            session_id=session_id,
            metadata={"tools_used": []},  # In a production version, track tools used
            processing_time=processing_time
        )

    except Exception as e:
        # logger.exception also captures the traceback for debugging.
        logger.exception(f"Error processing query: {str(e)}")
        raise HTTPException(
            status_code=500,
            detail=f"Error processing your request: {str(e)}"
        )
103
+
104
@app.get(f"{API_PREFIX}/sessions/{{session_id}}")
async def get_session(session_id: str):
    """
    Get information about a specific session.

    Raises a 404 when the session id is unknown.
    """
    # EAFP: a single lookup instead of a membership test plus an index.
    try:
        return sessions[session_id]
    except KeyError:
        raise HTTPException(
            status_code=404,
            detail=f"Session {session_id} not found"
        )
116
+
117
@app.delete(f"{API_PREFIX}/sessions/{{session_id}}")
async def delete_session(session_id: str):
    """
    Delete a specific session.

    Succeeds whether or not the session exists (idempotent delete).
    """
    # pop() removes the entry when present and is a no-op otherwise.
    sessions.pop(session_id, None)
    return {"status": "success", "message": f"Session {session_id} deleted"}
backend/endpoint/models.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pydantic models for the API endpoints.
3
+ """
4
+ from pydantic import BaseModel, Field
5
+ from typing import Optional, List, Dict, Any
6
+
7
+
8
class QueryRequest(BaseModel):
    """Model for legal query requests.

    Carries the user's query text plus optional session tracking and
    free-form processing options.
    """
    query: str = Field(..., description="The legal query or request from the user")
    session_id: Optional[str] = Field(None, description="Session identifier for tracking conversation context")
    options: Optional[Dict[str, Any]] = Field(None, description="Additional options for query processing")
13
+
14
+
15
class QueryResponse(BaseModel):
    """Model for legal query responses.

    Wraps the assistant's answer together with the session id and
    optional metadata/timing information.
    """
    response: str = Field(..., description="The response from the legal assistant")
    session_id: Optional[str] = Field(None, description="Session identifier that tracks the conversation")
    metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata about the response")
    processing_time: Optional[float] = Field(None, description="Time taken to process the query in seconds")
21
+
22
+
23
class HealthResponse(BaseModel):
    """Model for health check responses.

    Reports overall API status, the API version, and a status string
    per component (e.g. "api", "agent", "tools").
    """
    status: str = Field(..., description="Health status of the API")
    version: str = Field(..., description="API version")
    components: Dict[str, str] = Field(..., description="Status of individual components")
backend/endpoint/server.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Server script to run the FastAPI application for the Legal Assistant API.
"""
import uvicorn
import logging
import os
from dotenv import load_dotenv

# Load environment variables from .env file if it exists
# (must run before the config import below so HOST/PORT/DEBUG see .env values)
load_dotenv()

# Import configuration
from ..config import HOST, PORT, DEBUG

# Configure logging (DEBUG flag raises the level from INFO to DEBUG)
logging.basicConfig(
    level=logging.INFO if not DEBUG else logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
21
+
22
def run_server():
    """
    Run the FastAPI server with uvicorn.

    Host, port, and debug behavior come from backend.config; reload and the
    uvicorn log level follow the DEBUG flag.
    """
    logger.info(f"Starting Legal Assistant API server at http://{HOST}:{PORT}")
    logger.info(f"Debug mode: {DEBUG}")

    uvicorn_log_level = "debug" if DEBUG else "info"
    uvicorn.run(
        "backend.endpoint.api:app",
        host=HOST,
        port=PORT,
        reload=DEBUG,
        log_level=uvicorn_log_level,
    )


if __name__ == "__main__":
    run_server()
backend/requirement.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ fastapi>=0.101.0
2
+ uvicorn>=0.23.0
3
+ pydantic>=2.0.0
4
+ python-dotenv>=1.0.0
5
+ httpx>=0.24.1
6
+ python-multipart>=0.0.6
run_server.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
"""
Main entry point to start the Legal Assistant API server.
"""
from backend.endpoint.server import run_server

# Thin launcher: all server configuration lives in backend.endpoint.server.
if __name__ == "__main__":
    run_server()