from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from sentence_transformers import SentenceTransformer
from datasets import load_dataset
import faiss
import numpy as np
import streamlit as st

# Load the BillSum dataset
ds = load_dataset("FiscalNote/billsum") 
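# BillSum ships "train", "test", and "ca_test" splits; each record carries
# "text", "summary", and "title" fields (per the dataset card).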

# Initialize models
sbert_model = SentenceTransformer("all-mpnet-base-v2")
t5_tokenizer = AutoTokenizer.from_pretrained("t5-small")
t5_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
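
# Note: Streamlit re-executes this script on every interaction, so the models
# above (and the FAISS index below) are rebuilt on each rerun. Caching them is
# a common fix; a sketch only, not wired into this app:
#
# @st.cache_resource
# def load_models():
#     sbert = SentenceTransformer("all-mpnet-base-v2")
#     tok = AutoTokenizer.from_pretrained("t5-small")
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     return sbert, tok, model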

# Prepare data and build FAISS index
texts = ds["train"]["text"][:100]  # use the first 100 bills from the train split to keep indexing fast
case_embeddings = sbert_model.encode(texts, convert_to_tensor=True, show_progress_bar=True)

# Move the embeddings to CPU and convert to a float32 NumPy array for FAISS
case_embeddings_np = np.asarray(case_embeddings.cpu(), dtype=np.float32)
index = faiss.IndexFlatL2(case_embeddings_np.shape[1])
index.add(case_embeddings_np)
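# IndexFlatL2 performs exact (brute-force) L2 search over the full corpus. If
# the embeddings are not unit-normalized (this depends on the model's pooling
# config), L2 ranking is not identical to cosine similarity; normalizing with
# faiss.normalize_L2 and using IndexFlatIP would give cosine ranking instead.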

# Define retrieval and summarization functions
def retrieve_cases(query, top_k=3):
    """Return the top_k most similar bills as (text, corpus_index) pairs."""
    query_embedding = sbert_model.encode(query, convert_to_tensor=True)
    query_embedding_np = np.asarray(query_embedding.cpu(), dtype=np.float32)
    # FAISS expects a 2-D (n_queries, dim) query array
    _, indices = index.search(query_embedding_np.reshape(1, -1), top_k)
    return [(texts[i], i) for i in indices[0]]
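
# Example (hypothetical query): retrieve_cases("renewable energy tax credit", top_k=2)
# would return two (bill_text, corpus_index) pairs, nearest first.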

def summarize_text(text):
    inputs = t5_tokenizer("summarize: " + text, return_tensors="pt", max_length=512, truncation=True)
    outputs = t5_model.generate(inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
    return t5_tokenizer.decode(outputs[0], skip_special_tokens=True)
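
# Caveat: inputs are truncated to 512 tokens, so only the opening portion of a
# long bill reaches the model; summaries reflect the beginning of the text.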

# Streamlit UI
def main():
    st.title("Legal Case Summarizer")
    query = st.text_input("Enter your case search query here:")
    top_k = st.slider("Number of similar cases to retrieve:", 1, 5, 3)

    if st.button("Search"):
        if query.strip():
            try:
                results = retrieve_cases(query, top_k=top_k)
                # case_idx is the bill's position in the indexed corpus
                for i, (case_text, case_idx) in enumerate(results):
                    st.subheader(f"Case {i+1}")
                    st.write("*Original Text:*", case_text)
                    summary = summarize_text(case_text)
                    st.write("*Summary:*", summary)
            except Exception as e:
                st.error(f"An error occurred: {e}")
        else:
            st.warning("Please enter a query to search.")

if __name__ == "__main__":
    main()