MuhammadQASIM111 committed on
Commit f79f496 · verified · 1 Parent(s): 492222f

Upload 2 files

Files changed (2)
  1. app.py +59 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,59 @@
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+ from sentence_transformers import SentenceTransformer, util
+ from datasets import load_dataset
+ import faiss
+ import numpy as np
+ import streamlit as st
+ import torch
+
+ # Load the BillSum dataset (California test split)
+ dataset = load_dataset("billsum", split="ca_test")
+
+ # Initialize models
+ sbert_model = SentenceTransformer("all-mpnet-base-v2")
+ t5_tokenizer = AutoTokenizer.from_pretrained("t5-small")
+ t5_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
+
+ # Prepare data and build FAISS index
+ texts = dataset["text"][:100]  # limit to 100 samples for speed
+ case_embeddings = sbert_model.encode(texts, convert_to_tensor=True, show_progress_bar=True)
+
+ # Convert embeddings to a float32 numpy array, as FAISS requires
+ case_embeddings_np = np.asarray(case_embeddings.cpu(), dtype=np.float32)
+ index = faiss.IndexFlatL2(case_embeddings_np.shape[1])
+ index.add(case_embeddings_np)
+
+ # Define retrieval and summarization functions
+ def retrieve_cases(query, top_k=3):
+     query_embedding = sbert_model.encode(query, convert_to_tensor=True)
+     query_embedding_np = np.asarray(query_embedding.cpu(), dtype=np.float32)
+     _, indices = index.search(np.array([query_embedding_np]), top_k)
+     return [(texts[i], i) for i in indices[0]]
+
+ def summarize_text(text):
+     inputs = t5_tokenizer("summarize: " + text, return_tensors="pt", max_length=512, truncation=True)
+     outputs = t5_model.generate(inputs["input_ids"], max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
+     return t5_tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ # Streamlit UI
+ def main():
+     st.title("Legal Case Summarizer")
+     query = st.text_input("Enter your case search query here:")
+     top_k = st.slider("Number of similar cases to retrieve:", 1, 5, 3)
+
+     if st.button("Search"):
+         if query.strip():
+             try:
+                 results = retrieve_cases(query, top_k=top_k)
+                 for i, (case_text, case_idx) in enumerate(results):
+                     st.subheader(f"Case {i+1}")
+                     st.write("*Original Text:*", case_text)
+                     summary = summarize_text(case_text)
+                     st.write("*Summary:*", summary)
+             except Exception as e:
+                 st.error(f"An error occurred: {e}")
+         else:
+             st.warning("Please enter a query to search.")
+
+ if __name__ == "__main__":
+     main()
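
A design note on the retrieval above: IndexFlatL2 ranks by Euclidean distance, while sentence-embedding models such as all-mpnet-base-v2 are typically compared by cosine similarity; the two rankings coincide only on unit-normalized vectors. A minimal sketch of a cosine-equivalent setup (an alternative, not what this commit does) using FAISS's inner-product index:

# Hypothetical alternative to the IndexFlatL2 setup in app.py, not part of this commit:
# normalize embeddings and use an inner-product index so scores equal cosine similarity.
import faiss
import numpy as np

emb = np.random.rand(100, 768).astype(np.float32)  # stand-in for the SBERT case embeddings
faiss.normalize_L2(emb)                            # unit-normalize rows in place
index = faiss.IndexFlatIP(emb.shape[1])            # inner product == cosine on unit vectors
index.add(emb)

query = np.random.rand(1, 768).astype(np.float32)
faiss.normalize_L2(query)
scores, ids = index.search(query, 3)               # top-3 most similar cases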
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ transformers
+ sentence-transformers
+ faiss-cpu
+ datasets
+ streamlit
+ torch
+ numpy
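
To try the app locally (standard Streamlit usage, not stated in the commit), install the dependencies with pip install -r requirements.txt and launch with streamlit run app.py.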