# Author: Surbhi
# Summarization demo using BERT and GPT-2
# (origin commit: e1baa80)
import streamlit as st
import torch
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
from keybert import KeyBERT
import matplotlib.pyplot as plt
# Load models once and reuse them across Streamlit reruns.
# Streamlit re-executes the whole script on every widget interaction;
# without caching, all four models would be downloaded/initialized on
# every button click.
@st.cache_resource
def _load_models():
    """Build and return (KeyBERT model, BART summarizer, GPT-2 model, GPT-2 tokenizer)."""
    kw = KeyBERT("sentence-transformers/paraphrase-MiniLM-L6-v2")
    summ = pipeline("summarization", model="facebook/bart-large-cnn")
    lm = GPT2LMHeadModel.from_pretrained("gpt2")
    tok = GPT2Tokenizer.from_pretrained("gpt2")
    return kw, summ, lm, tok


kw_model, summarizer, gpt2_model, gpt2_tokenizer = _load_models()
# Page header and input widget.
# NOTE(review): the original title emoji was mojibake ("πŸ”" — UTF-8 bytes
# decoded with the wrong codec); restored to the intended 🔍 magnifier.
st.title("🔍 AI Summarizer: BERT + GPT-2")
st.write("Extract key points with **KeyBERT**, summarize with **BERT (BART)** and **GPT-2**, and compare their accuracy.")
# Free-form user input; summarization only runs when the button is pressed.
text = st.text_area("Enter text to summarize:")
if st.button("Summarize"):
    if not text.strip():
        # Guard: nothing to summarize.
        st.warning("Please enter some text!")
    else:
        # --- Key-phrase extraction (KeyBERT): top 5 phrases of 1-2 words ---
        key_points = kw_model.extract_keywords(
            text, keyphrase_ngram_range=(1, 2), stop_words='english', top_n=5
        )
        # extract_keywords returns (phrase, score) pairs; keep phrases only.
        extracted_points = ", ".join([kp[0] for kp in key_points])

        # --- Abstractive summary via BART (deterministic: do_sample=False) ---
        bart_summary = summarizer(
            text, max_length=150, min_length=50, do_sample=False
        )[0]['summary_text']

        # --- Prompt-based summary via GPT-2 ---
        inputs = gpt2_tokenizer.encode(
            "Summarize: " + text, return_tensors="pt", max_length=512, truncation=True
        )
        # BUG FIX: the original passed max_length=150 to generate(), which
        # raises/over-truncates whenever the *prompt itself* exceeds 150
        # tokens (prompts may be up to 512 here). max_new_tokens bounds only
        # the continuation. pad_token_id is set explicitly because GPT-2 has
        # no pad token by default.
        gpt2_summary_ids = gpt2_model.generate(
            inputs,
            max_new_tokens=150,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            pad_token_id=gpt2_tokenizer.eos_token_id,
        )
        # BUG FIX: decode only the newly generated tokens — decoding the full
        # sequence would echo the "Summarize: <input>" prompt back as the
        # "summary".
        gpt2_summary = gpt2_tokenizer.decode(
            gpt2_summary_ids[0][inputs.shape[-1]:], skip_special_tokens=True
        )

        # --- Display results ---
        # NOTE(review): section emoji were mojibake in the original; restored.
        st.subheader("🔑 Key Points")
        st.write(extracted_points)
        st.subheader("📖 Summary (BERT - BART)")
        st.write(bart_summary)
        st.subheader("🤖 Summary (GPT-2)")
        st.write(gpt2_summary)

        # --- Compare summary lengths (word count) ---
        bart_length = len(bart_summary.split())
        gpt2_length = len(gpt2_summary.split())

        fig, ax = plt.subplots()
        ax.bar(
            ["BERT (BART)", "GPT-2"],
            [bart_length, gpt2_length],
            color=["blue", "red"],
        )
        ax.set_ylabel("Word Count")
        ax.set_title("Comparison of Summary Lengths")
        st.pyplot(fig)