import streamlit as st
import torch
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
from keybert import KeyBERT
import matplotlib.pyplot as plt
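# Load the models once at startup: KeyBERT (MiniLM sentence embeddings) for
# keyphrase extraction, BART for abstractive summarization, and GPT-2 for
# prompt-based generative summarization.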
kw_model = KeyBERT("sentence-transformers/paraphrase-MiniLM-L6-v2")
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

st.title("AI Summarizer: BERT + GPT-2")
st.write("Extract key points with **KeyBERT**, summarize with **BERT (BART)** and **GPT-2**, and compare their output lengths.")

text = st.text_area("Enter text to summarize:")
if st.button("Summarize"):
    if not text.strip():
        st.warning("Please enter some text!")
    else:
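        # Extract the top five keyphrases (1-2 words each) with KeyBERT.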
        key_points = kw_model.extract_keywords(text, keyphrase_ngram_range=(1, 2), stop_words='english', top_n=5)
        extracted_points = ", ".join([kp[0] for kp in key_points])
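        # Abstractive summary with BART.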
        bart_summary = summarizer(text, max_length=150, min_length=50, do_sample=False)[0]['summary_text']
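        # Generative summary with GPT-2: prepend a "Summarize:" prompt, generate up to
        # 150 new tokens, and decode only the newly generated tokens (not the prompt).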
        inputs = gpt2_tokenizer.encode("Summarize: " + text, return_tensors="pt", max_length=512, truncation=True)
        gpt2_summary_ids = gpt2_model.generate(inputs, max_new_tokens=150, num_return_sequences=1,
                                               no_repeat_ngram_size=2, pad_token_id=gpt2_tokenizer.eos_token_id)
        gpt2_summary = gpt2_tokenizer.decode(gpt2_summary_ids[0, inputs.shape[1]:], skip_special_tokens=True)
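        # Display the key points and both summaries.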
        st.subheader("Key Points")
        st.write(extracted_points)

        st.subheader("Summary (BERT - BART)")
        st.write(bart_summary)

        st.subheader("Summary (GPT-2)")
        st.write(gpt2_summary)
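        # Compare the two summaries by word count in a bar chart.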
        bart_length = len(bart_summary.split())
        gpt2_length = len(gpt2_summary.split())
        fig, ax = plt.subplots()
        ax.bar(["BERT (BART)", "GPT-2"], [bart_length, gpt2_length], color=["blue", "red"])
        ax.set_ylabel("Word Count")
        ax.set_title("Comparison of Summary Lengths")
        st.pyplot(fig)