import streamlit as st
from transformers import pipeline
import json
import langdetect
from keybert import KeyBERT
from sklearn.feature_extraction.text import CountVectorizer
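# Assumed dependencies (exact versions not pinned here): streamlit, transformers
# (with torch), keybert, langdetect, scikit-learn. A typical install:
#   pip install streamlit transformers torch keybert langdetect scikit-learn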

# Load pretrained models once; st.cache_resource keeps them in memory across Streamlit reruns.
@st.cache_resource
def load_models():
    return {
        "emotion": pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True),
        "sentiment": pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment"),
        "summarization": pipeline("summarization", model="facebook/bart-large-cnn"),
        "ner": pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", grouped_entities=True),
        "toxicity": pipeline("text-classification", model="unitary/unbiased-toxic-roberta"),
        "keyword_extraction": KeyBERT()
    }

models = load_models()

# Function: Emotion Detection
def analyze_emotions(text):
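    # With return_all_scores=True the pipeline returns one list of
    # {label, score} dicts per input, so results[0] covers the single input.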
    results = models["emotion"](text)
    return {r['label']: round(r['score'], 2) for r in results[0]}

# Function: Sentiment Analysis
def analyze_sentiment(text):
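    # This multilingual model rates sentiment from "1 star" to "5 stars";
    # keep only the top-scoring label and its confidence.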
    result = models["sentiment"](text)[0]
    return {result['label']: round(result['score'], 2)}

# Function: Text Summarization
def summarize_text(text):
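    # Rough guard: BART accepts ~1024 tokens, so truncating to 1024 characters
    # is a conservative, character-level approximation of that limit.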
    return models["summarization"](text[:1024])[0]['summary_text']

# Function: Keyword Extraction
def extract_keywords(text):
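    # ngram_range=(1, 2) lets KeyBERT score both unigrams and bigrams
    # against the document embedding.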
    vectorizer = CountVectorizer(ngram_range=(1, 2))
    return models["keyword_extraction"].extract_keywords(text, vectorizer=vectorizer, stop_words='english')

# Function: Named Entity Recognition (NER)
def analyze_ner(text):
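    # Note: keying the result by surface form means repeated mentions of the
    # same word collapse into a single entry.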
    entities = models["ner"](text)
    return {entity["word"]: entity["entity_group"] for entity in entities}

# Function: Language Detection
def detect_language(text):
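    # langdetect raises LangDetectException on empty or undetectable input,
    # and is non-deterministic unless DetectorFactory.seed is set.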
    try:
        return langdetect.detect(text)
    except langdetect.LangDetectException:
        return "Error detecting language"

# Function: Toxicity Detection
def detect_toxicity(text):
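    # By default the text-classification pipeline returns only the
    # top-scoring toxicity label for the input.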
    results = models["toxicity"](text)
    return {results[0]['label']: round(results[0]['score'], 2)}

# Streamlit UI
st.title("🚀 AI-Powered Text Intelligence App")
st.markdown("Analyze text with multiple NLP features: Emotion Detection, Sentiment Analysis, Summarization, NER, Keyword Extraction, Language Detection, and Toxicity Detection.")

# User Input
text_input = st.text_area("Enter text to analyze:", "")

if st.button("Analyze Text"):
    if text_input.strip():
        st.subheader("๐Ÿ”น Emotion Detection")
        st.json(analyze_emotions(text_input))

        st.subheader("๐Ÿ”น Sentiment Analysis")
        st.json(analyze_sentiment(text_input))

        st.subheader("๐Ÿ”น Text Summarization")
        st.write(summarize_text(text_input))

        st.subheader("๐Ÿ”น Keyword Extraction")
        st.json(extract_keywords(text_input))

        st.subheader("๐Ÿ”น Named Entity Recognition (NER)")
        st.json(analyze_ner(text_input))

        st.subheader("๐Ÿ”น Language Detection")
        st.write(f"Detected Language: `{detect_language(text_input)}`")

        st.subheader("๐Ÿ”น Toxicity Detection")
        st.json(detect_toxicity(text_input))

        # Save results to JSON
        results = {
            "emotion": analyze_emotions(text_input),
            "sentiment": analyze_sentiment(text_input),
            "summary": summarize_text(text_input),
            "keywords": extract_keywords(text_input),
            "ner": analyze_ner(text_input),
            "language": detect_language(text_input),
            "toxicity": detect_toxicity(text_input)
        }
        st.download_button("Download JSON Report", json.dumps(results, indent=2), "text_analysis.json", "application/json")
    else:
        st.warning("โš ๏ธ Please enter some text to analyze.")