milorable committed
Commit b9ae10a · verified · 1 Parent(s): f0ae5da

Create app.py

Files changed (1)
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+import streamlit as st
+from transformers import pipeline
+import json
+import langdetect
+from keybert import KeyBERT
+from sklearn.feature_extraction.text import CountVectorizer
+
+# Load Pretrained Models (cached across Streamlit reruns)
+@st.cache_resource
+def load_models():
+    return {
+        "emotion": pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base", return_all_scores=True),
+        "sentiment": pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment"),
+        "summarization": pipeline("summarization", model="facebook/bart-large-cnn"),
+        "ner": pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", grouped_entities=True),
+        "toxicity": pipeline("text-classification", model="unitary/unbiased-toxic-roberta"),
+        "keyword_extraction": KeyBERT()
+    }
+
+models = load_models()
+
+# Function: Emotion Detection
+def analyze_emotions(text):
+    results = models["emotion"](text)
+    return {r['label']: round(r['score'], 2) for r in results[0]}
+
+# Function: Sentiment Analysis
+def analyze_sentiment(text):
+    result = models["sentiment"](text)[0]
+    return {result['label']: round(result['score'], 2)}
+
+# Function: Text Summarization (only the first 1024 characters are summarized)
+def summarize_text(text):
+    return models["summarization"](text[:1024])[0]['summary_text']
+
+# Function: Keyword Extraction
+def extract_keywords(text):
+    vectorizer = CountVectorizer(ngram_range=(1, 2))
+    return models["keyword_extraction"].extract_keywords(text, vectorizer=vectorizer, stop_words='english')
+
+# Function: Named Entity Recognition (NER)
+def analyze_ner(text):
+    entities = models["ner"](text)
+    return {entity["word"]: entity["entity_group"] for entity in entities}
+
+# Function: Language Detection
+def detect_language(text):
+    try:
+        return langdetect.detect(text)
+    except langdetect.LangDetectException:
+        # langdetect raises LangDetectException when no language can be detected
+        return "Error detecting language"
+
+# Function: Toxicity Detection
+def detect_toxicity(text):
+    results = models["toxicity"](text)
+    return {results[0]['label']: round(results[0]['score'], 2)}
+
+# Streamlit UI
+st.title("🚀 AI-Powered Text Intelligence App")
+st.markdown("Analyze text with multiple NLP features: Emotion Detection, Sentiment Analysis, Summarization, NER, Keywords, Language Detection, and more!")
+
+# User Input
+text_input = st.text_area("Enter text to analyze:", "")
+
+if st.button("Analyze Text"):
+    if text_input.strip():
+        # Run each analysis once and reuse the results for display and the JSON report
+        results = {
+            "emotion": analyze_emotions(text_input),
+            "sentiment": analyze_sentiment(text_input),
+            "summary": summarize_text(text_input),
+            "keywords": extract_keywords(text_input),
+            "ner": analyze_ner(text_input),
+            "language": detect_language(text_input),
+            "toxicity": detect_toxicity(text_input)
+        }
+
+        st.subheader("🔹 Emotion Detection")
+        st.json(results["emotion"])
+
+        st.subheader("🔹 Sentiment Analysis")
+        st.json(results["sentiment"])
+
+        st.subheader("🔹 Text Summarization")
+        st.write(results["summary"])
+
+        st.subheader("🔹 Keyword Extraction")
+        st.json(results["keywords"])
+
+        st.subheader("🔹 Named Entity Recognition (NER)")
+        st.json(results["ner"])
+
+        st.subheader("🔹 Language Detection")
+        st.write(f"Detected Language: `{results['language']}`")
+
+        st.subheader("🔹 Toxicity Detection")
+        st.json(results["toxicity"])
+
+        # Offer the combined results as a downloadable JSON report
+        st.download_button("Download JSON Report", json.dumps(results, indent=2), "text_analysis.json", "application/json")
+    else:
+        st.warning("⚠️ Please enter some text to analyze.")
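
The app is started with "streamlit run app.py"; the imports imply streamlit, transformers (with a PyTorch or TensorFlow backend), keybert, langdetect, and scikit-learn as runtime dependencies. As a rough, hypothetical smoke test of the helper functions outside the Streamlit UI (not part of this commit, assuming the models can be downloaded on first use), something like the following could be run from the same directory:

# Hypothetical smoke test for app.py; not part of the commit.
# Importing app loads every pipeline, so the first run downloads several models,
# and the module-level Streamlit calls only emit warnings outside `streamlit run`.
import app

sample = "I absolutely love this new feature, it works beautifully!"
print(app.analyze_emotions(sample))    # per-emotion scores, e.g. {'joy': ..., 'neutral': ...}
print(app.analyze_sentiment(sample))   # star-rating label with its confidence score
print(app.detect_language(sample))     # expected: 'en'
print(app.extract_keywords(sample))    # list of (keyword, relevance) tuples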