import os
import re
import logging

import pandas as pd
import streamlit as st
from docx import Document
from langdetect import detect
from langchain_groq import ChatGroq
from dotenv import load_dotenv

# Load environment variables (expects GROQ_API_KEY in .env)
load_dotenv()

# Initialize logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Initialize LLM (Groq API); read the key from the environment instead of
# passing the variable name as a literal string
llm = ChatGroq(temperature=0.5, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-8b-8192")

# Tone categories for fallback method
tone_categories = {
    "Emotional": ["urgent", "violence", "disappearances", "forced", "killing", "crisis", "concern"],
    "Harsh": ["corrupt", "oppression", "failure", "repression", "exploit", "unjust", "authoritarian"],
    "Somber": ["tragedy", "loss", "pain", "sorrow", "mourning", "grief", "devastation"],
    "Motivational": ["rise", "resist", "mobilize", "inspire", "courage", "change", "determination"],
    "Informative": ["announcement", "event", "scheduled", "update", "details", "protest", "statement"],
    "Positive": ["progress", "unity", "hope", "victory", "together", "solidarity", "uplifting"],
    "Angry": ["rage", "injustice", "fury", "resentment", "outrage", "betrayal"],
    "Fearful": ["threat", "danger", "terror", "panic", "risk", "warning"],
    "Sarcastic": ["brilliant", "great job", "amazing", "what a surprise", "well done", "as expected"],
    "Hopeful": ["optimism", "better future", "faith", "confidence", "looking forward"]
}

# Frame categories for fallback method
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
    "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"],
    "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"],
    "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"],
    "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"],
    "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"],
    "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"],
    "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"]
}

# Detect language
def detect_language(text):
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"

# Extract tone using Groq API (or fallback method)
def extract_tone(text):
    try:
        response = llm.invoke([
            ("system", "Analyze the tone of the following text and provide descriptive tone labels."),
            ("human", text)
        ])
        # ChatGroq returns a message object; the generated text lives in .content
        return response.content.split(", ")
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_tone_fallback(text)

# Fallback method for tone extraction
def extract_tone_fallback(text):
    detected_tones = set()
    text_lower = text.lower()
    for category, keywords in tone_categories.items():
        if any(word in text_lower for word in keywords):
            detected_tones.add(category)
    return list(detected_tones) if detected_tones else ["Neutral"]
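
# Illustrative behavior of the fallback (simple keyword substring matching):
# the text "An urgent appeal to resist repression" matches "urgent"
# (Emotional), "resist" (Motivational), and "repression" (Harsh), so it would
# return those three labels in arbitrary order (the result is set-backed).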

# Extract hashtags
def extract_hashtags(text):
    return re.findall(r"#\w+", text)
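
# Example: extract_hashtags("Join the march! #JusticeNow #Rise-Up") returns
# ["#JusticeNow", "#Rise"]; \w+ stops at the first non-word character, so
# hyphenated tags are truncated.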

# Extract frames using Groq API (or fallback)
def extract_frames(text):
    try:
        response = llm.invoke([
            ("system", "Classify the following text into relevant activism frames and assign Major, Significant, or Minor focus."),
            ("human", text)
        ])
        return response.content
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_frames_fallback(text)

# Fallback method for frame extraction
def extract_frames_fallback(text):
    detected_frames = set()
    text_lower = text.lower()
    for category, keywords in frame_categories.items():
        if any(word in text_lower for word in keywords):
            detected_frames.add(category)
    return list(detected_frames)

# Extract captions from DOCX
def extract_captions_from_docx(docx_file):
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post and text:  # skip blank paragraphs between posts
            captions[current_post].append(text)
    return {post: " ".join(lines) for post, lines in captions.items() if lines}
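
# Expected DOCX layout (assumed from the regex above): each caption starts
# with a "Post N" marker paragraph, and every following paragraph belongs to
# that post until the next marker, e.g.
#   Post 1
#   Caption text for the first post... #hashtag
#   Post 2
#   Caption text for the second post...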

# Extract metadata from Excel file
def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        # Assumes columns 'Post Number', 'Likes', 'Comments', 'Media Type', where
        # 'Post Number' values match the DOCX post labels (e.g. "Post 1") so the
        # merge below can line them up
        metadata = df.set_index("Post Number").to_dict(orient="index")
        return metadata
    except Exception as e:
        logging.error(f"Error reading Excel file: {e}")
        return {}

# Merge metadata from Excel with the generated data
def merge_metadata_with_generated_data(generated_data, excel_metadata):
    for post, metadata in excel_metadata.items():
        if post in generated_data:
            generated_data[post].update(metadata)
    return generated_data
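
# Illustrative merge (hypothetical values): an analyzed DOCX entry such as
#   {"Post 1": {"Full Caption": "...", "Tone": ["Somber"], ...}}
# combined with Excel metadata such as
#   {"Post 1": {"Likes": 120, "Comments": 8, "Media Type": "Image"}}
# yields a single "Post 1" record carrying both the analysis and the
# engagement fields.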

# Streamlit app
st.title("AI-Powered Activism Message Analyzer")

st.write("Enter text or upload a DOCX/Excel file for analysis:")

# Text input
input_text = st.text_area("Input Text", height=200)

# File upload (DOCX)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])

# File upload (Excel)
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])

# Initialize output dictionary
output_data = {}

# Process Text Input
if input_text:
    output_data["Manual Input"] = {
        "Full Caption": input_text,
        "Language": detect_language(input_text),
        "Tone": extract_tone(input_text),
        "Hashtags": extract_hashtags(input_text),
        "Frames": extract_frames(input_text),
    }
    st.success("Analysis completed for text input.")

# Process DOCX file
if uploaded_docx:
    captions = extract_captions_from_docx(uploaded_docx)
    for caption, text in captions.items():
        output_data[caption] = {
            "Full Caption": text,
            "Language": detect_language(text),
            "Tone": extract_tone(text),
            "Hashtags": extract_hashtags(text),
            "Frames": extract_frames(text),
        }
    st.success(f"Analysis completed for {len(captions)} posts from DOCX.")

# Process Excel file and merge its metadata into the generated data
if uploaded_excel:
    excel_metadata = extract_metadata_from_excel(uploaded_excel)
    st.success(f"Excel metadata extracted for {len(excel_metadata)} posts.")
    output_data = merge_metadata_with_generated_data(output_data, excel_metadata)

# Display results
if output_data:
    st.write(output_data)
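
# A possible extension (not part of the original script): render the merged
# results as a table rather than raw JSON, assuming every post dict shares
# the same keys.
#   df = pd.DataFrame.from_dict(output_data, orient="index")
#   st.dataframe(df)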