"""Streamlit app that analyzes activist social-media posts: it detects language,
tone, hashtags, and framing, then exports the combined results as a DOCX report."""

import io
import logging
import os
import re
from collections import Counter

import pandas as pd
import streamlit as st
from docx import Document
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langdetect import detect

load_dotenv()

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    logging.error("Missing Groq API key. Please set the GROQ_API_KEY environment variable.")
    st.error("API key is missing. Please provide a valid API key.")
    st.stop()  # halt the app instead of constructing a client with no key

llm = ChatGroq(temperature=0.5, groq_api_key=GROQ_API_KEY, model_name="llama3-8b-8192")

# Keyword lexicon used to infer tone when the LLM call is unavailable or fails.
tone_categories = {
    "Emotional": ["urgent", "violence", "disappearances", "forced", "killing", "crisis", "concern"],
    "Harsh": ["corrupt", "oppression", "failure", "repression", "exploit", "unjust", "authoritarian"],
    "Somber": ["tragedy", "loss", "pain", "sorrow", "mourning", "grief", "devastation"],
    "Motivational": ["rise", "resist", "mobilize", "inspire", "courage", "change", "determination"],
    "Informative": ["announcement", "event", "scheduled", "update", "details", "protest", "statement"],
    "Positive": ["progress", "unity", "hope", "victory", "together", "solidarity", "uplifting"],
    "Angry": ["rage", "injustice", "fury", "resentment", "outrage", "betrayal"],
    "Fearful": ["threat", "danger", "terror", "panic", "risk", "warning"],
    "Sarcastic": ["brilliant", "great job", "amazing", "what a surprise", "well done", "as expected"],
    "Hopeful": ["optimism", "better future", "faith", "confidence", "looking forward"]
}

# Keyword lexicon used to detect issue frames in a caption.
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
    "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"],
    "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"],
    "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"],
    "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"],
    "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"],
    "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"],
    "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"]
}

def detect_language(text):
    """Return the ISO 639-1 code langdetect infers, or "unknown" on failure."""
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"

def extract_tone(text):
    """Ask the Groq LLM for tone labels, falling back to keyword matching on error."""
    try:
        response = llm.invoke([
            ("system", "Analyze the tone of the following text and provide descriptive tone labels."),
            ("human", text),
        ])
        return response.content.split(", ")
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_tone_fallback(text)

def extract_tone_fallback(text):
    """Label tone by keyword matching against tone_categories."""
    detected_tones = set()
    text_lower = text.lower()
    for category, keywords in tone_categories.items():
        if any(word in text_lower for word in keywords):
            detected_tones.add(category)
    return list(detected_tones) if detected_tones else ["Neutral"]

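# A quick sanity check of the fallback (hypothetical input; output order may
# vary because matched categories are collected in a set):
#   extract_tone_fallback("An urgent crisis demands courage")
#   -> ["Emotional", "Motivational"]
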
def extract_hashtags(text):
    return re.findall(r"#\w+", text)

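# e.g. extract_hashtags("March with us! #RiseUp #Justice") -> ["#RiseUp", "#Justice"]
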
def categorize_frames(frame_list):
    """Rank detected frames by frequency into major/significant/minor buckets."""
    frame_counter = Counter(frame_list)
    categorized_frames = {"Major Focus": [], "Significant Focus": [], "Minor Mention": []}

    sorted_frames = sorted(frame_counter.items(), key=lambda x: x[1], reverse=True)

    for i, (frame, count) in enumerate(sorted_frames):
        if i == 0:
            categorized_frames["Major Focus"].append(frame)
        elif i < 3:
            categorized_frames["Significant Focus"].append(frame)
        else:
            categorized_frames["Minor Mention"].append(frame)

    return categorized_frames

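# Illustrative call (hypothetical counts): the most frequent frame becomes the
# major focus, the next two are significant, and anything after that is minor:
#   categorize_frames(["Climate Justice", "Climate Justice", "Call to Action"])
#   -> {"Major Focus": ["Climate Justice"],
#       "Significant Focus": ["Call to Action"],
#       "Minor Mention": []}
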
def extract_frames_fallback(text):
    """Detect frames by keyword matching, weighted by how many keywords appear."""
    detected_frames = []
    text_lower = text.lower()

    for category, keywords in frame_categories.items():
        keyword_count = sum(1 for word in keywords if word in text_lower)
        if keyword_count > 0:
            # Repeat the category once per matched keyword so categorize_frames
            # can rank frames by strength instead of treating every hit equally.
            detected_frames.extend([category] * keyword_count)

    return categorize_frames(detected_frames)

def extract_captions_from_docx(docx_file):
    """Split a DOCX into {"Post N": caption} entries using "Post N" marker paragraphs."""
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post:
            captions[current_post].append(text)
    return {post: " ".join(lines) for post, lines in captions.items() if lines}

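# Assumed input layout for the DOCX parser above, e.g.:
#   Post 1
#   Stand with us this Friday. #RiseUp
#   Post 2
#   ...
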
def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        return df.to_dict(orient="records")
    except Exception as e:
        logging.error(f"Error processing Excel file: {e}")
        return []

def merge_metadata_with_generated_data(generated_data, excel_metadata):
    """Fold Excel rows into the generated analysis, keyed by "Post Number"."""
    for post_data in excel_metadata:
        post_number = f"Post {post_data.get('Post Number', len(generated_data) + 1)}"
        if post_number in generated_data:
            generated_data[post_number].update(post_data)
        else:
            generated_data[post_number] = post_data
    return generated_data

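# Hypothetical merge: rows whose "Post Number" matches an existing post enrich
# it, while unmatched rows are added as new posts:
#   merged = merge_metadata_with_generated_data(
#       {"Post 1": {"Tone": ["Somber"]}},
#       [{"Post Number": 1, "Likes": 42}],
#   )
#   # -> {"Post 1": {"Tone": ["Somber"], "Post Number": 1, "Likes": 42}}
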
def create_docx_from_data(extracted_data):
    """Build a DOCX report with one heading per post and one line per field."""
    doc = Document()

    for post_number, data in extracted_data.items():
        doc.add_heading(post_number, level=1)

        ordered_keys = [
            "Post Number", "Date of Post", "Media Type", "Number of Pictures",
            "Number of Videos", "Number of Audios", "Likes", "Comments", "Tagged Audience",
            "Full Caption", "Language", "Tone", "Hashtags", "Frames"
        ]

        for key in ordered_keys:
            value = data.get(key, "N/A")

            if key in ["Tone", "Hashtags"]:
                value = ", ".join(value) if isinstance(value, list) else value
            elif key == "Frames" and isinstance(value, dict):
                frame_text = "\n".join([f"  {category}: {', '.join(frames)}" for category, frames in value.items() if frames])
                value = f"\n{frame_text}" if frame_text else "N/A"

            # Markdown-style ** markers do not render as bold in DOCX; use a bold run.
            paragraph = doc.add_paragraph()
            paragraph.add_run(f"{key}: ").bold = True
            paragraph.add_run(str(value))

        doc.add_paragraph("\n")

    return doc

st.title("AI-Powered Activism Message Analyzer")

st.write("Enter text or upload a DOCX/Excel file for analysis:")

input_text = st.text_area("Input Text", height=200)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])

output_data = {}

if input_text:
    output_data["Manual Input"] = {
        "Full Caption": input_text,
        "Language": detect_language(input_text),
        "Tone": extract_tone(input_text),
        "Hashtags": extract_hashtags(input_text),
        "Frames": extract_frames_fallback(input_text),
    }

if uploaded_docx:
    captions = extract_captions_from_docx(uploaded_docx)
    for post, text in captions.items():
        output_data[post] = {
            "Full Caption": text,
            "Language": detect_language(text),
            "Tone": extract_tone(text),
            "Hashtags": extract_hashtags(text),
            "Frames": extract_frames_fallback(text),
        }

if uploaded_excel:
    excel_metadata = extract_metadata_from_excel(uploaded_excel)
    output_data = merge_metadata_with_generated_data(output_data, excel_metadata)

if output_data:
    for post_number, data in output_data.items():
        with st.expander(post_number):
            for key, value in data.items():
                st.write(f"**{key}:** {value}")

    docx_output = create_docx_from_data(output_data)
    docx_io = io.BytesIO()
    docx_output.save(docx_io)
    docx_io.seek(0)
    st.download_button(
        "Download Merged Analysis as DOCX",
        data=docx_io,
        file_name="merged_analysis.docx",
        mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )

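# To run the app locally (assuming this file is saved as app.py):
#   streamlit run app.py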