"""Streamlit app that analyzes activism-related social media posts: it detects caption
language, asks a Groq-hosted LLM for tone labels, extracts hashtags and thematic frames
(from manual input or a DOCX of captions), merges optional Excel engagement metadata,
and exports the combined analysis as a DOCX report."""

import io
import logging
import os
import re
from collections import Counter

import nltk
import pandas as pd
import streamlit as st
from docx import Document
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langdetect import detect

load_dotenv()
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    logging.error("Missing Groq API key. Please set the GROQ_API_KEY environment variable.")
    st.error("API key is missing. Please provide a valid API key.")
    st.stop()  # halt here rather than instantiating the ChatGroq client with an empty key
# Groq-hosted Llama 3 8B; temperature 0.5 leaves some room for varied tone labels
llm = ChatGroq(temperature=0.5, groq_api_key=GROQ_API_KEY, model_name="llama3-8b-8192")
nltk.download("punkt", quiet=True)  # fetch the sentence tokenizer data without log noise
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
}
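# Note: keywords are matched as plain substrings, and one term can feed several frames,
# e.g. "violence" counts toward both "Gender & Patriarchy" and "Anti-Extremism & Anti-Violence".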
def detect_language(text):
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"
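# detect() returns short codes such as "en" or "ur"; empty or symbol-only input raises,
# which is why failures collapse to "unknown" above.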
def extract_tone(text):
    """Ask the LLM for comma-separated tone labels; fall back to Neutral on any failure."""
    try:
        # ChatGroq has no .chat() and returns an AIMessage, so use invoke() and .content
        # rather than OpenAI-style dict access.
        response = llm.invoke([
            ("system", "Analyze the tone of the following text and provide descriptive tone labels, separated by commas."),
            ("human", text),
        ])
        return [label.strip() for label in response.content.split(",")]
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return ["Neutral"]
def extract_hashtags(text):
    return re.findall(r"#\w+", text)
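# Example: extract_hashtags("End impunity #JusticeForAll #HumanRights")
# -> ["#JusticeForAll", "#HumanRights"]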
def categorize_frames(frame_list):
    """Rank detected frames by frequency: the most frequent frame is the Major Focus,
    the next two are Significant Focus, and everything else is a Minor Mention."""
    frame_counter = Counter(frame_list)
    categorized_frames = {"Major Focus": [], "Significant Focus": [], "Minor Mention": []}

    sorted_frames = sorted(frame_counter.items(), key=lambda x: x[1], reverse=True)
    for i, (frame, _) in enumerate(sorted_frames):
        if i == 0:
            categorized_frames["Major Focus"].append(frame)
        elif i < 3:
            categorized_frames["Significant Focus"].append(frame)
        else:
            categorized_frames["Minor Mention"].append(frame)

    return categorized_frames
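# Example: with four frames each detected once, the split is purely positional:
# categorize_frames(["A", "B", "C", "D"])
# -> {"Major Focus": ["A"], "Significant Focus": ["B", "C"], "Minor Mention": ["D"]}
# (sorted() is stable, so ties keep their detection order).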
def extract_frames_fallback(text):
    """Keyword-based frame detection used when no LLM analysis is available."""
    detected_frames = []
    text_lower = text.lower()

    for category, keywords in frame_categories.items():
        if any(word in text_lower for word in keywords):
            detected_frames.append(category)

    return categorize_frames(detected_frames)
def extract_captions_from_docx(docx_file):
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post and text:  # skip empty paragraphs so the joined caption stays clean
            captions[current_post].append(text)
    return {post: " ".join(lines) for post, lines in captions.items() if lines}
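# Expected DOCX layout: a paragraph starting with "Post 1", "Post 2", ... opens a post,
# and every following paragraph is folded into that post's caption until the next marker.
# Posts that never accumulate any caption text are dropped by the final comprehension.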
def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        required_columns = ["Date", "Media Type", "Number of Pictures", "Number of Videos",
                            "Number of Audios", "Likes", "Comments", "Tagged Audience"]
        missing = [col for col in required_columns if col not in df.columns]
        if missing:
            st.error(f"Excel file is missing required columns: {', '.join(missing)}")
            return []

        extracted_data = []
        for index, row in df.iterrows():
            post_data = {
                "Post Number": f"Post {index + 1}",
                "Date of Post": row.get("Date", "N/A"),
                "Media Type": row.get("Media Type", "N/A"),
                "Number of Pictures": row.get("Number of Pictures", 0),
                "Number of Videos": row.get("Number of Videos", 0),
                "Number of Audios": row.get("Number of Audios", 0),
                "Likes": row.get("Likes", 0),
                "Comments": row.get("Comments", 0),
                "Tagged Audience": row.get("Tagged Audience", "No"),
            }
            extracted_data.append(post_data)
        return extracted_data
    except Exception as e:
        logging.error(f"Error processing Excel file: {e}")
        return []
def merge_metadata_with_generated_data(generated_data, excel_metadata):
    for post_data in excel_metadata:
        post_number = post_data["Post Number"]
        if post_number in generated_data:
            generated_data[post_number].update(post_data)
        else:
            generated_data[post_number] = post_data

    return generated_data
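# Note: the merge is keyed on the literal "Post N" label, so DOCX post markers must match
# the Excel row numbering exactly (row 1 -> "Post 1"); Excel rows with no matching caption
# become metadata-only entries.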
def create_docx_from_data(extracted_data):
    doc = Document()
    for post_number, data in extracted_data.items():
        doc.add_heading(post_number, level=1)
        for key, value in data.items():
            paragraph = doc.add_paragraph()
            paragraph.add_run(f"{key}: ").bold = True  # real bold runs; literal "**" would not render in Word
            paragraph.add_run(str(value))
        doc.add_paragraph("")
    return doc
st.title("AI-Powered Activism Message Analyzer")
st.write("Enter text or upload a DOCX/Excel file for analysis:")

input_text = st.text_area("Input Text", height=200)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])

output_data = {}
if input_text:
    output_data["Manual Input"] = {
        "Full Caption": input_text,
        "Language": detect_language(input_text),
        "Tone": extract_tone(input_text),
        "Hashtags": extract_hashtags(input_text),
        "Frames": extract_frames_fallback(input_text),
    }
if uploaded_docx:
    captions = extract_captions_from_docx(uploaded_docx)
    for post, text in captions.items():  # keys are "Post N" labels, values are caption text
        output_data[post] = {
            "Full Caption": text,
            "Language": detect_language(text),
            "Tone": extract_tone(text),
            "Hashtags": extract_hashtags(text),
            "Frames": extract_frames_fallback(text),
        }
if uploaded_excel:
    excel_metadata = extract_metadata_from_excel(uploaded_excel)
    output_data = merge_metadata_with_generated_data(output_data, excel_metadata)
if output_data:
    docx_output = create_docx_from_data(output_data)
    docx_io = io.BytesIO()
    docx_output.save(docx_io)
    docx_io.seek(0)
    st.download_button(
        "Download Merged Analysis as DOCX",
        data=docx_io,
        file_name="merged_analysis.docx",
        mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )
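# Launch with: streamlit run <this_script>.py  (Streamlit reruns the script top to bottom
# on every interaction, so the analysis refreshes whenever the inputs change).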