import io
import logging
import os
import re

import nltk
import pandas as pd
import streamlit as st
import torch
from docx import Document
from docx.shared import Pt
from dotenv import load_dotenv
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_groq import ChatGroq
from langdetect import detect
from transformers import pipeline

# Load environment variables (e.g. GROQ_API_KEY) from a local .env file.
load_dotenv()

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    logging.error("Missing Groq API key. Please set the GROQ_API_KEY environment variable.")
    st.error("API key is missing. Please provide a valid API key.")
    st.stop()  # don't construct the LLM client without a key

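# Groq-hosted Llama 3 (8B) chat model used for tone labeling; temperature 0.5
# trades a little determinism for more natural tone descriptions.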
llm = ChatGroq(temperature=0.5, groq_api_key=GROQ_API_KEY, model_name="llama3-8b-8192")

# Make sure the NLTK Punkt tokenizer models are available.
nltk.download("punkt", quiet=True)

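# Keyword lexicons for the rule-based tone fallback. Matching is a
# case-insensitive substring check, so keep entries lowercase.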
tone_categories = {
    "Emotional": ["urgent", "violence", "disappearances", "forced", "killing", "crisis", "concern"],
    "Harsh": ["corrupt", "oppression", "failure", "repression", "exploit", "unjust", "authoritarian"],
    "Somber": ["tragedy", "loss", "pain", "sorrow", "mourning", "grief", "devastation"],
    "Motivational": ["rise", "resist", "mobilize", "inspire", "courage", "change", "determination"],
    "Informative": ["announcement", "event", "scheduled", "update", "details", "protest", "statement"],
    "Positive": ["progress", "unity", "hope", "victory", "together", "solidarity", "uplifting"],
    "Angry": ["rage", "injustice", "fury", "resentment", "outrage", "betrayal"],
    "Fearful": ["threat", "danger", "terror", "panic", "risk", "warning"],
    "Sarcastic": ["brilliant", "great job", "amazing", "what a surprise", "well done", "as expected"],
    "Hopeful": ["optimism", "better future", "faith", "confidence", "looking forward"]
}

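# Frames scored by the zero-shot classifier. Only the keys are fed to the
# classifier; the keyword lists document each frame's intended scope.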
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
    "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"],
    "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"],
    "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"],
    "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"],
    "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"],
    "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"],
    "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"]
}

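# Zero-shot NLI classifier (DistilBART fine-tuned on MNLI) that grades how
# strongly each frame applies to a text.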
classifier = pipeline(
    "zero-shot-classification",
    model="valhalla/distilbart-mnli-12-3",
    device=0 if torch.cuda.is_available() else -1,  # GPU when available, else CPU
    torch_dtype="auto",
    batch_size=16
)

candidate_labels = ["Major Focus", "Significant Focus", "Minor Mention", "Not Applicable"]

def detect_language(text):
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"

def extract_tone(text):
    """Ask the LLM for tone labels; fall back to keyword matching on failure."""
    try:
        response = llm.invoke([
            SystemMessage(content="Analyze the tone of the following text and provide descriptive tone labels."),
            HumanMessage(content=text)
        ])
        return [tone.strip() for tone in response.content.split(",")]
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_tone_fallback(text)

def extract_tone_fallback(text):
    detected_tones = set()
    text_lower = text.lower()
    for category, keywords in tone_categories.items():
        if any(word in text_lower for word in keywords):
            detected_tones.add(category)
    return list(detected_tones) if detected_tones else ["Neutral"]

def extract_hashtags(text):
    return re.findall(r"#\w+", text)

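# Each frame is scored independently: the classifier picks whichever candidate
# label best completes "This text is {label} about {frame}."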
def get_frame_category_mapping(text):
    frames = list(frame_categories.keys())
    mapping = {}
    for frame in frames:
        hypothesis_template = f"This text is {{}} about {frame}."
        result = classifier(
            text,
            candidate_labels=candidate_labels,
            hypothesis_template=hypothesis_template
        )
        mapping[frame] = result["labels"][0]
    return mapping

def format_frame_categories_table(mapping):
    """
    Return a markdown table with one row per frame and columns for
    Major Focus, Significant Focus, Minor Mention, and Not Applicable.
    A tick (✓) marks the assigned category.
    """
    header = "| Frame | Major Focus | Significant Focus | Minor Mention | Not Applicable |\n"
    header += "| --- | --- | --- | --- | --- |\n"
    tick = "✓"
    rows = ""
    for frame, category in mapping.items():
        major = tick if category == "Major Focus" else ""
        significant = tick if category == "Significant Focus" else ""
        minor = tick if category == "Minor Mention" else ""
        not_applicable = tick if category == "Not Applicable" else ""
        rows += f"| {frame} | {major} | {significant} | {minor} | {not_applicable} |\n"
    return header + rows

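# Expected DOCX layout: a "Post N" heading paragraph followed by that post's
# caption paragraphs, repeated for each post.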
def extract_captions_from_docx(docx_file):
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post and text:  # skip empty paragraphs between posts
            captions[current_post].append(text)
    return {post: " ".join(lines) for post, lines in captions.items() if lines}

def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        return df.to_dict(orient="records")
    except Exception as e:
        logging.error(f"Error processing Excel file: {e}")
        return []

def merge_metadata_with_generated_data(generated_data, excel_metadata):
    for post_data in excel_metadata:
        post_number = f"Post {post_data.get('Post Number', len(generated_data) + 1)}"
        if post_number in generated_data:
            generated_data[post_number].update(post_data)
        else:
            generated_data[post_number] = post_data
    return generated_data

def create_docx_from_data(extracted_data):
    doc = Document()
    for post_number, data in extracted_data.items():
        doc.add_heading(post_number, level=1)
        ordered_keys = [
            "Post Number", "Date of Post", "Media Type", "Number of Pictures",
            "Number of Videos", "Number of Audios", "Likes", "Comments", "Tagged Audience",
            "Full Caption", "Language", "Tone", "Hashtags"
        ]
        for key in ordered_keys:
            value = data.get(key, "N/A")
            if key in ["Tone", "Hashtags"]:
                value = ", ".join(value) if isinstance(value, list) else value
            para = doc.add_paragraph()
            label = para.add_run(f"{key}: ")
            label.bold = True  # real bold instead of literal "**" markers
            label.font.size = Pt(11)
            body = para.add_run(str(value))
            body.font.size = Pt(11)

        if "FramesMapping" in data:
            doc.add_paragraph("Frames:")
            mapping = data["FramesMapping"]
            table = doc.add_table(rows=1, cols=5)
            table.style = "Light List Accent 1"
            hdr_cells = table.rows[0].cells
            hdr_cells[0].text = "Frame"
            hdr_cells[1].text = "Major Focus"
            hdr_cells[2].text = "Significant Focus"
            hdr_cells[3].text = "Minor Mention"
            hdr_cells[4].text = "Not Applicable"
            tick = "✓"
            for frame, category in mapping.items():
                row_cells = table.add_row().cells
                row_cells[0].text = frame
                row_cells[1].text = tick if category == "Major Focus" else ""
                row_cells[2].text = tick if category == "Significant Focus" else ""
                row_cells[3].text = tick if category == "Minor Mention" else ""
                row_cells[4].text = tick if category == "Not Applicable" else ""
        else:
            para = doc.add_paragraph()
            label = para.add_run("Frames: ")
            label.bold = True
            para.add_run(str(data.get("Frames", "N/A")))
        doc.add_paragraph("\n")
    return doc

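# Streamlit UI: collect text and/or files, run the analyses, and offer the
# combined results as a DOCX download.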
st.title("AI-Powered Coding Sheet Generator")
st.write("Enter text or upload a DOCX/Excel file for analysis:")

input_text = st.text_area("Input Text", height=200)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])

output_data = {}

if input_text:
    frame_mapping = get_frame_category_mapping(input_text)
    frames_table = format_frame_categories_table(frame_mapping)
    output_data["Manual Input"] = {
        "Full Caption": input_text,
        "Language": detect_language(input_text),
        "Tone": extract_tone(input_text),
        "Hashtags": extract_hashtags(input_text),
        "Frames": frames_table,
        "FramesMapping": frame_mapping
    }

if uploaded_docx:
    captions = extract_captions_from_docx(uploaded_docx)
    for post, text in captions.items():
        frame_mapping = get_frame_category_mapping(text)
        frames_table = format_frame_categories_table(frame_mapping)
        output_data[post] = {
            "Full Caption": text,
            "Language": detect_language(text),
            "Tone": extract_tone(text),
            "Hashtags": extract_hashtags(text),
            "Frames": frames_table,
            "FramesMapping": frame_mapping
        }

if uploaded_excel:
    excel_metadata = extract_metadata_from_excel(uploaded_excel)
    output_data = merge_metadata_with_generated_data(output_data, excel_metadata)

if output_data:
    for post_number, data in output_data.items():
        with st.expander(post_number):
            for key, value in data.items():
                if key == "FramesMapping":
                    continue  # raw mapping is already rendered via the "Frames" table
                if key == "Frames":
                    st.markdown(f"**{key}:**\n{value}")
                else:
                    st.write(f"**{key}:** {value}")

if output_data:
    docx_output = create_docx_from_data(output_data)
    docx_io = io.BytesIO()
    docx_output.save(docx_io)
    docx_io.seek(0)
    st.download_button(
        "Download Merged Analysis as DOCX",
        data=docx_io,
        file_name="coding_sheet.docx",
        mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
    )