import os
import io
import re
import logging

import pandas as pd
import streamlit as st
import nltk
from docx import Document
from langdetect import detect
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from transformers import pipeline

# Initialize logging before first use
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# Load environment variables
load_dotenv()

# Check if Groq API key is available
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    logging.error("Missing Groq API key. Please set the GROQ_API_KEY environment variable.")
    st.error("API key is missing. Please set the GROQ_API_KEY environment variable.")
    st.stop()  # halt the app; the LLM client below cannot be created without a key

# Initialize LLM (Groq API)
llm = ChatGroq(temperature=0.5, groq_api_key=GROQ_API_KEY, model_name="llama3-8b-8192")

# Download required NLTK resources (quiet=True avoids re-download noise on each rerun)
nltk.download("punkt", quiet=True)

# Tone categories for fallback method
tone_categories = {
    "Emotional": ["urgent", "violence", "disappearances", "forced", "killing", "crisis", "concern"],
    "Harsh": ["corrupt", "oppression", "failure", "repression", "exploit", "unjust", "authoritarian"],
    "Somber": ["tragedy", "loss", "pain", "sorrow", "mourning", "grief", "devastation"],
    "Motivational": ["rise", "resist", "mobilize", "inspire", "courage", "change", "determination"],
    "Informative": ["announcement", "event", "scheduled", "update", "details", "protest", "statement"],
    "Positive": ["progress", "unity", "hope", "victory", "together", "solidarity", "uplifting"],
    "Angry": ["rage", "injustice", "fury", "resentment", "outrage", "betrayal"],
    "Fearful": ["threat", "danger", "terror", "panic", "risk", "warning"],
    "Sarcastic": ["brilliant", "great job", "amazing", "what a surprise", "well done", "as expected"],
    "Hopeful": ["optimism", "better future", "faith", "confidence", "looking forward"]
}

# Frame categories for fallback method
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
    "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"],
    "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"],
    "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"],
    "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"],
    "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"],
    "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"],
    "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"]
}

# Initialize zero-shot classifier for qualitative frame categorization
classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
candidate_labels = ["Major Focus", "Significant Focus", "Minor Mention", "Not Applicable"]
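
# Illustrative example (scores hypothetical): for a frame such as "Climate Justice",
# the zero-shot pipeline tests hypotheses like "This text is Major Focus about
# Climate Justice." against the text and ranks the candidate labels by entailment score:
#   classifier("We must act on the climate emergency now.", candidate_labels,
#              hypothesis_template="This text is {} about Climate Justice.")
#   -> {"labels": ["Major Focus", ...], "scores": [0.62, ...], "sequence": ...}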

def detect_language(text):
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"

def extract_tone(text):
    """Ask the LLM for descriptive tone labels; fall back to keyword matching on failure."""
    try:
        # ChatGroq is a LangChain chat model: call it via invoke() with (role, content)
        # message tuples and read the reply from response.content.
        response = llm.invoke([
            ("system", "Analyze the tone of the following text and provide descriptive tone labels, separated by commas."),
            ("user", text),
        ])
        return [tone.strip() for tone in response.content.split(",")]
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_tone_fallback(text)

def extract_tone_fallback(text):
    detected_tones = set()
    text_lower = text.lower()
    for category, keywords in tone_categories.items():
        if any(word in text_lower for word in keywords):
            detected_tones.add(category)
    return list(detected_tones) if detected_tones else ["Neutral"]
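
# Note: the fallback matches keywords as substrings of the lowercased text, so stems can
# match inside longer words; e.g. extract_tone_fallback("a growing crisis") -> ["Emotional"]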

def extract_hashtags(text):
    return re.findall(r"#\w+", text)
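
# e.g. extract_hashtags("Justice now #HumanRights #Act") -> ["#HumanRights", "#Act"]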

# -------------------------------------------------------------------
# New functions for qualitative frame categorization using zero-shot classification
# -------------------------------------------------------------------

def get_frame_category_mapping(text):
    """
    For each frame category defined in frame_categories, this function uses a zero-shot classification
    approach to qualitatively assess how strongly the text discusses the frame. The classifier returns one of:
    "Major Focus", "Significant Focus", "Minor Mention", or "Not Applicable".
    """
    mapping = {}
    for frame in frame_categories.keys():
        hypothesis_template = f"This text is {{}} about {frame}."
        result = classifier(text, candidate_labels=candidate_labels, hypothesis_template=hypothesis_template)
        best_label = result["labels"][0]  # select the highest scoring label
        mapping[frame] = best_label
    return mapping

def format_frame_categories_table(mapping):
    """
    Returns a markdown-formatted table that displays each frame along with four columns:
    Major Focus, Significant Focus, Minor Mention, and Not Applicable.
    A tick (✓) is shown only in the column corresponding to the assigned category.
    """
    header = "| Frame | Major Focus | Significant Focus | Minor Mention | Not Applicable |\n"
    header += "| --- | --- | --- | --- | --- |\n"
    rows = ""
    tick = "✓"
    for frame, category in mapping.items():
        major = tick if category == "Major Focus" else ""
        significant = tick if category == "Significant Focus" else ""
        minor = tick if category == "Minor Mention" else ""
        not_applicable = tick if category == "Not Applicable" else ""
        rows += f"| {frame} | {major} | {significant} | {minor} | {not_applicable} |\n"
    return header + rows
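
# Illustrative rendered table (one row per frame, tick in the assigned column):
# | Frame | Major Focus | Significant Focus | Minor Mention | Not Applicable |
# | --- | --- | --- | --- | --- |
# | Climate Justice | ✓ |  |  |  |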

# -------------------------------------------------------------------
# Existing functions for file processing
# -------------------------------------------------------------------

def extract_captions_from_docx(docx_file):
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post:
            captions[current_post].append(text)
    return {post: " ".join(lines) for post, lines in captions.items() if lines}
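
# Assumed DOCX layout (illustrative): a "Post N" line starts each post and the
# paragraphs that follow are its caption, e.g.
#   Post 1
#   First line of the caption...
#   Post 2
#   ...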

def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        extracted_data = df.to_dict(orient="records")
        return extracted_data
    except Exception as e:
        logging.error(f"Error processing Excel file: {e}")
        return []

def merge_metadata_with_generated_data(generated_data, excel_metadata):
    for post_data in excel_metadata:
        post_number = f"Post {post_data.get('Post Number', len(generated_data) + 1)}"
        if post_number in generated_data:
            generated_data[post_number].update(post_data)
        else:
            generated_data[post_number] = post_data
    return generated_data
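
# Example: an Excel row {"Post Number": 2, "Likes": 120} merges into the "Post 2"
# entry, adding or overwriting the "Likes" field on the generated analysis.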

def create_docx_from_data(extracted_data):
    doc = Document()
    for post_number, data in extracted_data.items():
        doc.add_heading(post_number, level=1)
        ordered_keys = [
            "Post Number", "Date of Post", "Media Type", "Number of Pictures",
            "Number of Videos", "Number of Audios", "Likes", "Comments", "Tagged Audience",
            "Full Caption", "Language", "Tone", "Hashtags", "Frames"
        ]
        for key in ordered_keys:
            value = data.get(key, "N/A")
            if key in ["Tone", "Hashtags"]:
                value = ", ".join(value) if isinstance(value, list) else value
            # Use a bold run for the label; python-docx does not render "**" markdown
            paragraph = doc.add_paragraph()
            paragraph.add_run(f"{key}: ").bold = True
            paragraph.add_run(str(value))
        doc.add_paragraph()
    return doc

# -------------------------------------------------------------------
# Streamlit App UI
# -------------------------------------------------------------------

st.title("AI-Powered Coding Sheet Generator")
st.write("Enter text or upload a DOCX/Excel file for analysis:")

input_text = st.text_area("Input Text", height=200)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])

output_data = {}

if input_text:
    frame_mapping = get_frame_category_mapping(input_text)
    frames_table = format_frame_categories_table(frame_mapping)
    output_data["Manual Input"] = {
        "Full Caption": input_text,
        "Language": detect_language(input_text),
        "Tone": extract_tone(input_text),
        "Hashtags": extract_hashtags(input_text),
        "Frames": frames_table,
    }

if uploaded_docx:
    captions = extract_captions_from_docx(uploaded_docx)
    for caption, text in captions.items():
        frame_mapping = get_frame_category_mapping(text)
        frames_table = format_frame_categories_table(frame_mapping)
        output_data[caption] = {
            "Full Caption": text,
            "Language": detect_language(text),
            "Tone": extract_tone(text),
            "Hashtags": extract_hashtags(text),
            "Frames": frames_table,
        }

if uploaded_excel:
    excel_metadata = extract_metadata_from_excel(uploaded_excel)
    output_data = merge_metadata_with_generated_data(output_data, excel_metadata)

if output_data:
    for post_number, data in output_data.items():
        with st.expander(post_number):
            for key, value in data.items():
                if key == "Frames":
                    # A blank line after the label is required for the markdown table to render
                    st.markdown(f"**{key}:**\n\n{value}")
                else:
                    st.write(f"**{key}:** {value}")

if output_data:
    docx_output = create_docx_from_data(output_data)
    docx_io = io.BytesIO()
    docx_output.save(docx_io)
    docx_io.seek(0)
    st.download_button(
        "Download Merged Analysis as DOCX",
        data=docx_io,
        file_name="coding_sheet.docx",
        mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    )