import os
import re
import logging
import nltk
import pandas as pd
import streamlit as st
from docx import Document
from langdetect import detect
from transformers import pipeline
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
# Load environment variables
load_dotenv()
# Initialize logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Initialize LLM (Groq API)
llm = ChatGroq(temperature=0.5, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-8b-8192")
# Download required NLTK resources
nltk.download("punkt")
# Frame categories for fallback method (with Major, Significant, Minor focus)
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
    "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"],
    "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"],
    "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"],
    "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"],
    "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"],
    "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"],
    "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"]
}
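# Tone categories for the fallback method. extract_tone_fallback() below relies on a
# tone_categories mapping that is not defined anywhere else in this file; the category
# names and keyword lists here are an assumed, illustrative set and should be tuned to
# the project's own tone taxonomy.
tone_categories = {
    "Emotional": ["urgent", "violence", "disappearances", "forced", "killing", "crisis", "concern"],
    "Critical": ["corrupt", "oppression", "failure", "repression", "exploit", "unjust"],
    "Hopeful": ["hope", "progress", "change", "solidarity", "empower", "together"],
    "Informative": ["announcement", "event", "scheduled", "update", "details", "statement"],
    "Urgent": ["emergency", "immediate", "act now", "deadline"]
}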
# Detect language
def detect_language(text):
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"
# Extract tone using Groq API (or fallback method)
def extract_tone(text):
    try:
        # ChatGroq exposes invoke(); the reply is an AIMessage whose .content holds the text
        response = llm.invoke([
            ("system", "Analyze the tone of the following text and provide descriptive tone labels."),
            ("user", text)
        ])
        return [tone.strip() for tone in response.content.split(",")]
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_tone_fallback(text)
# Fallback method for tone extraction
def extract_tone_fallback(text):
    detected_tones = set()
    text_lower = text.lower()
    for category, keywords in tone_categories.items():
        if any(word in text_lower for word in keywords):
            detected_tones.add(category)
    return list(detected_tones) if detected_tones else ["Neutral"]
# Extract frames using Groq API (or fallback)
def extract_frames(text):
    try:
        response = llm.invoke([
            ("system", "Classify the following text into relevant activism frames and assign Major, Significant, or Minor focus."),
            ("user", text)
        ])
        # Normalize the reply into a list of lines so downstream code can iterate over frames
        return [line.strip("-• ").strip() for line in response.content.split("\n") if line.strip()]
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_frames_fallback(text)
# Fallback method for frame extraction
def extract_frames_fallback(text):
    detected_frames = set()
    text_lower = text.lower()
    for category, keywords in frame_categories.items():
        if any(word in text_lower for word in keywords):
            detected_frames.add(f"{category}: Major Focus")
    return list(detected_frames) if detected_frames else ["No Focus"]
# Extract captions from DOCX
def extract_captions_from_docx(docx_file):
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post:
            captions[current_post].append(text)
    return {post: " ".join(lines) for post, lines in captions.items() if lines}
# Extract metadata from Excel file
def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        # Rows are expected to be keyed by a "Post Number" column
        metadata = df.set_index("Post Number").to_dict(orient="index")
        return metadata
    except Exception as e:
        logging.error(f"Error reading Excel file: {e}")
        return {}
# Merge metadata from Excel with the generated data
def merge_metadata_with_generated_data(generated_data, excel_metadata):
    for post, metadata in excel_metadata.items():
        # Excel rows are keyed by "Post Number" (typically an integer); normalize to the
        # "Post N" headings produced by extract_captions_from_docx before merging
        key = post if isinstance(post, str) else f"Post {post}"
        if key in generated_data:
            generated_data[key].update(metadata)
    return generated_data
# Function to create the final DOCX with structured output (without tables)
def create_structured_output_without_table(merged_data, output_path):
    doc = Document()
    doc.add_heading('Extracted Social Media Data', 0)
    # Loop through each post and add its structured data
    for sr_no, (post, data) in enumerate(merged_data.items(), 1):
        doc.add_heading(f'Post {sr_no}', level=1)
        # Adding the details for each post
        doc.add_paragraph(f"Date of Post: {data.get('Date of Post', 'N/A')}")
        doc.add_paragraph(f"Media Type: {data.get('Media Type', 'N/A')}")
        doc.add_paragraph(f"No of Pictures: {data.get('No of Pictures', 0)}")
        doc.add_paragraph(f"No of Videos: {data.get('No of Videos', 0)}")
        doc.add_paragraph(f"No of Audios: {data.get('No of Audios', 0)}")
        doc.add_paragraph(f"Likes: {data.get('Likes', 'N/A')}")
        doc.add_paragraph(f"Comments: {data.get('Comments', 'N/A')}")
        doc.add_paragraph(f"Tagged Audience: {data.get('Tagged Audience', 'No')}")
        doc.add_paragraph(f"Caption: {data.get('Full Caption', 'N/A')}")
        doc.add_paragraph(f"Language of Caption: {data.get('Language', 'N/A')}")
        doc.add_paragraph(f"Total No of Hashtags: {len(data.get('Hashtags', []))}")
        if data.get('Hashtags'):
            doc.add_paragraph(f"Hashtags: {', '.join(data['Hashtags'])}")
        else:
            doc.add_paragraph("Hashtags: N/A")
        # Adding Frames for each post
        doc.add_heading("Frames", level=2)
        if data.get("Frames"):
            for frame in data['Frames']:
                doc.add_paragraph(f"- {frame}")
        else:
            doc.add_paragraph("No Frames available")
        doc.add_paragraph("\n")  # Add a space between posts
    # Save the document
    doc.save(output_path)
# Streamlit app setup
st.title("AI-Powered Activism Message Analyzer")
st.write("Enter text or upload a DOCX/Excel file for analysis:")
# Text input
input_text = st.text_area("Input Text", height=200)
# File upload (DOCX)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])
# File upload (Excel)
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])
# Initialize output dictionary
output_data = {}
# Extract and process data based on file uploads or input text
if uploaded_docx:
    captions = extract_captions_from_docx(uploaded_docx)
    # Wrap each caption in a dict so Excel metadata and analysis results can be attached per post
    output_data = {post: {"Full Caption": caption} for post, caption in captions.items()}
if uploaded_excel:
    metadata = extract_metadata_from_excel(uploaded_excel)
    output_data = merge_metadata_with_generated_data(output_data, metadata)
# Generate output
if output_data:
    # Process each post: detect the caption language and extract frames via the Groq API or fallback
    for post, data in output_data.items():
        caption = data.get("Full Caption", "")
        data["Language"] = detect_language(caption) if caption else "N/A"
        data["Frames"] = extract_frames(caption) if caption else []
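        # Assumed addition (not in the original flow): derive hashtags from the caption with a
        # simple regex when the Excel metadata did not supply any.
        if not data.get("Hashtags") and caption:
            data["Hashtags"] = re.findall(r"#\w+", caption)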
    # Generate the DOCX report and offer it for download
    create_structured_output_without_table(output_data, "final_output.docx")
    st.write("The DOCX file has been created and saved!")
    with open("final_output.docx", "rb") as f:
        st.download_button("Download DOCX", data=f.read(), file_name="final_output.docx")
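# Assumed extension (not in the original flow): when no files are uploaded but text was pasted
# into the text area, analyze it directly with the same tone/frame helpers.
elif input_text.strip():
    st.subheader("Analysis of Input Text")
    st.write(f"Detected language: {detect_language(input_text)}")
    st.write(f"Tones: {', '.join(extract_tone(input_text))}")
    st.write("Frames:")
    for frame in extract_frames(input_text):
        st.write(f"- {frame}")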
# Further refinement can be added for additional features as necessary