import os
import re
import logging

import nltk
import pandas as pd
import streamlit as st
from docx import Document
from langdetect import detect
from transformers import pipeline
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
# Load environment variables
load_dotenv()
# Initialize logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
# Initialize LLM (Groq API)
llm = ChatGroq(temperature=0.5, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-8b-8192")
# Download required NLTK resources
nltk.download("punkt")
# Frame categories for fallback method (with Major, Significant, Minor focus)
frame_categories = {
    "Human Rights & Justice": ["rights", "law", "justice", "legal", "humanitarian"],
    "Political & State Accountability": ["government", "policy", "state", "corruption", "accountability"],
    "Gender & Patriarchy": ["gender", "women", "violence", "patriarchy", "equality"],
    "Religious Freedom & Persecution": ["religion", "persecution", "minorities", "intolerance", "faith"],
    "Grassroots Mobilization": ["activism", "community", "movement", "local", "mobilization"],
    "Environmental Crisis & Activism": ["climate", "deforestation", "water", "pollution", "sustainability"],
    "Anti-Extremism & Anti-Violence": ["extremism", "violence", "hate speech", "radicalism", "mob attack"],
    "Social Inequality & Economic Disparities": ["class privilege", "labor rights", "economic", "discrimination"],
    "Activism & Advocacy": ["justice", "rights", "demand", "protest", "march", "campaign", "freedom of speech"],
    "Systemic Oppression": ["discrimination", "oppression", "minorities", "marginalized", "exclusion"],
    "Intersectionality": ["intersecting", "women", "minorities", "struggles", "multiple oppression"],
    "Call to Action": ["join us", "sign petition", "take action", "mobilize", "support movement"],
    "Empowerment & Resistance": ["empower", "resist", "challenge", "fight for", "stand up"],
    "Climate Justice": ["environment", "climate change", "sustainability", "biodiversity", "pollution"],
    "Human Rights Advocacy": ["human rights", "violations", "honor killing", "workplace discrimination", "law reform"]
}
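# Tone categories for the fallback method. extract_tone_fallback below reads
# `tone_categories`, which is never defined in this file; the keyword lists
# here are illustrative assumptions, not the author's originals.
tone_categories = {
    "Angry & Critical": ["outrage", "condemn", "injustice", "shame", "corrupt"],
    "Hopeful & Inspirational": ["hope", "inspire", "change", "progress", "dream"],
    "Urgent & Alarmist": ["urgent", "crisis", "emergency", "now", "immediately"],
    "Sombre & Mournful": ["grief", "mourn", "loss", "tragedy", "victim"],
    "Defiant & Resistant": ["resist", "defy", "refuse", "stand up", "fight"]
}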
# Detect language
def detect_language(text):
    try:
        return detect(text)
    except Exception as e:
        logging.error(f"Error detecting language: {e}")
        return "unknown"
# Extract tone using Groq API (or fallback method)
def extract_tone(text):
    try:
        # ChatGroq is a LangChain chat model: call it via invoke() and read .content
        response = llm.invoke([
            ("system", "Analyze the tone of the following text and provide descriptive tone labels."),
            ("human", text)
        ])
        return response.content.split(", ")
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_tone_fallback(text)
# Fallback method for tone extraction
def extract_tone_fallback(text):
    detected_tones = set()
    text_lower = text.lower()
    for category, keywords in tone_categories.items():
        if any(word in text_lower for word in keywords):
            detected_tones.add(category)
    return list(detected_tones) if detected_tones else ["Neutral"]
# Extract frames using Groq API (or fallback)
def extract_frames(text):
    try:
        response = llm.invoke([
            ("system", "Classify the following text into relevant activism frames and assign Major, Significant, or Minor focus."),
            ("human", text)
        ])
        return response.content
    except Exception as e:
        logging.error(f"Groq API error: {e}")
        return extract_frames_fallback(text)
# Fallback method for frame extraction
def extract_frames_fallback(text):
    detected_frames = set()
    text_lower = text.lower()
    for category, keywords in frame_categories.items():
        if any(word in text_lower for word in keywords):
            # Keyword matching cannot grade intensity, so every hit is labeled Major Focus
            detected_frames.add(f"{category}: Major Focus")
    return list(detected_frames) if detected_frames else ["No Focus"]
# Extract captions from DOCX
def extract_captions_from_docx(docx_file):
    doc = Document(docx_file)
    captions = {}
    current_post = None
    for para in doc.paragraphs:
        text = para.text.strip()
        if re.match(r"Post \d+", text, re.IGNORECASE):
            current_post = text
            captions[current_post] = []
        elif current_post:
            captions[current_post].append(text)
    # Return dicts so Excel metadata can be merged in via update() and the
    # report writer can read fields with .get()
    return {post: {"Full Caption": " ".join(lines)} for post, lines in captions.items() if lines}
# Extract metadata from Excel file
def extract_metadata_from_excel(excel_file):
    try:
        df = pd.read_excel(excel_file)
        # "Post Number" values must match the "Post N" headings parsed from the
        # DOCX for the merge step below to find them
        metadata = df.set_index("Post Number").to_dict(orient="index")
        return metadata
    except Exception as e:
        logging.error(f"Error reading Excel file: {e}")
        return {}
# Merge metadata from Excel with the generated data
def merge_metadata_with_generated_data(generated_data, excel_metadata):
    for post, metadata in excel_metadata.items():
        if post in generated_data:
            generated_data[post].update(metadata)
    return generated_data
# Function to create the final DOCX with structured output (without tables)
def create_structured_output_without_table(merged_data, output_path):
    doc = Document()
    doc.add_heading('Extracted Social Media Data', 0)
    # Loop through each post and add its structured data
    for sr_no, (post, data) in enumerate(merged_data.items(), 1):
        doc.add_heading(f'Post {sr_no}', level=1)
        # Adding the details for each post
        doc.add_paragraph(f"Date of Post: {data.get('Date of Post', 'N/A')}")
        doc.add_paragraph(f"Media Type: {data.get('Media Type', 'N/A')}")
        doc.add_paragraph(f"No of Pictures: {data.get('No of Pictures', 0)}")
        doc.add_paragraph(f"No of Videos: {data.get('No of Videos', 0)}")
        doc.add_paragraph(f"No of Audios: {data.get('No of Audios', 0)}")
        doc.add_paragraph(f"Likes: {data.get('Likes', 'N/A')}")
        doc.add_paragraph(f"Comments: {data.get('Comments', 'N/A')}")
        doc.add_paragraph(f"Tagged Audience: {data.get('Tagged Audience', 'No')}")
        doc.add_paragraph(f"Caption: {data.get('Full Caption', 'N/A')}")
        doc.add_paragraph(f"Language of Caption: {data.get('Language', 'N/A')}")
        doc.add_paragraph(f"Total No of Hashtags: {len(data.get('Hashtags', []))}")
        if data.get('Hashtags'):
            doc.add_paragraph(f"Hashtags: {', '.join(data['Hashtags'])}")
        else:
            doc.add_paragraph("Hashtags: N/A")
        # Adding Frames for each post (the Groq path returns one string, the fallback a list)
        doc.add_heading("Frames", level=2)
        frames = data.get("Frames")
        if isinstance(frames, str):
            doc.add_paragraph(frames)
        elif frames:
            for frame in frames:
                doc.add_paragraph(f"- {frame}")
        else:
            doc.add_paragraph("No Frames available")
        doc.add_paragraph("\n")  # Add a space between posts
    # Save the document
    doc.save(output_path)
# Streamlit app setup
st.title("AI-Powered Activism Message Analyzer")
st.write("Enter text or upload a DOCX/Excel file for analysis:")
# Text input
input_text = st.text_area("Input Text", height=200)
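# Sketch (assumption): the original flow collects input_text but never analyzes
# it, so pasted text is run through the extractors defined above.
if input_text:
    st.subheader("Analysis of Input Text")
    st.write(f"Detected Language: {detect_language(input_text)}")
    st.write(f"Tones: {', '.join(extract_tone(input_text))}")
    st.write("Frames:", extract_frames(input_text))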
# File upload (DOCX)
uploaded_docx = st.file_uploader("Upload a DOCX file", type=["docx"])
# File upload (Excel)
uploaded_excel = st.file_uploader("Upload an Excel file", type=["xlsx"])
# Initialize output dictionary
output_data = {}
# Extract and process data based on file uploads or input text
if uploaded_docx:
    output_data = extract_captions_from_docx(uploaded_docx)
if uploaded_excel:
    metadata = extract_metadata_from_excel(uploaded_excel)
    output_data = merge_metadata_with_generated_data(output_data, metadata)
# Generate output
if output_data:
    # Process each post: detect caption language, pull hashtags, and classify frames
    for post, data in output_data.items():
        caption = data.get("Full Caption", "")
        data["Language"] = detect_language(caption)
        # Assumption: hashtags are recovered from the caption text itself
        data["Hashtags"] = re.findall(r"#\w+", caption)
        # Extract frames using Groq API or fallback method
        data["Frames"] = extract_frames(caption)
    # Call the function to generate the DOCX report
    create_structured_output_without_table(output_data, "final_output.docx")
    st.write("The DOCX file has been created and saved!")
    with open("final_output.docx", "rb") as f:
        st.download_button("Download DOCX", data=f.read(), file_name="final_output.docx")
# Further refinement can be added for additional features as necessary