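"""Flask front end for an RSS news aggregator.

Articles are fetched in a background thread, embedded into one or more local
Chroma vector databases, and synced with the Hugging Face Hub. The view
functions below read from every ``chroma_db*`` folder, deduplicate articles,
group them by category, and expose JSON endpoints the front end can poll for
updates.
"""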
import glob
import hashlib
import logging
import os
import threading
import time
from datetime import datetime

from flask import Flask, render_template, request, jsonify
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceEmbeddings

from rss_processor import fetch_rss_feeds, process_and_store_articles, vector_db, download_from_hf_hub, upload_to_hf_hub, clean_text
app = Flask(__name__)

# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Global flag to track background loading
loading_complete = True  # Start as True to allow initial rendering
last_update_time = time.time()
last_data_hash = None  # Track the hash of the last data to detect changes
def load_feeds_in_background():
    """Fetch RSS feeds, store them in the vector DB, and sync to the Hub."""
    global loading_complete, last_update_time
    try:
        logger.info("Starting background RSS feed fetch")
        articles = fetch_rss_feeds()
        logger.info(f"Fetched {len(articles)} articles")
        process_and_store_articles(articles)
        last_update_time = time.time()
        logger.info("Background feed processing complete")
        upload_to_hf_hub()
    except Exception as e:
        logger.error(f"Error in background feed loading: {e}")
    finally:
        loading_complete = True
def get_all_docs_from_dbs():
    """Aggregate documents and metadata from all Chroma DB folders."""
    all_docs = {'documents': [], 'metadatas': []}
    seen_ids = set()
    for db_path in glob.glob("chroma_db*"):
        if not os.path.isdir(db_path):
            continue
        try:
            temp_vector_db = Chroma(
                persist_directory=db_path,
                embedding_function=HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2"),
                collection_name="news_articles"
            )
            db_data = temp_vector_db.get(include=['documents', 'metadatas'])
            if db_data.get('documents') and db_data.get('metadatas'):
                for doc, meta in zip(db_data['documents'], db_data['metadatas']):
                    doc_id = f"{meta.get('title', 'No Title')}|{meta.get('link', '')}|{meta.get('published', 'Unknown Date')}"
                    if doc_id not in seen_ids:
                        seen_ids.add(doc_id)
                        all_docs['documents'].append(doc)
                        all_docs['metadatas'].append(meta)
        except Exception as e:
            logger.error(f"Error loading DB {db_path}: {e}")
    return all_docs
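# Illustrative note: the aggregate mirrors the shape of a single Chroma .get()
# result, e.g. {'documents': ["..."], 'metadatas': [{"title": ..., "link": ..., ...}]},
# so callers can zip() documents with metadatas just as they would for one collection.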
def compute_data_hash(categorized_articles):
    """Compute a hash of the current articles to detect changes."""
    if not categorized_articles:
        return ""
    # Create a sorted string representation of the articles for consistent hashing
    data_str = ""
    for cat, articles in sorted(categorized_articles.items()):
        for article in sorted(articles, key=lambda x: x["published"]):
            data_str += f"{cat}|{article['title']}|{article['link']}|{article['published']}|"
    return hashlib.sha256(data_str.encode('utf-8')).hexdigest()
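# Example (illustrative sketch): compute_data_hash() is deterministic over the
# category/title/link/published fields, so get_updates() can compare hashes to
# decide whether the front end needs a refresh.
#   sample = {"Tech": [{"title": "A", "link": "http://a", "published": "2025-01-01T00:00:00"}]}
#   compute_data_hash(sample) == compute_data_hash(dict(sample))  # -> True
#   compute_data_hash({})                                         # -> ""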
@app.route("/")  # assumed route path
def index():
    """Render the home page from existing data and refresh feeds in the background."""
    global loading_complete, last_update_time, last_data_hash

    # Check if any DB exists; if not, download from Hugging Face
    db_exists = any(os.path.exists(db_path) for db_path in glob.glob("chroma_db*"))
    if not db_exists:
        logger.info("No Chroma DB found, downloading from Hugging Face Hub...")
        download_from_hf_hub()

    # Start background RSS feed update
    loading_complete = False
    threading.Thread(target=load_feeds_in_background, daemon=True).start()

    # Load existing data immediately
    try:
        all_docs = get_all_docs_from_dbs()
        total_docs = len(all_docs['documents'])
        logger.info(f"Total articles across all DBs at startup: {total_docs}")
        if not all_docs.get('metadatas'):
            logger.info("No articles in any DB yet")
            return render_template("index.html", categorized_articles={}, has_articles=False, loading=True)

        # Process and categorize articles with deduplication
        enriched_articles = []
        seen_keys = set()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                try:
                    published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
                except (ValueError, TypeError):
                    published = "1970-01-01T00:00:00"
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })

        enriched_articles.sort(key=lambda x: x["published"], reverse=True)
        categorized_articles = {}
        for article in enriched_articles:
            cat = article["category"]
            categorized_articles.setdefault(cat, []).append(article)

        categorized_articles = dict(sorted(categorized_articles.items(), key=lambda x: x[0].lower()))
        for cat in categorized_articles:
            categorized_articles[cat] = sorted(categorized_articles[cat], key=lambda x: x["published"], reverse=True)[:10]
            if len(categorized_articles[cat]) >= 2:
                logger.debug(f"Category {cat} top 2: {categorized_articles[cat][0]['title']} | {categorized_articles[cat][1]['title']}")

        # Compute initial data hash
        last_data_hash = compute_data_hash(categorized_articles)
        logger.info(f"Displaying articles at startup: {sum(len(articles) for articles in categorized_articles.values())} total")
        return render_template("index.html",
                               categorized_articles=categorized_articles,
                               has_articles=True,
                               loading=True)
    except Exception as e:
        logger.error(f"Error retrieving articles at startup: {e}")
        return render_template("index.html", categorized_articles={}, has_articles=False, loading=True)
@app.route("/search", methods=["POST"])  # assumed route path
def search():
    """Filter stored articles by a case-insensitive substring match on title or description."""
    query = request.form.get('search')
    if not query:
        logger.info("Empty search query received")
        return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})
    try:
        logger.info(f"Searching for: {query}")
        all_docs = get_all_docs_from_dbs()
        if not all_docs.get('metadatas'):
            return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False})

        enriched_articles = []
        seen_keys = set()
        query_lower = query.lower()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            # Lowercase both sides so the match is truly case-insensitive
            if query_lower in title.lower() or query_lower in description.lower():
                description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
                key = f"{title}|{link}|{published}|{description_hash}"
                if key not in seen_keys:
                    seen_keys.add(key)
                    enriched_articles.append({
                        "title": title,
                        "link": link,
                        "description": description,
                        "category": meta.get("category", "Uncategorized"),
                        "published": published,
                        "image": meta.get("image", "svg"),
                    })

        categorized_articles = {}
        for article in enriched_articles:
            cat = article["category"]
            categorized_articles.setdefault(cat, []).append(article)

        logger.info(f"Found {len(enriched_articles)} unique articles across {len(categorized_articles)} categories")
        return jsonify({
            "categorized_articles": categorized_articles,
            "has_articles": bool(enriched_articles),
            "loading": False
        })
    except Exception as e:
        logger.error(f"Search error: {e}")
        return jsonify({"categorized_articles": {}, "has_articles": False, "loading": False}), 500
@app.route("/check_loading")  # assumed route path
def check_loading():
    """Report whether the background feed refresh has finished."""
    global loading_complete, last_update_time
    if loading_complete:
        return jsonify({"status": "complete", "last_update": last_update_time})
    return jsonify({"status": "loading"}), 202
@app.route("/get_updates")  # assumed route path
def get_updates():
    """Return refreshed, categorized articles only when the underlying data has changed."""
    global last_update_time, last_data_hash
    try:
        all_docs = get_all_docs_from_dbs()
        if not all_docs.get('metadatas'):
            return jsonify({"articles": [], "last_update": last_update_time, "has_updates": False})

        enriched_articles = []
        seen_keys = set()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                try:
                    published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
                except (ValueError, TypeError):
                    published = "1970-01-01T00:00:00"
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })

        enriched_articles.sort(key=lambda x: x["published"], reverse=True)
        categorized_articles = {}
        for article in enriched_articles:
            cat = article["category"]
            categorized_articles.setdefault(cat, [])
            key = f"{article['title']}|{article['link']}|{article['published']}"
            if key not in [f"{a['title']}|{a['link']}|{a['published']}" for a in categorized_articles[cat]]:
                categorized_articles[cat].append(article)

        # Keep only the ten most recent unique articles per category
        for cat in categorized_articles:
            unique_articles = []
            seen_cat_keys = set()
            for article in sorted(categorized_articles[cat], key=lambda x: x["published"], reverse=True):
                key = f"{clean_text(article['title'])}|{clean_text(article['link'])}|{article['published']}"
                if key not in seen_cat_keys:
                    seen_cat_keys.add(key)
                    unique_articles.append(article)
            categorized_articles[cat] = unique_articles[:10]

        # Compute hash of new data and compare with the last hash to detect updates
        current_data_hash = compute_data_hash(categorized_articles)
        has_updates = last_data_hash != current_data_hash
        if has_updates:
            logger.info("New RSS data detected, sending updates to frontend")
            last_data_hash = current_data_hash
            return jsonify({
                "articles": categorized_articles,
                "last_update": last_update_time,
                "has_updates": True
            })
        else:
            logger.info("No new RSS data, skipping update")
            return jsonify({
                "articles": {},
                "last_update": last_update_time,
                "has_updates": False
            })
    except Exception as e:
        logger.error(f"Error fetching updates: {e}")
        return jsonify({"articles": {}, "last_update": last_update_time, "has_updates": False}), 500
@app.route("/get_all_articles/<category>")  # assumed route path
def get_all_articles(category):
    """Return every stored article for a single category, newest first."""
    try:
        all_docs = get_all_docs_from_dbs()
        if not all_docs.get('metadatas'):
            return jsonify({"articles": [], "category": category})

        enriched_articles = []
        seen_keys = set()
        for doc, meta in zip(all_docs['documents'], all_docs['metadatas']):
            if not meta or meta.get("category") != category:
                continue
            title = meta.get("title", "No Title")
            link = meta.get("link", "")
            description = meta.get("original_description", "No Description")
            published = meta.get("published", "Unknown Date").strip()
            title = clean_text(title)
            link = clean_text(link)
            description = clean_text(description)
            description_hash = hashlib.sha256(description.encode('utf-8')).hexdigest()
            key = f"{title}|{link}|{published}|{description_hash}"
            if key not in seen_keys:
                seen_keys.add(key)
                try:
                    published = datetime.strptime(published, "%Y-%m-%d %H:%M:%S").isoformat() if "Unknown" not in published else published
                except (ValueError, TypeError):
                    published = "1970-01-01T00:00:00"
                enriched_articles.append({
                    "title": title,
                    "link": link,
                    "description": description,
                    "category": meta.get("category", "Uncategorized"),
                    "published": published,
                    "image": meta.get("image", "svg"),
                })

        enriched_articles.sort(key=lambda x: x["published"], reverse=True)
        return jsonify({"articles": enriched_articles, "category": category})
    except Exception as e:
        logger.error(f"Error fetching all articles for category {category}: {e}")
        return jsonify({"articles": [], "category": category}), 500
@app.route("/card_load")  # assumed route path
def card_load():
    """Render the article card template."""
    return render_template("card.html")
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
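# Illustrative usage sketch (assumes the route paths declared above):
#
#   with app.test_client() as client:
#       client.get("/")                                 # render the home page
#       client.post("/search", data={"search": "ai"})   # form field matches request.form.get('search')
#       client.get("/check_loading")                    # 200 when done, 202 while still loading
#       client.get("/get_updates")                      # has_updates flips when the data hash changes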