diff --git "a/app.py" "b/app.py" --- "a/app.py" +++ "b/app.py" @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- # Added for potential emoji issues in some editors import streamlit as st import asyncio import websockets @@ -24,7 +25,7 @@ from dotenv import load_dotenv from streamlit_marquee import streamlit_marquee # Keep import if used from collections import defaultdict, Counter import pandas as pd # Keep for potential fallback logic if needed -from streamlit_js_eval import streamlit_js_eval +from streamlit_js_eval import streamlit_js_eval, sync_js_eval # Import sync version if needed from PIL import Image # Needed for paste_image_component # ============================================================================== @@ -66,8 +67,8 @@ AUDIO_DIR = "audio_logs" # World Builder Constants SAVED_WORLDS_DIR = "saved_worlds" # Directory for MD world files -PLOT_WIDTH = 50.0 # Needed for JS injection -PLOT_DEPTH = 50.0 # Needed for JS injection +PLOT_WIDTH = 50.0 # Needed for JS injection (sent to index.html) +PLOT_DEPTH = 50.0 # Needed for JS injection (sent to index.html) WORLD_STATE_FILE_MD_PREFIX = "🌍_" # Prefix for world save files # File Emojis @@ -94,13 +95,14 @@ load_dotenv() # --- Global State & Locks --- world_objects_lock = threading.Lock() +# CORRECT: This global dict persists across Streamlit reruns for the session. world_objects = defaultdict(dict) # In-memory world state {obj_id: data} -connected_clients = set() # Holds client_id strings +connected_clients = set() # Holds client_id strings (websocket.id) # ============================================================================== # Utility Functions # ============================================================================== - +# ... (Keep existing utility functions: get_current_time_str, clean_filename_part, run_async, ensure_dir) ... def get_current_time_str(tz='UTC'): """Gets formatted timestamp string in specified timezone (default UTC).""" try: @@ -109,11 +111,10 @@ def get_current_time_str(tz='UTC'): except pytz.UnknownTimeZoneError: now_aware = datetime.now(pytz.utc) except Exception as e: - print(f"Timezone error ({tz}), using UTC. Error: {e}") - now_aware = datetime.now(pytz.utc) + print(f"Timezone error ({tz}), using UTC. Error: {e}") + now_aware = datetime.now(pytz.utc) return now_aware.strftime('%Y%m%d_%H%M%S') - def clean_filename_part(text, max_len=30): """Cleans a string part for use in a filename.""" if not isinstance(text, str): text = "invalid_name" @@ -122,14 +123,31 @@ def clean_filename_part(text, max_len=30): return text[:max_len] def run_async(async_func, *args, **kwargs): - """Runs an async function safely from a sync context using create_task.""" + """Runs an async function safely from a sync context using create_task or new loop.""" try: loop = asyncio.get_running_loop() + # Use create_task if a loop is running (typical in async context like WebSocket handler) return loop.create_task(async_func(*args, **kwargs)) - except RuntimeError: # No running loop in this thread - print(f"Warning: Running async func {async_func.__name__} in new event loop.") + except RuntimeError: # No running loop in this thread (typical in Streamlit's main thread) + # Run in a new event loop - suitable for fire-and-forget tasks from sync code + # Be cautious about return values here as it runs separately. + # For tasks needing results back in Streamlit, consider `asyncio.run` carefully or threading. 
+ # print(f"Warning: Running async func {async_func.__name__} in new event loop.") try: + # This will block the Streamlit thread until the async function completes + # return asyncio.run(async_func(*args, **kwargs)) + # For fire-and-forget, especially for broadcasts, starting a task in a managed loop is better + # Let's use create_task in a new loop if necessary for fire-and-forget + new_loop = asyncio.new_event_loop() + asyncio.set_event_loop(new_loop) + task = new_loop.create_task(async_func(*args, **kwargs)) + # Schedule loop closure? Requires more complex management. + # For simple broadcasts, maybe just let the task run. + # Consider a dedicated asyncio thread if many background tasks are needed. + # Simpler: Just run and block if needed, or use a thread if it's long. + # Let's revert to asyncio.run for simplicity, assuming functions are relatively quick. return asyncio.run(async_func(*args, **kwargs)) + except Exception as e: print(f"Error running async func {async_func.__name__} in new loop: {e}") return None @@ -138,62 +156,65 @@ def run_async(async_func, *args, **kwargs): return None def ensure_dir(dir_path): - """Creates directory if it doesn't exist.""" - os.makedirs(dir_path, exist_ok=True) - -# Ensure directories exist on startup -for d in [CHAT_DIR, AUDIO_DIR, AUDIO_CACHE_DIR, SAVED_WORLDS_DIR]: - ensure_dir(d) + """Creates directory if it doesn't exist.""" + os.makedirs(dir_path, exist_ok=True) # ============================================================================== -# World State File Handling (Markdown + JSON) +# World State File Handling (Markdown + JSON) - CORRECT LOGIC # ============================================================================== def generate_world_save_filename(name="World"): """Generates a filename for saving world state MD files.""" timestamp = get_current_time_str() # Use UTC for consistency clean_name = clean_filename_part(name) - rand_hash = hashlib.md5(str(time.time()).encode() + name.encode()).hexdigest()[:6] # Seed hash - return f"{WORLD_STATE_FILE_MD_PREFIX}{clean_name}_{timestamp}_{rand_hash}.md" + # Use uuid for better uniqueness than time-based hash + rand_id = str(uuid.uuid4())[:8] + return f"{WORLD_STATE_FILE_MD_PREFIX}{clean_name}_{timestamp}_{rand_id}.md" def parse_world_filename(filename): """Extracts info from filename if possible, otherwise returns defaults.""" basename = os.path.basename(filename) - # Check prefix and suffix if basename.startswith(WORLD_STATE_FILE_MD_PREFIX) and basename.endswith(".md"): core_name = basename[len(WORLD_STATE_FILE_MD_PREFIX):-3] parts = core_name.split('_') - if len(parts) >= 3: # Expecting Name_Timestamp_Hash + if len(parts) >= 3: # Expecting Name_Timestamp_Hash/UUID + hash_part = parts[-1] timestamp_str = parts[-2] - name_parts = parts[:-2]; name = "_".join(name_parts) if name_parts else "Untitled" + name_parts = parts[:-2]; name = "_".join(name_parts).replace('_', ' ') if name_parts else "Untitled" dt_obj = None - try: # Try parsing timestamp + try: dt_obj = datetime.strptime(timestamp_str, '%Y%m%d_%H%M%S') - dt_obj = pytz.utc.localize(dt_obj) # Assume UTC + dt_obj = pytz.utc.localize(dt_obj) except (ValueError, pytz.exceptions.AmbiguousTimeError, pytz.exceptions.NonExistentTimeError): dt_obj = None - return {"name": name.replace('_', ' '), "timestamp": timestamp_str, "dt": dt_obj, "filename": filename} + return {"name": name, "timestamp": timestamp_str, "dt": dt_obj, "filename": filename, "id": hash_part} - # Fallback for unknown format + # Fallback for unknown format or direct path 
dt_fallback = None - try: mtime = os.path.getmtime(filename); dt_fallback = datetime.fromtimestamp(mtime, tz=pytz.utc) + try: + full_path = filename if os.path.exists(filename) else os.path.join(SAVED_WORLDS_DIR, filename) + if os.path.exists(full_path): + mtime = os.path.getmtime(full_path) + dt_fallback = datetime.fromtimestamp(mtime, tz=pytz.utc) except Exception: pass - return {"name": basename.replace('.md',''), "timestamp": "Unknown", "dt": dt_fallback, "filename": filename} + return {"name": basename.replace('.md',''), "timestamp": "Unknown", "dt": dt_fallback, "filename": filename, "id": "N/A"} def save_world_state_to_md(target_filename_base): - """Saves the current in-memory world state to a specific MD file (basename).""" + """Saves the current in-memory world state to a specific MD file (basename). CORRECT""" global world_objects save_path = os.path.join(SAVED_WORLDS_DIR, target_filename_base) print(f"Acquiring lock to save world state to: {save_path}...") success = False with world_objects_lock: - world_data_dict = dict(world_objects) # Convert defaultdict for saving + # Create a plain dict copy for serialization, locking ensures consistency + world_data_dict = dict(world_objects) print(f"Saving {len(world_data_dict)} objects...") - parsed_info = parse_world_filename(save_path) # Parse the full path/intended name + parsed_info = parse_world_filename(save_path) # Use intended filename timestamp_save = get_current_time_str() md_content = f"""# World State: {parsed_info['name']} * **File Saved:** {timestamp_save} (UTC) * **Source Timestamp:** {parsed_info['timestamp']} +* **File ID:** {parsed_info['id']} * **Objects:** {len(world_data_dict)} ```json @@ -206,50 +227,65 @@ def save_world_state_to_md(target_filename_base): success = True except Exception as e: print(f"Error saving world state to {save_path}: {e}") + st.error(f"Error saving world: {e}") return success def load_world_state_from_md(filename_base): - """Loads world state from an MD file (basename), updates global state, returns success bool.""" + """Loads world state from an MD file (basename), updates global state, returns success bool. 
CORRECT""" global world_objects load_path = os.path.join(SAVED_WORLDS_DIR, filename_base) - print(f"Loading world state from MD file: {load_path}...") - if not os.path.exists(load_path): st.error(f"World file not found: {filename_base}"); return False + print(f"Attempting to load world state from MD file: {load_path}...") + if not os.path.exists(load_path): + st.error(f"World file not found: {filename_base}"); return False try: with open(load_path, 'r', encoding='utf-8') as f: content = f.read() - json_match = re.search(r"```json\s*(\{[\s\S]*?\})\s*```", content, re.IGNORECASE) # More robust regex - if not json_match: st.error(f"Could not find valid JSON block in {filename_base}"); return False + # More robust regex to find the JSON block + json_match = re.search(r"```json\s*(\{[\s\S]*?\})\s*```", content, re.IGNORECASE | re.MULTILINE) + if not json_match: + st.error(f"Could not find valid JSON block in {filename_base}"); return False world_data_dict = json.loads(json_match.group(1)) + # Basic validation (optional but recommended) + if not isinstance(world_data_dict, dict): + raise ValueError("Loaded JSON is not a dictionary (expected format: {obj_id: data})") print(f"Acquiring lock to update world state from {filename_base}...") with world_objects_lock: + # IMPORTANT: Clear existing objects and load new ones world_objects.clear() - for k, v in world_data_dict.items(): world_objects[str(k)] = v + # Convert keys to string just in case JSON loaded them as numbers etc. + for k, v in world_data_dict.items(): + world_objects[str(k)] = v loaded_count = len(world_objects) print(f"Loaded {loaded_count} objects from {filename_base}. Lock released.") st.session_state.current_world_file = filename_base # Track loaded file (basename) return True except json.JSONDecodeError as e: st.error(f"Invalid JSON found in {filename_base}: {e}"); return False + except ValueError as e: st.error(f"Invalid world data format in {filename_base}: {e}"); return False except Exception as e: st.error(f"Error loading world state from {filename_base}: {e}"); st.exception(e); return False def get_saved_worlds(): - """Scans the saved worlds directory for world MD files and parses them.""" + """Scans the saved worlds directory for world MD files and parses them. CORRECT""" try: ensure_dir(SAVED_WORLDS_DIR) + # Use the specific prefix to avoid grabbing other md files world_files = glob.glob(os.path.join(SAVED_WORLDS_DIR, f"{WORLD_STATE_FILE_MD_PREFIX}*.md")) parsed_worlds = [parse_world_filename(f) for f in world_files] + # Sort by datetime object for accuracy, handle None cases parsed_worlds.sort(key=lambda x: x['dt'] if x['dt'] else datetime.min.replace(tzinfo=pytz.utc), reverse=True) return parsed_worlds except Exception as e: - print(f"Error scanning saved worlds: {e}"); st.error(f"Could not scan saved worlds: {e}"); return [] + print(f"Error scanning saved worlds: {e}") + st.error(f"Could not scan saved worlds: {e}") + return [] # ============================================================================== # User State & Session Init # ============================================================================== - +# ... (Keep existing functions: save_username, load_username, init_session_state) ... 
def save_username(username): try: with open(STATE_FILE, 'w') as f: f.write(username) @@ -260,53 +296,82 @@ def load_username(): try: with open(STATE_FILE, 'r') as f: return f.read().strip() except Exception as e: print(f"Failed load username: {e}") - return None + # Provide a default if file doesn't exist or fails to load + return list(FUN_USERNAMES.keys())[0] # Default to first fun username def init_session_state(): """Initializes Streamlit session state variables.""" defaults = { - 'server_running_flag': False, 'server_instance': None, 'server_task': None, + 'server_running_flag': False, 'server_instance': None, 'server_task': None, 'websocket_stop_event': None, 'active_connections': defaultdict(dict), 'last_chat_update': 0, 'message_input': "", 'audio_cache': {}, 'tts_voice': "en-US-AriaNeural", 'chat_history': [], 'marquee_settings': {"background": "#1E1E1E", "color": "#FFFFFF", "font-size": "14px", "animationDuration": "20s", "width": "100%", "lineHeight": "35px"}, - 'enable_audio': True, 'download_link_cache': {}, 'username': None, 'autosend': False, + 'enable_audio': True, 'download_link_cache': {}, + 'username': load_username(), # Load username during init + 'autosend': False, 'last_message': "", 'timer_start': time.time(), 'last_sent_transcript': "", 'last_refresh': time.time(), 'auto_refresh': False, 'refresh_rate': 30, - 'selected_object': 'None', 'initial_world_state_loaded': False, + 'selected_object': 'None', # Tracks the *currently selected tool* + 'initial_world_state_loaded': False, # Flag for one-time load 'current_world_file': None, # Track loaded world filename (basename) 'operation_timings': {}, 'performance_metrics': defaultdict(list), 'paste_image_base64': "", 'new_world_name': "MyWorld" } for k, v in defaults.items(): if k not in st.session_state: st.session_state[k] = v - # Ensure complex types initialized correctly + # Ensure complex types initialized correctly (redundant with defaults but safe) if not isinstance(st.session_state.active_connections, defaultdict): st.session_state.active_connections = defaultdict(dict) if not isinstance(st.session_state.chat_history, list): st.session_state.chat_history = [] if not isinstance(st.session_state.marquee_settings, dict): st.session_state.marquee_settings = defaults['marquee_settings'] if not isinstance(st.session_state.audio_cache, dict): st.session_state.audio_cache = {} if not isinstance(st.session_state.download_link_cache, dict): st.session_state.download_link_cache = {} + # Set derived state like tts_voice based on loaded username + if st.session_state.username and st.session_state.username in FUN_USERNAMES: + st.session_state.tts_voice = FUN_USERNAMES[st.session_state.username] + # ============================================================================== # Audio / TTS / Chat / File Handling Helpers # ============================================================================== +# ... (Keep existing helper functions: clean_text_for_tts, create_file, get_download_link, async_edge_tts_generate, play_and_download_audio, save_chat_entry, load_chat_history, create_zip_of_files, delete_files, save_pasted_image, paste_image_component, AudioProcessor, process_pdf_tab etc.) ... 
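# --- Illustrative sketch (not part of the patch): the shape load_username() and
# --- init_session_state() above assume for FUN_USERNAMES, i.e. a mapping of display
# --- name -> edge-tts voice id. The entries below are made-up examples; the real dict
# --- is defined elsewhere in app.py.
FUN_USERNAMES_EXAMPLE = {
    "CosmicCoder 🚀": "en-US-AriaNeural",
    "PixelPainter 🎨": "en-GB-SoniaNeural",
}

default_user = list(FUN_USERNAMES_EXAMPLE.keys())[0]            # load_username() fallback
default_voice = FUN_USERNAMES_EXAMPLE.get(default_user,
                                          "en-US-AriaNeural")   # stored as st.session_state.tts_voice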
+# --- These seem generally okay, focus is on the world state --- +# Example (ensure generate_filename exists or is defined if create_file uses it) +def generate_filename(prefix, username, file_type): + timestamp = get_current_time_str() + clean_user = clean_filename_part(username, 15) + clean_prefix = clean_filename_part(prefix, 20) + rand_hash = hashlib.md5(str(time.time()).encode() + prefix.encode()).hexdigest()[:6] + return f"{clean_prefix}_{clean_user}_{timestamp}_{rand_hash}.{file_type}" -# --- Text & File Helpers --- def clean_text_for_tts(text): if not isinstance(text, str): return "No text" text = re.sub(r'\[([^\]]+)\]\([^\)]+\)', r'\1', text); text = re.sub(r'[#*_`!]', '', text) text = ' '.join(text.split()); return text[:250] or "No text" def create_file(content, username, file_type="md", save_path=None): - if not save_path: filename = generate_filename(content, username, file_type); save_path = os.path.join(MEDIA_DIR, filename) + if not save_path: + # Assume a naming convention if path not given, e.g., for chat logs + prefix = content[:20] if isinstance(content, str) else 'file' + filename = generate_filename(prefix, username, file_type) + # Determine directory based on type or default + target_dir = MEDIA_DIR + if file_type == 'md' and CHAT_DIR in filename: target_dir = CHAT_DIR # Heuristic + elif file_type == 'mp3' and AUDIO_DIR in filename: target_dir = AUDIO_DIR + save_path = os.path.join(target_dir, filename) + ensure_dir(os.path.dirname(save_path)) try: - with open(save_path, 'w', encoding='utf-8') as f: f.write(content) + mode = 'wb' if isinstance(content, bytes) else 'w' + encoding = None if mode == 'wb' else 'utf-8' + with open(save_path, mode, encoding=encoding) as f: f.write(content) # print(f"Created file: {save_path}"); # Can be too verbose return save_path except Exception as e: print(f"Error creating file {save_path}: {e}"); return None def get_download_link(file_path, file_type="md"): - if not file_path or not os.path.exists(file_path): basename = os.path.basename(file_path) if file_path else "N/A"; return f"Not found: {basename}" + if not file_path or not os.path.exists(file_path): + basename = os.path.basename(file_path) if file_path else "N/A" + return f"Not found: {basename}" try: mtime = os.path.getmtime(file_path) except OSError: mtime = 0 cache_key = f"dl_{file_path}_{mtime}"; @@ -318,50 +383,65 @@ def get_download_link(file_path, file_type="md"): basename = os.path.basename(file_path) link_html = f'{FILE_EMOJIS.get(file_type, "📄")}' st.session_state.download_link_cache[cache_key] = link_html - except Exception as e: print(f"Error generating DL link for {file_path}: {e}"); return f"Err" + except Exception as e: print(f"Error generating DL link for {file_path}: {e}"); return f"Err DL" return st.session_state.download_link_cache.get(cache_key, "CacheErr") -# --- Audio / TTS --- async def async_edge_tts_generate(text, voice, username): if not text: return None cache_key = hashlib.md5(f"{text[:150]}_{voice}".encode()).hexdigest(); if 'audio_cache' not in st.session_state: st.session_state.audio_cache = {} cached_path = st.session_state.audio_cache.get(cache_key); if cached_path and os.path.exists(cached_path): return cached_path + text_cleaned = clean_text_for_tts(text); if not text_cleaned or text_cleaned == "No text": return None + filename_base = generate_filename(text_cleaned, username, "mp3"); save_path = os.path.join(AUDIO_DIR, filename_base); ensure_dir(AUDIO_DIR) try: communicate = edge_tts.Communicate(text_cleaned, voice); await 
communicate.save(save_path); if os.path.exists(save_path) and os.path.getsize(save_path) > 0: st.session_state.audio_cache[cache_key] = save_path; return save_path - else: print(f"Audio file {save_path} failed generation."); return None - except Exception as e: print(f"Edge TTS Error: {e}"); return None + else: print(f"Audio file {save_path} failed generation or is empty."); return None + except Exception as e: print(f"Edge TTS Error for '{text_cleaned[:50]}...': {e}"); return None + def play_and_download_audio(file_path): if file_path and os.path.exists(file_path): try: - st.audio(file_path) - file_type = file_path.split('.')[-1] - st.markdown(get_download_link(file_path, file_type), unsafe_allow_html=True) + st.audio(file_path, format='audio/mpeg') # Specify format + file_type = file_path.split('.')[-1].lower() + st.markdown(get_download_link(file_path, file_type), unsafe_allow_html=True) except Exception as e: st.error(f"Audio display error for {os.path.basename(file_path)}: {e}") + # else: st.caption("Audio file not available.") # Optional feedback -# --- Chat --- async def save_chat_entry(username, message, voice, is_markdown=False): - if not message.strip(): return None, None + if not message or not message.strip(): return None, None timestamp_str = get_current_time_str(); - entry = f"[{timestamp_str}] {username} ({voice}): {message}" if not is_markdown else f"[{timestamp_str}] {username} ({voice}):\n```markdown\n{message}\n```" - md_filename_base = generate_filename(message, username, "md"); md_file_path = os.path.join(CHAT_DIR, md_filename_base); - md_file = create_file(entry, username, "md", save_path=md_file_path) # Save to file + # Format entry for display + entry_display = f"**{username}** ({timestamp_str}): {message}" + # Format entry for saving to file (can be simpler) + entry_file = f"[{timestamp_str}] {username} ({voice}): {message}" if not is_markdown else f"[{timestamp_str}] {username} ({voice}):\n```markdown\n{message}\n```" + + # Generate filename based on message content for uniqueness + md_filename_base = generate_filename(message, username, "md"); + md_file_path = os.path.join(CHAT_DIR, md_filename_base); + md_file = create_file(entry_file, username, "md", save_path=md_file_path) # Save to file + + # Update live session state history (using display format) if 'chat_history' not in st.session_state: st.session_state.chat_history = []; - st.session_state.chat_history.append(entry) # Add to live history + st.session_state.chat_history.append(entry_display) # Add display format + audio_file = None; if st.session_state.get('enable_audio', True): - tts_message = message # Use original message for TTS - audio_file = await async_edge_tts_generate(tts_message, voice, username) + tts_message = message # Use original message for TTS + audio_file = await async_edge_tts_generate(tts_message, voice, username) + + # Return path to saved MD file and audio file (if generated) return md_file, audio_file async def load_chat_history(): + # This function primarily loads history if session state is empty, + # otherwise it relies on the running session state. 
if 'chat_history' not in st.session_state: st.session_state.chat_history = [] if not st.session_state.chat_history: # Only load from files if session state is empty ensure_dir(CHAT_DIR) @@ -369,275 +449,541 @@ async def load_chat_history(): chat_files = sorted(glob.glob(os.path.join(CHAT_DIR, "*.md")), key=os.path.getmtime); loaded_count = 0 temp_history = [] for f_path in chat_files: - try: - with open(f_path, 'r', encoding='utf-8') as file: temp_history.append(file.read().strip()); loaded_count += 1 - except Exception as e: print(f"Err read chat {f_path}: {e}") + try: + with open(f_path, 'r', encoding='utf-8') as file: + # Attempt to parse file content back into display format (best effort) + content = file.read().strip() + match = re.match(r"\[(.*?)\] (.*?) \(.*?\): (.*)", content, re.DOTALL) + if match: + ts, user, msg = match.groups() + # Handle potential markdown blocks within the message + if "```markdown" in msg: + msg_clean = msg.split("```markdown\n")[1].split("\n```")[0] + display_entry = f"**{user}** ({ts}): \n```markdown\n{msg_clean}\n```" + else: + display_entry = f"**{user}** ({ts}): {msg}" + temp_history.append(display_entry) + loaded_count += 1 + else: + # Fallback if parsing fails, add raw content + temp_history.append(content) + loaded_count += 1 + + except Exception as e: print(f"Err read chat {f_path}: {e}") st.session_state.chat_history = temp_history # Assign loaded history print(f"Loaded {loaded_count} chat entries from files.") return st.session_state.chat_history # --- File Management --- +def format_timestamp_prefix(prefix): # Helper assumed needed by save_pasted_image + return f"{prefix}_{get_current_time_str()}" + def create_zip_of_files(files_to_zip, prefix="Archive"): if not files_to_zip: st.warning("No files provided to zip."); return None - timestamp = format_timestamp_prefix(f"Zip_{prefix}"); zip_name = f"{prefix}_{timestamp}.zip" + # Use a descriptive timestamp format + timestamp_str = get_current_time_str() # Format: YYYYMMDD_HHMMSS + zip_name = f"{prefix}_{timestamp_str}.zip" + zip_path = os.path.join(MEDIA_DIR, zip_name) # Save zip in base dir + ensure_dir(MEDIA_DIR) try: - print(f"Creating zip: {zip_name}..."); - with zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED) as z: + print(f"Creating zip: {zip_path}..."); + count = 0 + with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as z: for f in files_to_zip: - if os.path.exists(f): z.write(f, os.path.basename(f)) # Use basename in archive - else: print(f"Skip zip missing: {f}") - print("Zip success."); st.success(f"Created {zip_name}"); return zip_name + if os.path.exists(f): + z.write(f, os.path.basename(f)) # Use basename in archive + count +=1 + else: print(f"Skip zip missing: {f}") + if count > 0: + print(f"Zip success: {zip_name} ({count} files)."); st.success(f"Created {zip_name}"); return zip_path + else: + print("Zip created but no files added."); st.warning("Zip created, but no valid source files found."); os.remove(zip_path); return None except Exception as e: print(f"Zip failed: {e}"); st.error(f"Zip failed: {e}"); return None + def delete_files(file_patterns, exclude_files=None): """Deletes files matching patterns, excluding protected/specified files.""" - protected = [STATE_FILE, "app.py", "index.html", "requirements.txt", "README.md"] - # Don't automatically protect current world file during generic delete - if exclude_files: protected.extend(exclude_files) + protected_defaults = [STATE_FILE, "app.py", "index.html", "requirements.txt", "README.md", ".env"] + # Combine default 
protected with specific exclusions + protected = set(protected_defaults) + if exclude_files: protected.update(exclude_files) deleted_count = 0; errors = 0 for pattern in file_patterns: - pattern_path = pattern # Assume pattern includes path from os.path.join - print(f"Attempting to delete files matching: {pattern_path}") - try: - files_to_delete = glob.glob(pattern_path) - if not files_to_delete: print(f"No files found for pattern: {pattern}"); continue - for f_path in files_to_delete: - basename = os.path.basename(f_path) - if os.path.isfile(f_path) and basename not in protected: - try: os.remove(f_path); print(f"Deleted: {f_path}"); deleted_count += 1 - except Exception as e: print(f"Failed delete {f_path}: {e}"); errors += 1 - elif os.path.isdir(f_path): print(f"Skipping directory: {f_path}") - except Exception as glob_e: print(f"Error matching pattern {pattern}: {glob_e}"); errors += 1 + # Ensure pattern includes the directory path correctly + # Example: os.path.join(CHAT_DIR, "*.md") + print(f"Attempting to delete files matching pattern: {pattern}") + try: + files_to_delete = glob.glob(pattern, recursive=False) # Avoid recursive unless intended + if not files_to_delete: print(f"No files found for pattern: {pattern}"); continue + + for f_path in files_to_delete: + basename = os.path.basename(f_path) + # Check if it's a file and not in the protected set + if os.path.isfile(f_path) and basename not in protected: + try: os.remove(f_path); print(f"Deleted: {f_path}"); deleted_count += 1 + except Exception as e: print(f"Failed delete {f_path}: {e}"); errors += 1 + elif os.path.isdir(f_path): print(f"Skipping directory: {f_path}") + elif basename in protected: print(f"Skipping protected file: {f_path}") + + except Exception as glob_e: print(f"Error processing pattern {pattern}: {glob_e}"); errors += 1 + msg = f"Deleted {deleted_count} files."; if errors > 0: msg += f" Encountered {errors} errors."; st.warning(msg) elif deleted_count > 0: st.success(msg) - else: st.info("No matching files found to delete.") - st.session_state['download_link_cache'] = {}; st.session_state['audio_cache'] = {} + else: st.info("No matching, unprotected files found to delete.") + # Clear caches after deletion might be relevant + st.session_state['download_link_cache'] = {}; st.session_state['audio_cache'] = {} # --- Image Handling --- async def save_pasted_image(image, username): if not image: return None try: + # Use content hash for filename uniqueness img_hash = hashlib.md5(image.tobytes()).hexdigest()[:8]; timestamp = format_timestamp_prefix(username); filename = f"{timestamp}_pasted_{img_hash}.png"; filepath = os.path.join(MEDIA_DIR, filename) + ensure_dir(MEDIA_DIR) image.save(filepath, "PNG"); print(f"Pasted image saved: {filepath}"); return filepath except Exception as e: print(f"Failed image save: {e}"); return None def paste_image_component(): - pasted_img = None; img_type = None - # Added default value to text_area to ensure key exists even if empty - paste_input = st.text_area("Paste Image Data Here", key="paste_input_area", height=50, value="") - if st.button("Process Pasted Image 📋", key="paste_form_button"): # Simplified trigger - if paste_input and paste_input.startswith('data:image'): + pasted_img = None; img_type = None + # Use a unique key and provide default value + paste_input = st.text_area("Paste Image Data Here (Ctrl+V):", key="paste_input_area_widget", height=50, value="", help="Paste image data directly (e.g., from clipboard if browser supports).") + + # Check if the text area has content 
that looks like image data + if paste_input and paste_input.startswith('data:image'): + if st.button("Process Pasted Image 📋", key="process_paste_button"): try: - mime_type = paste_input.split(';')[0].split(':')[1]; base64_str = paste_input.split(',')[1]; img_bytes = base64.b64decode(base64_str); pasted_img = Image.open(io.BytesIO(img_bytes)); img_type = mime_type.split('/')[1] - st.image(pasted_img, caption=f"Pasted ({img_type.upper()})", width=150); st.session_state.paste_image_base64 = base64_str - except ImportError: st.error("Pillow library needed for image pasting.") - except Exception as e: st.error(f"Img decode err: {e}"); st.session_state.paste_image_base64 = "" - else: st.warning("No valid image data pasted."); st.session_state.paste_image_base64 = "" - # Return image if processed successfully in this run - return pasted_img + # Extract MIME type and base64 data + header, base64_str = paste_input.split(',', 1) + mime_type = header.split(';')[0].split(':')[1] + img_type = mime_type.split('/')[1] # e.g., 'png' + img_bytes = base64.b64decode(base64_str); + pasted_img = Image.open(io.BytesIO(img_bytes)); + + st.image(pasted_img, caption=f"Pasted Image ({img_type.upper()})", width=150); + # Store base64 in session state if needed elsewhere + st.session_state.paste_image_base64 = base64_str + st.success("Image processed successfully!") + # Optionally clear the text area after processing + # st.session_state.paste_input_area_widget = "" # This doesn't work directly with text_area value persistence + # Consider using streamlit_js_eval to clear the JS input if needed: + # streamlit_js_eval("document.getElementById('paste_input_area_widget').value = '';") + except ImportError: st.error("Pillow library needed for image pasting.") + except (ValueError, TypeError, IndexError) as decode_err: st.error(f"Invalid image data format: {decode_err}"); st.session_state.paste_image_base64 = "" + except Exception as e: st.error(f"Image processing error: {e}"); st.session_state.paste_image_base64 = "" + # No button shown if input doesn't look like image data + # Return the PIL image object if processed in this run + return pasted_img -# --- PDF Processing --- +# --- PDF Processing --- Add error handling and clarity class AudioProcessor: - def __init__(self): self.cache_dir=AUDIO_CACHE_DIR; ensure_dir(self.cache_dir); self.metadata=json.load(open(f"{self.cache_dir}/metadata.json", 'r')) if os.path.exists(f"{self.cache_dir}/metadata.json") else {} - def _save_metadata(self): - try: - with open(f"{self.cache_dir}/metadata.json", 'w') as f: json.dump(self.metadata, f, indent=2) - except Exception as e: print(f"Failed metadata save: {e}") - async def create_audio(self, text, voice='en-US-AriaNeural'): - cache_key=hashlib.md5(f"{text[:150]}:{voice}".encode()).hexdigest(); cache_path=os.path.join(self.cache_dir, f"{cache_key}.mp3"); - if cache_key in self.metadata and os.path.exists(cache_path): return cache_path - text_cleaned=clean_text_for_tts(text); - if not text_cleaned: return None - ensure_dir(os.path.dirname(cache_path)) - try: - communicate=edge_tts.Communicate(text_cleaned,voice); await communicate.save(cache_path) - if os.path.exists(cache_path) and os.path.getsize(cache_path) > 0: self.metadata[cache_key]={'timestamp': datetime.now().isoformat(), 'text_length': len(text_cleaned), 'voice': voice}; self._save_metadata(); return cache_path - else: return None - except Exception as e: print(f"TTS Create Audio Error: {e}"); return None + def __init__(self): + self.cache_dir=AUDIO_CACHE_DIR; 
ensure_dir(self.cache_dir); + self.metadata_file = os.path.join(self.cache_dir, "metadata.json") + self.metadata = self._load_metadata() + + def _load_metadata(self): + try: + if os.path.exists(self.metadata_file): + with open(self.metadata_file, 'r', encoding='utf-8') as f: return json.load(f) + except (IOError, json.JSONDecodeError) as e: print(f"Warning: Could not load audio metadata: {e}") + return {} + + def _save_metadata(self): + try: + with open(self.metadata_file, 'w', encoding='utf-8') as f: json.dump(self.metadata, f, indent=2) + except IOError as e: print(f"Failed metadata save: {e}") + + async def create_audio(self, text, voice='en-US-AriaNeural'): + cache_key=hashlib.md5(f"{text[:150]}:{voice}".encode()).hexdigest(); cache_path=os.path.join(self.cache_dir, f"{cache_key}.mp3"); + + # Check cache first + if cache_key in self.metadata and os.path.exists(cache_path): + print(f"Cache hit for audio: {cache_key}"); return cache_path + + text_cleaned=clean_text_for_tts(text); + if not text_cleaned: print("Skipping TTS for empty text."); return None + + ensure_dir(os.path.dirname(cache_path)) # Ensure cache dir exists + print(f"Generating TTS for: '{text_cleaned[:50]}...' (Voice: {voice})") + try: + communicate=edge_tts.Communicate(text_cleaned, voice); await communicate.save(cache_path) + if os.path.exists(cache_path) and os.path.getsize(cache_path) > 0: + # Update metadata on successful creation + self.metadata[cache_key]={'timestamp': datetime.now().isoformat(), 'text_length': len(text_cleaned), 'voice': voice}; self._save_metadata(); + print(f"TTS success: {cache_path}") + return cache_path + else: + print(f"TTS failed: File not created or empty '{cache_path}'"); return None + except edge_tts.NoAudioReceived: print(f"TTS Error: No audio received from Edge TTS for '{text_cleaned[:50]}...'"); return None + except Exception as e: print(f"TTS Create Audio Error: {e}"); return None def process_pdf_tab(pdf_file, max_pages, voice): st.subheader("PDF Processing Results") # Change header if pdf_file is None: st.info("Upload a PDF file and click 'Process PDF' to begin."); return - audio_processor = AudioProcessor() + + audio_processor = AudioProcessor() # Instantiate the processor try: - reader=PdfReader(pdf_file) - if reader.is_encrypted: st.warning("PDF is encrypted."); return - total_pages=min(len(reader.pages),max_pages); - st.write(f"Processing first {total_pages} pages of '{pdf_file.name}'...") - texts, audios={}, {}; page_threads = []; results_lock = threading.Lock() + # Use file bytes directly with PdfReader + pdf_bytes = pdf_file.getvalue() + reader = PdfReader(io.BytesIO(pdf_bytes)) + if reader.is_encrypted: + try: + # Attempt decryption with an empty password, common for some PDFs + if reader.decrypt("") == 0: # 0 indicates failure + st.warning("PDF is encrypted and could not be decrypted with an empty password."); return + except Exception as decrypt_e: + st.warning(f"PDF is encrypted. 
Decryption failed: {decrypt_e}"); return + + total_pages_in_pdf = len(reader.pages) + pages_to_process = min(total_pages_in_pdf, max_pages) + st.write(f"Processing first {pages_to_process} of {total_pages_in_pdf} pages from '{pdf_file.name}'...") + + texts = {} # page_num: text + audios = {} # page_num: audio_path + page_threads = [] + results_lock = threading.Lock() # Lock for safely updating shared dicts + + # Define the target function for each thread (synchronous wrapper) def process_page_sync(page_num, page_text): - async def run_async_audio(): return await audio_processor.create_audio(page_text, voice) + async def run_async_audio(): + return await audio_processor.create_audio(page_text, voice) try: - # asyncio.run can cause issues if called repeatedly in threads, use run_async helper - audio_path_future = run_async(run_async_audio) - # This needs careful handling. run_async might return a task or result. - # For simplicity here, let's assume it blocks or we retrieve result later. - # A better pattern uses concurrent.futures or manages event loop properly. - # Sticking to asyncio.run for now as run_async tries it. - audio_path = asyncio.run(run_async_audio()) # Revert to simpler asyncio.run for thread context + # Run the async audio creation in the current thread's event loop (or a new one) + # asyncio.run is suitable here as each thread needs its own sync->async bridge + audio_path = asyncio.run(run_async_audio()) if audio_path: - with results_lock: audios[page_num] = audio_path - except Exception as page_e: print(f"Err process page {page_num+1}: {page_e}") + with results_lock: audios[page_num] = audio_path + except Exception as page_e: print(f"Error processing page {page_num+1} audio: {page_e}") - # Start threads - for i in range(total_pages): - try: - page = reader.pages[i]; text = page.extract_text(); - if text and text.strip(): texts[i]=text; thread = threading.Thread(target=process_page_sync, args=(i, text)); page_threads.append(thread); thread.start() - else: texts[i] = "[No text extracted]" - except Exception as extract_e: texts[i] = f"[Error extract: {extract_e}]"; print(f"Error page {i+1} extract: {extract_e}") + # Extract text and start threads + extraction_errors = 0 + for i in range(pages_to_process): + try: + page = reader.pages[i]; text = page.extract_text(); + if text and text.strip(): + texts[i]=text; + # Start a thread only if text was extracted + thread = threading.Thread(target=process_page_sync, args=(i, text)); + page_threads.append(thread); thread.start() + else: texts[i] = "[No text extracted or page is blank]" + except Exception as extract_e: + texts[i] = f"[Error extracting text: {extract_e}]"; print(f"Error extracting text from page {i+1}: {extract_e}"); extraction_errors += 1 + + if extraction_errors > 0: st.warning(f"Encountered errors extracting text from {extraction_errors} pages.") # Wait for threads and display progress - progress_bar = st.progress(0.0, text="Processing pages...") + progress_bar = st.progress(0.0, text="Generating audio for extracted pages...") total_threads = len(page_threads) start_join_time = time.time() - while any(t.is_alive() for t in page_threads): - completed_threads = total_threads - sum(t.is_alive() for t in page_threads) - progress = completed_threads / total_threads if total_threads > 0 else 1.0 - progress_bar.progress(min(progress, 1.0), text=f"Processed {completed_threads}/{total_threads} pages...") - if time.time() - start_join_time > 600: print("PDF processing timed out."); break # 10 min timeout - time.sleep(0.5) - 
progress_bar.progress(1.0, text="Processing complete.") - - # Display results - for i in range(total_pages): - with st.expander(f"Page {i+1}"): - st.markdown(texts.get(i, "[Error getting text]")) - audio_file = audios.get(i) - if audio_file: play_and_download_audio(audio_file) - else: st.caption("Audio generation failed or was skipped.") - - except Exception as pdf_e: st.error(f"Err read PDF: {pdf_e}"); st.exception(pdf_e) + processed_count = 0 + if total_threads > 0: + while processed_count < total_threads: + processed_count = total_threads - sum(t.is_alive() for t in page_threads) + progress = processed_count / total_threads + progress_bar.progress(min(progress, 1.0), text=f"Audio generated for {processed_count}/{total_threads} pages...") + if time.time() - start_join_time > 600: # 10 min timeout + print("PDF audio processing timed out joining threads."); st.warning("Audio generation timed out."); break + time.sleep(0.5) # Reduce busy-waiting + progress_bar.progress(1.0, text="Audio generation complete.") + + # Display results sequentially + st.write("---") + st.subheader("Processed Content:") + for i in range(pages_to_process): + with st.expander(f"Page {i+1}"): + page_text = texts.get(i, "[Error retrieving text]") + st.markdown(f"**Text:**\n```\n{page_text}\n```" if not page_text.startswith("[") else page_text) # Show text in code block + st.markdown("---") + st.markdown("**Audio:**") + audio_file = audios.get(i) + if audio_file: play_and_download_audio(audio_file) + else: st.caption("Audio not generated or generation failed.") + + except Exception as pdf_e: st.error(f"Error reading or processing PDF '{pdf_file.name}': {pdf_e}"); st.exception(pdf_e) + # ============================================================================== -# WebSocket Server Logic +# WebSocket Server Logic - CORRECT LOGIC FOR STATE SYNC # ============================================================================== async def register_client(websocket): - client_id = str(websocket.id); connected_clients.add(client_id); + """Adds client to tracking sets/dicts.""" + client_id = str(websocket.id); + connected_clients.add(client_id); + # Ensure active_connections dict exists in session state if 'active_connections' not in st.session_state: st.session_state.active_connections = defaultdict(dict); - st.session_state.active_connections[client_id] = websocket; print(f"Client registered: {client_id}. Total: {len(connected_clients)}") + st.session_state.active_connections[client_id] = websocket; + print(f"Client registered: {client_id}. Total clients: {len(connected_clients)}") async def unregister_client(websocket): - client_id = str(websocket.id); connected_clients.discard(client_id); + """Removes client from tracking sets/dicts.""" + client_id = str(websocket.id); + connected_clients.discard(client_id); + # Safely remove from session state dict if 'active_connections' in st.session_state: st.session_state.active_connections.pop(client_id, None); - print(f"Client unregistered: {client_id}. Remaining: {len(connected_clients)}") + print(f"Client unregistered: {client_id}. 
Remaining clients: {len(connected_clients)}") async def send_safely(websocket, message, client_id): - try: await websocket.send(message) - except websockets.ConnectionClosed: print(f"WS Send failed (Closed) client {client_id}"); raise - except RuntimeError as e: print(f"WS Send failed (Runtime {e}) client {client_id}"); raise - except Exception as e: print(f"WS Send failed (Other {e}) client {client_id}"); raise + """Sends a message to a single client with error handling.""" + try: + await websocket.send(message) + # print(f"Sent message to {client_id}: {message[:100]}...") # Verbose + return True + except websockets.ConnectionClosed: print(f"WS Send failed (ConnectionClosed) for client {client_id}"); return False + except RuntimeError as e: print(f"WS Send failed (RuntimeError: {e}) for client {client_id}"); return False + except Exception as e: print(f"WS Send failed (Other: {e}) for client {client_id}"); return False async def broadcast_message(message, exclude_id=None): - if not connected_clients: return - tasks = []; current_client_ids = list(connected_clients); active_connections_copy = st.session_state.active_connections.copy() - for client_id in current_client_ids: - if client_id == exclude_id: continue - websocket = active_connections_copy.get(client_id) - if websocket: tasks.append(asyncio.create_task(send_safely(websocket, message, client_id))) - if tasks: await asyncio.gather(*tasks, return_exceptions=True) + """Broadcasts a message to all connected clients except the excluded one.""" + if not connected_clients: return # No clients to broadcast to + + # Create tasks for sending concurrently + # Use copies of the sets/dicts to avoid issues if they change during iteration + active_connections_copy = st.session_state.get('active_connections', {}).copy() + tasks = [] + # print(f"Broadcasting message (exclude {exclude_id}): {message[:100]}...") # Verbose + for client_id, websocket in active_connections_copy.items(): + if client_id == exclude_id: continue # Skip the excluded client + # Check if websocket object seems valid (basic check) + if websocket and websocket.open: + tasks.append(send_safely(websocket, message, client_id)) + else: + print(f"Skipping broadcast to inactive/invalid websocket for client {client_id}") + # Optionally: Clean up inactive connections here or periodically + connected_clients.discard(client_id) # Remove from main set + st.session_state.active_connections.pop(client_id, None) + + + if tasks: + results = await asyncio.gather(*tasks, return_exceptions=True) + # Optional: Log results or handle failures + success_count = sum(1 for r in results if isinstance(r, bool) and r) + fail_count = len(tasks) - success_count + # print(f"Broadcast complete: {success_count} success, {fail_count} failures.") # Verbose + # else: print("No active clients to broadcast to.") # Verbose async def broadcast_world_update(): - with world_objects_lock: current_state_payload = dict(world_objects) + """Sends the entire current world state to ALL connected clients. 
CORRECT for sync.""" + with world_objects_lock: + # Ensure payload is serializable (convert defaultdict to dict) + current_state_payload = dict(world_objects) update_msg = json.dumps({"type": "initial_state", "payload": current_state_payload}) - print(f"Broadcasting full world update ({len(current_state_payload)} objects)...") - await broadcast_message(update_msg) + object_count = len(current_state_payload) + print(f"Broadcasting full world update ({object_count} objects) to all clients...") + # Send to ALL clients, as this often follows a load operation affecting everyone + await broadcast_message(update_msg) # No exclude_id needed here + async def websocket_handler(websocket, path): - await register_client(websocket); client_id = str(websocket.id); + """Handles incoming WebSocket connections and messages. CORRECT""" + client_id = str(websocket.id) # Use websocket's built-in unique ID + await register_client(websocket) + # Determine username - use session state default or fallback username = st.session_state.get('username', f"User_{client_id[:4]}") - try: # Send initial state + print(f"WebSocket connection established for {client_id} ({username}) from {websocket.remote_address}") + + try: + # 1. Send Initial State - CRITICAL for client sync on connect/reconnect with world_objects_lock: initial_state_payload = dict(world_objects) - initial_state_msg = json.dumps({"type": "initial_state", "payload": initial_state_payload}); await websocket.send(initial_state_msg) - print(f"Sent initial state ({len(initial_state_payload)} objs) to {client_id}") - await broadcast_message(json.dumps({"type": "user_join", "payload": {"username": username, "id": client_id}}), exclude_id=client_id) - except Exception as e: print(f"Error initial phase {client_id}: {e}") + initial_state_msg = json.dumps({"type": "initial_state", "payload": initial_state_payload}) + if await send_safely(websocket, initial_state_msg, client_id): + print(f"Sent initial state ({len(initial_state_payload)} objs) to {client_id}") + else: raise websockets.ConnectionClosed # Failed to send initial state, abort + + # 2. Announce User Join to others + join_msg = json.dumps({"type": "user_join", "payload": {"username": username, "id": client_id}}) + await broadcast_message(join_msg, exclude_id=client_id) - try: # Message loop + # 3. 
Message Processing Loop async for message in websocket: + # print(f"Received message from {client_id}: {message[:150]}...") # Verbose try: data = json.loads(message); msg_type = data.get("type"); payload = data.get("payload", {}); - sender_username = payload.get("username", username) + # Use payload's username if provided (e.g., from client input), else fallback + sender_username = payload.get("username", username) # Prefer payload username + # --- Handle Different Message Types --- if msg_type == "chat_message": - chat_text = payload.get('message', ''); voice = payload.get('voice', FUN_USERNAMES.get(sender_username, "en-US-AriaNeural")); - run_async(save_chat_entry, sender_username, chat_text, voice) - await broadcast_message(message, exclude_id=client_id) + chat_text = payload.get('message', ''); + # Use voice associated with sender_username from FUN_USERNAMES map + voice = FUN_USERNAMES.get(sender_username, EDGE_TTS_VOICES[0]) # Fallback voice + if chat_text: + # Save locally (creates file, adds to session history, generates TTS async) + run_async(save_chat_entry, sender_username, chat_text, voice) + # Broadcast the original message so other clients update their chat UI + await broadcast_message(message, exclude_id=client_id) elif msg_type == "place_object": obj_data = payload.get("object_data"); - if obj_data and 'obj_id' in obj_data and 'type' in obj_data: - with world_objects_lock: world_objects[obj_data['obj_id']] = obj_data + # Validate required fields + if obj_data and 'obj_id' in obj_data and 'type' in obj_data and 'position' in obj_data: + obj_id = str(obj_data['obj_id']) # Ensure key is string + with world_objects_lock: world_objects[obj_id] = obj_data + # Broadcast that an object was placed (incl. who placed it) broadcast_payload = json.dumps({"type": "object_placed", "payload": {"object_data": obj_data, "username": sender_username}}); - await broadcast_message(broadcast_payload, exclude_id=client_id) - else: print(f"WS Invalid place_object payload: {payload}") + await broadcast_message(broadcast_payload) # Send to ALL including sender for confirmation/sync + print(f"Object placed by {sender_username}: {obj_id} ({obj_data.get('type')})") + else: print(f"WS Warning: Invalid place_object payload received from {client_id}: {payload}") elif msg_type == "delete_object": - obj_id = payload.get("obj_id"); removed = False - if obj_id: - with world_objects_lock: - if obj_id in world_objects: del world_objects[obj_id]; removed = True - if removed: - broadcast_payload = json.dumps({"type": "object_deleted", "payload": {"obj_id": obj_id, "username": sender_username}}); - await broadcast_message(broadcast_payload, exclude_id=client_id) - else: print(f"WS Invalid delete_object payload: {payload}") + obj_id = payload.get("obj_id"); removed = False + if obj_id: + obj_id = str(obj_id) # Ensure key is string + with world_objects_lock: + if obj_id in world_objects: + del world_objects[obj_id]; removed = True + if removed: + # Broadcast that an object was deleted (incl. 
who deleted it) + broadcast_payload = json.dumps({"type": "object_deleted", "payload": {"obj_id": obj_id, "username": sender_username}}); + await broadcast_message(broadcast_payload) # Send to ALL for sync + print(f"Object deleted by {sender_username}: {obj_id}") + # else: print(f"WS Info: Attempt to delete non-existent object {obj_id} by {client_id}") + else: print(f"WS Warning: Invalid delete_object payload (missing obj_id) from {client_id}: {payload}") elif msg_type == "player_position": - pos_data = payload.get("position"); rot_data = payload.get("rotation") - if pos_data: - broadcast_payload = json.dumps({"type": "player_moved", "payload": {"username": sender_username, "id": client_id, "position": pos_data, "rotation": rot_data}}); - await broadcast_message(broadcast_payload, exclude_id=client_id) - - except json.JSONDecodeError: print(f"WS Invalid JSON from {client_id}: {message[:100]}...") - except Exception as e: print(f"WS Error processing msg from {client_id}: {e}") - except websockets.ConnectionClosed: print(f"WS Client disconnected: {client_id} ({username})") - except Exception as e: print(f"WS Unexpected handler error {client_id}: {e}") + pos_data = payload.get("position"); rot_data = payload.get("rotation") # Optional rotation + # Basic validation for position data existence + if pos_data and isinstance(pos_data, dict) and 'x' in pos_data and 'y' in pos_data and 'z' in pos_data: + # Broadcast player movement to other clients + broadcast_payload = json.dumps({ + "type": "player_moved", + "payload": {"username": sender_username, "id": client_id, "position": pos_data, "rotation": rot_data} + }); + await broadcast_message(broadcast_payload, exclude_id=client_id) # Don't send back to self + # else: print(f"WS Debug: Invalid or missing player position data from {client_id}") # Can be noisy + + elif msg_type == "request_initial_state": + # Client explicitly asks for state (e.g., after reconnect) + print(f"Client {client_id} explicitly requested initial state.") + with world_objects_lock: requested_state_payload = dict(world_objects) + state_msg = json.dumps({"type": "initial_state", "payload": requested_state_payload}) + await send_safely(websocket, state_msg, client_id) + + # Add handlers for other message types if needed + + except json.JSONDecodeError: print(f"WS Error: Invalid JSON received from {client_id}: {message[:100]}...") + except Exception as e: print(f"WS Error processing message from {client_id}: {e}"); st.exception(e) # Log full traceback for debugging + + # --- Handle Client Disconnection --- + except websockets.ConnectionClosedOK: print(f"WS Client disconnected normally: {client_id} ({username})") + except websockets.ConnectionClosedError as e: print(f"WS Client connection closed with error: {client_id} ({username}) - Code: {e.code}, Reason: {e.reason}") + except Exception as e: print(f"WS Unexpected error in handler for {client_id}: {e}"); st.exception(e) finally: - await broadcast_message(json.dumps({"type": "user_leave", "payload": {"username": username, "id": client_id}}), exclude_id=client_id); - await unregister_client(websocket) - - -async def run_websocket_server(): - if st.session_state.get('server_running_flag', False): return - st.session_state['server_running_flag'] = True; print("Attempting start WS server 0.0.0.0:8765...") - stop_event = asyncio.Event(); st.session_state['websocket_stop_event'] = stop_event + print(f"Cleaning up connection for {client_id}...") + # Announce User Leave to others + leave_msg = json.dumps({"type": "user_leave", "payload": 
{"username": username, "id": client_id}}) + # Use run_async here as the handler loop is closing + run_async(broadcast_message, leave_msg, exclude_id=client_id) # Fire and forget broadcast + # Ensure client is fully unregistered + await unregister_client(websocket) # This removes from connected_clients and session_state dict + +# --- WebSocket Server Management --- +async def run_websocket_server(host="0.0.0.0", port=8765): + """Coroutine that runs the WebSocket server until stopped.""" + # Use a stop event managed in session state + if 'websocket_stop_event' not in st.session_state or st.session_state.websocket_stop_event is None: + st.session_state.websocket_stop_event = asyncio.Event() + + stop_event = st.session_state.websocket_stop_event server = None + st.session_state.server_running_flag = True # Set flag early + print(f"Attempting to start WebSocket server on {host}:{port}...") + try: - server = await websockets.serve(websocket_handler, "0.0.0.0", 8765); st.session_state['server_instance'] = server - print(f"WS server started: {server.sockets[0].getsockname()}. Waiting for stop signal...") + # Start the server + server = await websockets.serve(websocket_handler, host, port); + st.session_state['server_instance'] = server # Store server object + server_address = server.sockets[0].getsockname() # Get actual bound address + print(f"WebSocket server started successfully on {server_address}. Waiting for stop signal...") + # Keep server running until stop event is set await stop_event.wait() - except OSError as e: print(f"### FAILED START WS SERVER: {e}"); st.session_state['server_running_flag'] = False; - except Exception as e: print(f"### UNEXPECTED WS SERVER ERROR: {e}"); st.session_state['server_running_flag'] = False; + print("Stop signal received, shutting down WebSocket server...") + + except OSError as e: + print(f"### FAILED TO START WEBSOCKET SERVER on {host}:{port}: {e} ###") + st.error(f"Server start failed: {e}. 
Port likely in use.") + st.session_state['server_running_flag'] = False # Reset flag on failure + except Exception as e: + print(f"### UNEXPECTED WEBSOCKET SERVER ERROR: {e} ###") + st.exception(e) # Log full traceback + st.session_state['server_running_flag'] = False # Reset flag on failure finally: - print("WS server task finishing..."); - if server: server.close(); await server.wait_closed(); print("WS server closed.") - st.session_state['server_running_flag'] = False; st.session_state['server_instance'] = None; st.session_state['websocket_stop_event'] = None + print("WebSocket server task finishing...") + if server: + server.close() + await server.wait_closed() + print("WebSocket server closed.") + # Clear server state variables + st.session_state['server_instance'] = None + st.session_state.server_running_flag = False + # Don't clear stop event here, might be needed if restart is attempted + print("Server state cleared.") + def start_websocket_server_thread(): - if st.session_state.get('server_task') and st.session_state.server_task.is_alive(): return - if st.session_state.get('server_running_flag', False): return - print("Creating/starting new server thread."); - def run_loop(): - loop = None - try: loop = asyncio.get_running_loop() - except RuntimeError: loop = asyncio.new_event_loop(); asyncio.set_event_loop(loop) - try: loop.run_until_complete(run_websocket_server()) + """Starts the WebSocket server in a separate daemon thread.""" + # Check if already running or thread exists + if st.session_state.get('server_running_flag', False): + print("Server start requested, but flag indicates it's already running or starting.") + return + if st.session_state.get('server_task') and st.session_state.server_task.is_alive(): + print("Server start requested, but thread appears to be alive.") + return + + print("Creating and starting new WebSocket server thread.") + st.session_state.server_running_flag = True # Assume it will start + + # Define the function to run in the new thread + def run_server_in_new_loop(): + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + # Ensure stop event exists for this thread's loop + if 'websocket_stop_event' not in st.session_state or st.session_state.websocket_stop_event is None: + st.session_state.websocket_stop_event = asyncio.Event() + + # Run the server coroutine until it completes (due to stop signal or error) + loop.run_until_complete(run_websocket_server()) + except Exception as thread_e: + print(f"Error in server thread run_loop: {thread_e}") + st.session_state.server_running_flag = False # Mark as stopped on error finally: - if loop and not loop.is_closed(): - tasks = asyncio.all_tasks(loop); - for task in tasks: task.cancel() - try: loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True)) - except asyncio.CancelledError: pass - finally: loop.close(); print("Server thread loop closed.") - else: print("Server thread loop already closed or None.") - st.session_state.server_task = threading.Thread(target=run_loop, daemon=True); st.session_state.server_task.start(); time.sleep(1.5) - if not st.session_state.server_task.is_alive(): print("### Server thread failed to stay alive!") + print("Server thread: Cleaning up event loop...") + try: + # Cancel any remaining tasks in the loop + tasks = asyncio.all_tasks(loop) + if tasks: + for task in tasks: task.cancel() + loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True)) + loop.run_until_complete(loop.shutdown_asyncgens()) + except Exception as cleanup_e: 
print(f"Error during server thread loop cleanup: {cleanup_e}") + finally: + if not loop.is_closed(): loop.close(); print("Server thread event loop closed.") + # Ensure flag is false after thread finishes + st.session_state.server_running_flag = False + st.session_state.server_task = None + + + # Create and start the daemon thread + st.session_state.server_task = threading.Thread(target=run_server_in_new_loop, daemon=True); + st.session_state.server_task.start(); + time.sleep(2) # Give thread time to start and potentially fail + + # Check if thread actually started and server flag is still set + if not st.session_state.server_task.is_alive(): + print("### Server thread failed to start or exited immediately! ###") + st.session_state.server_running_flag = False # Ensure flag is correct + st.error("WebSocket server thread failed to start. Check console logs.") + elif not st.session_state.get('server_running_flag', False): + print("### Server thread started but server flag is false (likely failed during startup). ###") + st.error("WebSocket server failed to start properly. Check console logs.") + else: + print("Server thread appears to have started.") # ============================================================================== @@ -650,49 +996,88 @@ def render_sidebar(): st.header("💾 World Versions") st.caption("Load or save named world states.") - saved_worlds = get_saved_worlds() - world_options_display = {os.path.basename(w['filename']): f"{w['name']} ({w['timestamp']})" for w in saved_worlds} + saved_worlds = get_saved_worlds() # Fetch list of available worlds + # Create display names mapping basename to user-friendly string + world_options_display = { + os.path.basename(w['filename']): f"{w['name']} ({w['timestamp']})" + for w in saved_worlds + } + # Basenames for radio options, add None for "Live State" radio_options_basenames = [None] + [os.path.basename(w['filename']) for w in saved_worlds] + + # Get the currently selected/loaded file basename from session state current_selection_basename = st.session_state.get('current_world_file', None) - current_radio_index = 0 + + # Find the index for the radio button based on the current selection + current_radio_index = 0 # Default to "Live State" if current_selection_basename and current_selection_basename in radio_options_basenames: - try: current_radio_index = radio_options_basenames.index(current_selection_basename) - except ValueError: current_radio_index = 0 + try: current_radio_index = radio_options_basenames.index(current_selection_basename) + except ValueError: current_radio_index = 0 # Fallback if inconsistency + # --- World Selector Radio --- + st.markdown("**Load World:**") selected_basename = st.radio( - "Load World:", options=radio_options_basenames, index=current_radio_index, - format_func=lambda x: "Live State (Unsaved)" if x is None else world_options_display.get(x, x), - key="world_selector_radio" - ) - + "Select a saved world to load or view live state:", # Clearer label + options=radio_options_basenames, + index=current_radio_index, + format_func=lambda x: "Live State (Unsaved Changes)" if x is None else world_options_display.get(x, x), + key="world_selector_radio", + label_visibility="collapsed" # Hide redundant label + ) + + # --- Handle World Load on Selection Change --- if selected_basename != current_selection_basename: - st.session_state.current_world_file = selected_basename - if selected_basename: - with st.spinner(f"Loading {selected_basename}..."): - if load_world_state_from_md(selected_basename): - 
run_async(broadcast_world_update) - st.toast("World loaded!", icon="✅") - else: st.error("Failed to load world."); st.session_state.current_world_file = None - else: - print("Switched to live state."); st.toast("Switched to Live State.") - st.rerun() - - st.caption("Download:") - cols = st.columns([4, 1]) - with cols[0]: st.write("**Name** (Timestamp)") - with cols[1]: st.write("**DL**") - display_limit = 10 + st.session_state.current_world_file = selected_basename # Update session state tracking + if selected_basename: # A specific world file was selected + print(f"User selected world to load: {selected_basename}") + with st.spinner(f"Loading {world_options_display.get(selected_basename, selected_basename)}..."): + # Load data from file into server's memory (`world_objects`) + if load_world_state_from_md(selected_basename): + # IMPORTANT: Broadcast the newly loaded state to ALL clients + run_async(broadcast_world_update) # Fire-and-forget broadcast + st.toast("World loaded successfully!", icon="✅") + # No need to rerun here, broadcast handles client update. + # Let Streamlit finish the current run naturally. + else: + st.error("Failed to load world file.") + st.session_state.current_world_file = None # Revert selection on failure + else: # "Live State" was selected + print("User switched to view Live State (no file loaded).") + # No load needed, but maybe inform user? + st.toast("Switched to view Live State.") + # If switching *from* a loaded file to live state, should we clear world_objects? + # Current logic assumes world_objects always reflects the *active* state, + # whether loaded or built live. This seems reasonable. + # Trigger a rerun to update the UI elements reflecting the new state/selection + st.rerun() + + + # --- Download List --- + st.caption("Download Saved Worlds:") + # Use columns for better layout + cols_dl_header = st.columns([4, 1]) + with cols_dl_header[0]: st.write("**Name** (Timestamp)") + with cols_dl_header[1]: st.write("**DL**") + + display_limit = 10 # Show first N worlds directly + if not saved_worlds: st.caption("No saved worlds found.") + for i, world_info in enumerate(saved_worlds): - if i >= display_limit and len(saved_worlds) > display_limit + 1: # Show expander after limit - with st.expander(f"Show {len(saved_worlds)-display_limit} more..."): - for world_info_more in saved_worlds[display_limit:]: - f_basename = os.path.basename(world_info_more['filename']); f_fullpath = os.path.join(SAVED_WORLDS_DIR, f_basename); display_name = world_info_more.get('name', f_basename); timestamp = world_info_more.get('timestamp', 'N/A') - colA, colB = st.columns([4, 1]); - with colA: st.write(f"{display_name} ({timestamp})", unsafe_allow_html=True) - with colB: st.markdown(get_download_link(f_fullpath, "md"), unsafe_allow_html=True) - break # Stop outer loop after showing expander - - f_basename = os.path.basename(world_info['filename']); f_fullpath = os.path.join(SAVED_WORLDS_DIR, f_basename); display_name = world_info.get('name', f_basename); timestamp = world_info.get('timestamp', 'N/A') + f_basename = os.path.basename(world_info['filename']) + f_fullpath = os.path.join(SAVED_WORLDS_DIR, f_basename) + display_name = world_info.get('name', f_basename); timestamp = world_info.get('timestamp', 'N/A') + + # Handle display limit with expander + if i >= display_limit: + with st.expander(f"Show {len(saved_worlds)-display_limit} more...", expanded=False): + for world_info_more in saved_worlds[display_limit:]: + f_basename_more = os.path.basename(world_info_more['filename']); 
f_fullpath_more = os.path.join(SAVED_WORLDS_DIR, f_basename_more); display_name_more = world_info_more.get('name', f_basename_more); timestamp_more = world_info_more.get('timestamp', 'N/A') + colA, colB = st.columns([4, 1]); + with colA: st.write(f"{display_name_more} ({timestamp_more})", unsafe_allow_html=True) + with colB: st.markdown(get_download_link(f_fullpath_more, "md"), unsafe_allow_html=True) + break # Stop outer loop after starting expander + + # Display items within the limit col1, col2 = st.columns([4, 1]); with col1: st.write(f"{display_name} ({timestamp})", unsafe_allow_html=True) with col2: st.markdown(get_download_link(f_fullpath, "md"), unsafe_allow_html=True) @@ -700,181 +1085,359 @@ def render_sidebar(): st.markdown("---") st.header("🏗️ Build Tools") - st.caption("Select an object to place.") - cols = st.columns(5) + st.caption("Select an object type to place.") + # Use columns for a grid layout + cols_tools = st.columns(5) col_idx = 0 - current_tool = st.session_state.get('selected_object', 'None') + current_tool = st.session_state.get('selected_object', 'None') # Get the currently selected tool type + + # --- Tool Selection Buttons --- for emoji, name in PRIMITIVE_MAP.items(): - button_key = f"primitive_{name}"; button_type = "primary" if current_tool == name else "secondary" - if cols[col_idx % 5].button(emoji, key=button_key, help=name, type=button_type, use_container_width=True): - if st.session_state.get('selected_object', 'None') != name: - st.session_state.selected_object = name - run_async(lambda name_arg=name: streamlit_js_eval(f"updateSelectedObjectType({json.dumps(name_arg)});", key=f"update_tool_js_{name_arg}")) + button_key = f"primitive_tool_{name}"; + # Highlight the currently selected tool + button_type = "primary" if current_tool == name else "secondary" + # Place button in the grid + if cols_tools[col_idx % 5].button(emoji, key=button_key, help=name, type=button_type, use_container_width=True): + # --- Action on Tool Button Click --- + if st.session_state.selected_object != name: + print(f"Tool changed to: {name}") + st.session_state.selected_object = name # Update selected tool in session state + # IMPORTANT: Inform the client-side JS about the tool change + # This JS function should ONLY update the client's internal 'next object type' variable. + # It MUST NOT reset the scene. 
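# --- Illustrative sketch (hypothetical helper, shown only for illustration) ---
# The guarded call built just below repeats a small pattern: JSON-encode the value, check that
# the client-side function exists, then evaluate the snippet with streamlit_js_eval (a
# synchronous component call). The same idea as a reusable helper; `push_js_value` is an
# assumed name, not a function defined elsewhere in app.py:
def push_js_value(js_function_name, value, key):
    """Call a window-level function in index.html with a JSON-encoded value, if it exists."""
    cmd = (
        f"if (typeof window.{js_function_name} === 'function') "
        f"{{ window.{js_function_name}({json.dumps(value)}); }} "
        f"else {{ console.warn('{js_function_name} not found on client'); }}"
    )
    streamlit_js_eval(cmd, key=key)

# Usage, equivalent to the inline calls below:
#   push_js_value("updateSelectedObjectType", name, f"update_tool_js_{name}")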
+ js_command = f"if (typeof window.updateSelectedObjectType === 'function') {{ window.updateSelectedObjectType({json.dumps(name)}); }} else {{ console.warn('updateSelectedObjectType function not found on client'); }}" + run_async(lambda cmd=js_command: streamlit_js_eval(cmd, key=f"update_tool_js_{name}")) + # Rerun needed to update the button highlighting (primary/secondary) st.rerun() + col_idx += 1 - st.markdown("---") - if st.button("🚫 Clear Tool", key="clear_tool", use_container_width=True): - if st.session_state.get('selected_object', 'None') != 'None': - st.session_state.selected_object = 'None'; - run_async(lambda: streamlit_js_eval("updateSelectedObjectType('None');", key="update_tool_js_none")) - st.rerun() + + # --- Clear Tool Button --- + st.markdown("---") # Separator + if st.button("🚫 Clear Tool / Select Mode", key="clear_tool_button", use_container_width=True, help="Switch to selection/interaction mode (no object placement)."): + if st.session_state.selected_object != 'None': + print("Tool cleared (Select Mode enabled).") + st.session_state.selected_object = 'None'; + # Inform client JS + js_command_none = "if (typeof window.updateSelectedObjectType === 'function') { window.updateSelectedObjectType('None'); } else { console.warn('updateSelectedObjectType function not found on client'); }" + run_async(lambda: streamlit_js_eval(js_command_none, key="update_tool_js_none")) + st.rerun() # Rerun to update button highlights st.markdown("---") st.header("🗣️ Voice & User") - current_username = st.session_state.get('username', list(FUN_USERNAMES.keys())[0]) + current_username = st.session_state.get('username', list(FUN_USERNAMES.keys())[0]) # Use loaded username username_options = list(FUN_USERNAMES.keys()); current_index = 0 try: current_index = username_options.index(current_username) - except ValueError: current_index = 0 - new_username = st.selectbox("Change Name/Voice", options=username_options, index=current_index, key="username_select", format_func=lambda x: x.split(" ")[0]) + except ValueError: current_index = 0 # Fallback if name not in list + + # --- Username/Voice Selection --- + new_username = st.selectbox("Change Your Name/Voice:", options=username_options, index=current_index, key="username_select_widget", format_func=lambda x: x) # Show full name with emoji + if new_username != st.session_state.username: old_username = st.session_state.username - change_msg = json.dumps({"type":"user_rename", "payload": {"old_username": old_username, "new_username": new_username}}) - run_async(broadcast_message, change_msg) - st.session_state.username = new_username; st.session_state.tts_voice = FUN_USERNAMES[new_username]; save_username(st.session_state.username) - st.rerun() - st.session_state['enable_audio'] = st.toggle("Enable TTS Audio", value=st.session_state.get('enable_audio', True)) + print(f"Username changed from {old_username} to {new_username}") + st.session_state.username = new_username; + st.session_state.tts_voice = FUN_USERNAMES[new_username]; # Update voice + save_username(st.session_state.username) # Persist choice + + # Broadcast rename event (optional, good for user list UI on client) + rename_msg = json.dumps({"type":"user_rename", "payload": {"old_username": old_username, "new_username": new_username}}) + run_async(broadcast_message, rename_msg) + + # Inform the current client's JS about the username change immediately + js_user_update = f"if(window.updateUsername) {{ window.updateUsername({json.dumps(new_username)}); }} else {{ console.warn('updateUsername 
function not found'); }}" + run_async(lambda: streamlit_js_eval(js_user_update, key="update_user_js")) + + st.rerun() # Rerun to reflect change in UI title etc. + + # --- Audio Toggle --- + st.session_state['enable_audio'] = st.toggle("Enable TTS Audio Output", value=st.session_state.get('enable_audio', True), key="tts_toggle") def render_main_content(): """Renders the main content area with tabs.""" - st.title(f"{Site_Name} - User: {st.session_state.username}") + # Ensure username is initialized before using in title + username_display = st.session_state.get('username', 'N/A') + st.title(f"{Site_Name} - User: {username_display}") tab_world, tab_chat, tab_pdf, tab_files = st.tabs(["🏗️ World Builder", "🗣️ Chat", "📚 PDF Tools", "📂 Files & Settings"]) # --- World Builder Tab --- with tab_world: st.header("Shared 3D World") - st.caption("Place objects using the sidebar tools. Changes are shared live!") + st.caption("Place objects using the sidebar tools. Click objects to potentially delete (implement delete logic in index.html). Changes are shared live!") + + # Display current world status current_file_basename = st.session_state.get('current_world_file', None) if current_file_basename: - parsed = parse_world_filename(os.path.join(SAVED_WORLDS_DIR, current_file_basename)); st.info(f"Current World: **{parsed['name']}** (`{current_file_basename}`)") - else: st.info("Live State Active (Unsaved changes only persist if saved)") + try: parsed = parse_world_filename(os.path.join(SAVED_WORLDS_DIR, current_file_basename)); world_name = parsed['name'] + except Exception: world_name = "Unknown" + st.info(f"Current World: **{world_name}** (Loaded from: `{current_file_basename}`)") + else: st.info("🏗️ Live State Active (Unsaved changes only persist if saved)") - # Embed HTML Component + # --- Embed HTML/JS Component (index.html) --- html_file_path = 'index.html' + if not os.path.exists(html_file_path): + st.error(f"CRITICAL ERROR: Could not find '{html_file_path}'. The 3D view cannot be loaded.") + return # Stop rendering this tab if HTML is missing + try: with open(html_file_path, 'r', encoding='utf-8') as f: html_template = f.read() - ws_url = "ws://localhost:8765" # Default - try: # Get WS URL (Best effort) - from streamlit.web.server.server import Server - session_info = Server.get_current()._get_session_info(st.runtime.scriptrunner.get_script_run_ctx().session_id) - server_host = session_info.ws.stream.request.host.split(':')[0] - ws_url = f"ws://{server_host}:8765" - except Exception as e: print(f"WS URL detection failed ({e}), using localhost.") + # --- Determine WebSocket URL --- + ws_url = "ws://localhost:8765" # Default fallback + # Attempt to dynamically get the host the browser is connected to + try: + # This relies on internal Streamlit structure, might break in future versions + # Prefer getting host from JS if possible, but this is a server-side attempt + # session_info = st.runtime.get_instance().get_client(st.runtime.scriptrunner.get_script_run_ctx().session_id) + # server_host = session_info.request.host.split(':')[0] # Needs checking if this path is valid + + # More robust: Ask JS for its `window.location.hostname` + # This requires streamlit_js_eval to be ready, might have timing issues on first load. + # Let's stick to a configurable approach or rely on JS determining it. + # For local testing, localhost is fine. For deployment, need configuration. + # Using localhost as a placeholder, assuming server runs locally relative to client. 
+ # PRODUCTION: Replace with actual external IP/domain or configure via env var. + server_host = os.getenv("WEBSOCKET_HOST", "localhost") # Allow override via env var + ws_port = os.getenv("WEBSOCKET_PORT", "8765") + ws_proto = "ws" # Use "wss" if using TLS/SSL + ws_url = f"{ws_proto}://{server_host}:{ws_port}" + print(f"Using WebSocket URL: {ws_url}") + + except Exception as e: print(f"WS URL detection failed ({e}), using default: {ws_url}") + + # --- Inject State into JavaScript --- + # This script block is injected into the <head> of index.html; the window.* names below must match what index.html reads on load. js_injection_script = f"""<script> window.USERNAME = {json.dumps(st.session_state.username)}; window.WEBSOCKET_URL = {json.dumps(ws_url)}; window.SELECTED_OBJECT_TYPE = {json.dumps(st.session_state.selected_object)}; window.PLOT_WIDTH = {json.dumps(PLOT_WIDTH)}; window.PLOT_DEPTH = {json.dumps(PLOT_DEPTH)}; console.log("Streamlit state injected:", window.USERNAME, window.WEBSOCKET_URL, window.SELECTED_OBJECT_TYPE); </script>""" + # Insert the script just before the closing </head> tag html_content_with_state = html_template.replace('</head>', js_injection_script + '\n</head>', 1) + + # --- Render the HTML Component --- components.html(html_content_with_state, height=700, scrolling=False)
- except FileNotFoundError: st.error(f"CRITICAL ERROR: Could not find '{html_file_path}'.") - except Exception as e: st.error(f"Error loading 3D component: {e}"); st.exception(e) + + except Exception as e: st.error(f"Error loading or embedding the 3D component: {e}"); st.exception(e)
# --- Chat Tab --- with tab_chat: st.header(f"{START_ROOM} Chat") - chat_history_task = run_async(load_chat_history) # Schedule load - chat_history = chat_history_task.result() if chat_history_task else st.session_state.chat_history # Get result if task ran sync, else use current state + # Async loading is only needed if the history grows large; use the session state copy, which save_chat_entry keeps updated. + chat_history = st.session_state.get('chat_history', []) + + # Display chat history (most recent messages first) chat_container = st.container(height=500) with chat_container: - if chat_history: st.markdown("----\n".join(reversed(chat_history[-50:]))) - else: st.caption("No chat messages yet.") - - message_value = st.text_input("Your Message:", key="message_input", label_visibility="collapsed") - send_button_clicked = st.button("Send Chat 💬", key="send_chat_button") - should_autosend = st.session_state.get('autosend', False) and message_value - - if send_button_clicked or should_autosend: + if chat_history: + # Display messages, ensuring Markdown is handled + for entry in reversed(chat_history[-100:]): # Show last 100 messages + st.markdown(entry, unsafe_allow_html=True) # Allow basic HTML if needed, be cautious + else: st.caption("No chat messages yet. Say hello!") + + # --- Chat Input Form --- + # Use a form for better Enter key handling + with st.form(key="chat_form", clear_on_submit=True): + message_value = st.text_input("Your Message:", key="message_input_widget", label_visibility="collapsed", placeholder="Type your message...") + send_button_clicked = st.form_submit_button("Send Chat 💬") + + # Process message sending outside the form + if send_button_clicked and message_value.strip(): message_to_send = message_value - if message_to_send.strip() and message_to_send != st.session_state.get('last_message', ''): + # Avoid sending same message twice in quick succession + if message_to_send != st.session_state.get('last_message', ''): st.session_state.last_message = message_to_send - voice = FUN_USERNAMES.get(st.session_state.username, "en-US-AriaNeural") - ws_message = json.dumps({"type": "chat_message", "payload": {"username": st.session_state.username, "message": message_to_send, "voice": voice}}) - run_async(broadcast_message, ws_message) + voice = FUN_USERNAMES.get(st.session_state.username, EDGE_TTS_VOICES[0]) # Get current voice + + # 1.
Create WebSocket message payload + ws_message_payload = { + "type": "chat_message", + "payload": {"username": st.session_state.username, "message": message_to_send, "voice": voice} + } + ws_message_json = json.dumps(ws_message_payload) + + # 2. Broadcast message to other clients + run_async(broadcast_message, ws_message_json) # Fire and forget broadcast + + # 3. Save entry locally (adds to session history, saves file, generates TTS) + # This also updates st.session_state.chat_history run_async(save_chat_entry, st.session_state.username, message_to_send, voice) - st.session_state.message_input = "" # Clear state for next run - st.rerun() + + # No need to rerun here, session state is updated, broadcast informs others. + # Streamlit should update the chat display on the *next* natural interaction or scheduled refresh. + # If immediate update is desired, could potentially use js_eval to add message client-side, + # but relying on session state update is usually sufficient. + # Force immediate redraw of chat (might feel sluggish): st.rerun() + elif send_button_clicked: st.toast("Message empty or same as last.") - st.checkbox("Autosend Chat", key="autosend") + # Autosend option removed for simplicity, form submit is clearer # --- PDF Tab --- with tab_pdf: - st.header("📚 PDF Tools") - pdf_file = st.file_uploader("Upload PDF for Audio Conversion", type="pdf", key="pdf_upload") - max_pages = st.slider('Max Pages to Process', 1, 50, 10, key="pdf_pages") - if pdf_file: - if st.button("Process PDF to Audio", key="process_pdf_button"): - with st.spinner("Processing PDF... This may take time."): - process_pdf_tab(pdf_file, max_pages, st.session_state.tts_voice) + st.header("📚 PDF Tools") + st.caption("Extract text from PDF pages and generate audio using Edge TTS.") + pdf_file = st.file_uploader("Upload PDF for Audio Conversion", type="pdf", key="pdf_upload_widget") + max_pages = st.slider('Max Pages to Process:', 1, 50, 10, key="pdf_pages_slider", help="Limits processing to the first N pages.") + + if pdf_file: + if st.button("Process PDF to Audio 🔊", key="process_pdf_button"): + # Show spinner during processing + with st.spinner(f"Processing '{pdf_file.name}'... 
This may take time."): + process_pdf_tab(pdf_file, max_pages, st.session_state.tts_voice) + st.caption("Click the button above to start processing the uploaded PDF.") + else: st.info("Upload a PDF file using the uploader above.") + # --- Files & Settings Tab --- with tab_files: - st.header("📂 Files & Settings") + st.header("📂 Files, Settings & Server") st.subheader("💾 World State Management") current_file_basename = st.session_state.get('current_world_file', None) - if current_file_basename: - parsed = parse_world_filename(os.path.join(SAVED_WORLDS_DIR, current_file_basename)) - save_label = f"Save Changes to '{parsed['name']}'" - if st.button(save_label, key="save_current_world", help=f"Overwrite '{current_file_basename}'"): - with st.spinner(f"Overwriting {current_file_basename}..."): - if save_world_state_to_md(current_file_basename): st.success("Current world saved!") - else: st.error("Failed to save world state.") - else: - st.info("Load a world from the sidebar to enable saving changes to it.") - - st.subheader("Save As New Version") - new_name_files = st.text_input("New World Name:", key="new_world_name_files", value=st.session_state.get('new_world_name', 'MyWorld')) - if st.button("💾 Save Live State as New Version", key="save_new_version_files"): - if new_name_files.strip(): - new_filename_base = generate_world_save_filename(new_name_files) - with st.spinner(f"Saving new version '{new_name_files}'..."): - if save_world_state_to_md(new_filename_base): - st.success(f"Saved as {new_filename_base}") - st.session_state.current_world_file = new_filename_base; st.session_state.new_world_name = "MyWorld"; st.rerun() - else: st.error("Failed to save new version.") - else: st.warning("Please enter a name.") + # --- Overwrite Save --- + save_col, save_as_col = st.columns(2) + with save_col: + # Enable overwrite only if a world is currently loaded + can_overwrite = current_file_basename is not None + if st.button("💾 Save Changes", key="save_current_world_button", disabled=not can_overwrite, help="Overwrite the currently loaded world file with the live state."): + if current_file_basename: # Should always be true if button is enabled + parsed = parse_world_filename(os.path.join(SAVED_WORLDS_DIR, current_file_basename)) + world_name_to_save = parsed.get('name', current_file_basename) + with st.spinner(f"Overwriting '{world_name_to_save}'..."): + if save_world_state_to_md(current_file_basename): st.success(f"World '{world_name_to_save}' saved successfully!") + else: st.error("Failed to save world state.") + else: st.warning("No world file loaded to save over.") # Should not happen if disabled logic works + elif not can_overwrite: st.caption("Load a world from the sidebar to enable saving changes to it.") + + # --- Save As New --- + with save_as_col: + st.text_input("New World Name:", key="new_world_name_input", value=st.session_state.get('new_world_name', 'MyWorld')) + if st.button("💾 Save Live State as New", key="save_new_version_button", help="Save the current live state as a new world file."): + new_name = st.session_state.new_world_name_input.strip() + if new_name: + new_filename_base = generate_world_save_filename(new_name) + with st.spinner(f"Saving new version '{new_name}'..."): + if save_world_state_to_md(new_filename_base): + st.success(f"Saved as new world: '{new_filename_base}'") + # Update state to reflect the newly saved file is now "loaded" + st.session_state.current_world_file = new_filename_base; + st.session_state.new_world_name = "MyWorld"; # Reset input for next time + 
st.rerun() # Rerun to update sidebar selection and file list + else: st.error("Failed to save new version.") + else: st.warning("Please enter a name for the new world version.") + st.markdown("---") st.subheader("⚙️ Server Status") + # Check server thread status more reliably + server_thread = st.session_state.get('server_task') + server_thread_alive = server_thread is not None and server_thread.is_alive() + # Use the running flag as primary indicator, as thread might be alive but server failed startup + server_running = st.session_state.get('server_running_flag', False) and server_thread_alive + col_ws, col_clients = st.columns(2) with col_ws: - server_alive = st.session_state.get('server_task') and st.session_state.server_task.is_alive(); ws_status = "Running" if server_alive else "Stopped"; st.metric("WebSocket Server", ws_status) - if not server_alive and st.button("Restart Server Thread", key="restart_ws"): start_websocket_server_thread(); st.rerun() - with col_clients: st.metric("Connected Clients", len(connected_clients)) + ws_status = "Running" if server_running else "Stopped" + st.metric("WebSocket Server", ws_status) + # Show start/stop button based on status + if not server_running: + if st.button("Start Server", key="start_ws_button"): start_websocket_server_thread(); time.sleep(1); st.rerun() # Allow time for thread start attempt + else: + if st.button("Stop Server", key="stop_ws_button", type="primary"): + if st.session_state.get('websocket_stop_event'): + print("Setting stop event for WebSocket server...") + st.session_state.websocket_stop_event.set() # Signal server loop to stop + st.session_state.websocket_stop_event = None # Clear event for potential restart + time.sleep(1) # Give server time to react + st.rerun() + else: st.warning("Stop event not found, cannot stop server gracefully.") + + with col_clients: + # Get client count safely from the global set + st.metric("Connected Clients", len(connected_clients)) - st.subheader("🗑️ Delete Files") + + st.markdown("---") + st.subheader("🗑️ Delete Generated Files") st.warning("Deletion is permanent!", icon="⚠️") + # Use columns for delete buttons col_del1, col_del2, col_del3, col_del4 = st.columns(4) with col_del1: - if st.button("🗑️ Chats", key="del_chat_md"): delete_files([os.path.join(CHAT_DIR, "*.md")]); st.session_state.chat_history = []; st.rerun() + if st.button("🗑️ Chats", key="del_chat_button"): delete_files([os.path.join(CHAT_DIR, "*.md")]); st.session_state.chat_history = []; st.rerun() with col_del2: - if st.button("🗑️ Audio", key="del_audio_mp3"): delete_files([os.path.join(AUDIO_DIR, "*.mp3"), os.path.join(AUDIO_CACHE_DIR, "*.mp3")]); st.session_state.audio_cache = {}; st.rerun() + if st.button("🗑️ Audio", key="del_audio_button"): delete_files([os.path.join(AUDIO_DIR, "*.mp3"), os.path.join(AUDIO_CACHE_DIR, "*.mp3"), os.path.join(AUDIO_CACHE_DIR, "metadata.json")]); st.session_state.audio_cache = {}; st.rerun() with col_del3: - if st.button("🗑️ Worlds", key="del_worlds_md"): delete_files([os.path.join(SAVED_WORLDS_DIR, f"{WORLD_STATE_FILE_MD_PREFIX}*.md")]); st.session_state.current_world_file = None; st.rerun() + # Be specific with world file pattern + if st.button("🗑️ Worlds", key="del_worlds_button"): delete_files([os.path.join(SAVED_WORLDS_DIR, f"{WORLD_STATE_FILE_MD_PREFIX}*.md")]); st.session_state.current_world_file = None; run_async(broadcast_world_update); st.rerun() # Clear current selection and notify clients with col_del4: - if st.button("🗑️ All Gen", key="del_all_gen"): 
delete_files([os.path.join(CHAT_DIR, "*.md"), os.path.join(AUDIO_DIR, "*.mp3"), os.path.join(AUDIO_CACHE_DIR, "*.mp3"), os.path.join(SAVED_WORLDS_DIR, "*.md"), "*.zip"]); st.session_state.chat_history = []; st.session_state.audio_cache = {}; st.session_state.current_world_file = None; st.rerun() + if st.button("🗑️ All Gen", key="del_all_gen_button", type="primary"): + patterns = [ + os.path.join(CHAT_DIR, "*.md"), + os.path.join(AUDIO_DIR, "*.mp3"), + os.path.join(AUDIO_CACHE_DIR, "*.mp3"), + os.path.join(AUDIO_CACHE_DIR, "metadata.json"), + os.path.join(SAVED_WORLDS_DIR, f"{WORLD_STATE_FILE_MD_PREFIX}*.md"), + os.path.join(MEDIA_DIR, "*.zip"), # Zip files in base dir + os.path.join(MEDIA_DIR, "*_pasted_*.png") # Pasted images in base dir + ] + delete_files(patterns) + # Reset relevant session state parts + st.session_state.chat_history = []; st.session_state.audio_cache = {}; st.session_state.current_world_file = None; + # Clear server world state and notify clients + with world_objects_lock: world_objects.clear() + run_async(broadcast_world_update) + st.rerun() + st.markdown("---") st.subheader("📦 Download Archives") + # Buttons to create Zip archives + col_zip1, col_zip2, col_zip3 = st.columns(3) + with col_zip1: + if st.button("Zip Worlds 📦"): + world_files_full_paths = [os.path.join(SAVED_WORLDS_DIR, os.path.basename(w['filename'])) for w in get_saved_worlds()] + create_zip_of_files(world_files_full_paths, "Worlds"); st.rerun() + with col_zip2: + if st.button("Zip Chats 📦"): create_zip_of_files(glob.glob(os.path.join(CHAT_DIR, "*.md")), "Chats"); st.rerun() + with col_zip3: + if st.button("Zip Audio 📦"): create_zip_of_files(glob.glob(os.path.join(AUDIO_DIR, "*.mp3")) + glob.glob(os.path.join(AUDIO_CACHE_DIR, "*.mp3")), "Audio"); st.rerun() + + # List existing Zip files for download zip_files = sorted(glob.glob(os.path.join(MEDIA_DIR,"*.zip")), key=os.path.getmtime, reverse=True) if zip_files: - col_zip1, col_zip2, col_zip3 = st.columns(3) - with col_zip1: - if st.button("Zip Worlds"): create_zip_of_files([w['filename'] for w in get_saved_worlds()], "Worlds") - with col_zip2: - if st.button("Zip Chats"): create_zip_of_files(glob.glob(os.path.join(CHAT_DIR, "*.md")), "Chats") - with col_zip3: - if st.button("Zip Audio"): create_zip_of_files(glob.glob(os.path.join(AUDIO_DIR, "*.mp3")) + glob.glob(os.path.join(AUDIO_CACHE_DIR, "*.mp3")), "Audio") - - st.caption("Existing Zip Files:") - for zip_file in zip_files: st.markdown(get_download_link(zip_file, "zip"), unsafe_allow_html=True) - else: - # Ensure correct indentation here for the else block - st.caption("No zip archives found.") + st.caption("Existing Zip Archives:") + for zip_file in zip_files: + # Display filename and download link + col_zip_name, col_zip_dl = st.columns([4,1]) + with col_zip_name: st.write(f"{os.path.basename(zip_file)}", unsafe_allow_html=True) + with col_zip_dl: st.markdown(get_download_link(zip_file, "zip"), unsafe_allow_html=True) + else: st.caption("No zip archives created yet.") # ============================================================================== @@ -883,34 +1446,56 @@ def render_main_content(): def initialize_world(): """Loads initial world state (most recent) if not already done for this session.""" + # Only run this once per Streamlit session if not st.session_state.get('initial_world_state_loaded', False): - print("Performing initial world load for session...") - saved_worlds = get_saved_worlds() - loaded_successfully = False + print("Performing initial world load check for session...") + 
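# --- Illustrative sketch: the shape get_saved_worlds() is assumed to have ---
# The startup logic below relies on get_saved_worlds() (defined earlier in app.py, outside this
# hunk) returning world metadata sorted newest-first, so that saved_worlds[0] is the most recent
# save. A minimal version consistent with that assumption, under a hypothetical name:
def _get_saved_worlds_sketch():
    paths = glob.glob(os.path.join(SAVED_WORLDS_DIR, f"{WORLD_STATE_FILE_MD_PREFIX}*.md"))
    worlds = [parse_world_filename(p) for p in paths]
    # Sort by parsed datetime, newest first; entries without a datetime sort last.
    worlds.sort(key=lambda w: w.get('dt') or datetime.min.replace(tzinfo=pytz.utc), reverse=True)
    return worlds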
saved_worlds = get_saved_worlds() # Get sorted list + loaded_file_basename = None + if saved_worlds: - latest_world_file_basename = os.path.basename(saved_worlds[0]['filename']) - print(f"Loading most recent world on startup: {latest_world_file_basename}") - if load_world_state_from_md(latest_world_file_basename): loaded_successfully = True - else: print("Failed to load most recent world, starting empty.") - else: print("No saved worlds found, starting with empty state.") - if not loaded_successfully: - with world_objects_lock: world_objects.clear() - st.session_state.current_world_file = None + # Load the most recent valid world file found + latest_world_file_basename = os.path.basename(saved_worlds[0]['filename']) + print(f"Attempting to load most recent world on startup: {latest_world_file_basename}") + # load_world_state_from_md updates world_objects and sets current_world_file on success + if load_world_state_from_md(latest_world_file_basename): + print(f"Successfully loaded initial world: {latest_world_file_basename}") + loaded_file_basename = latest_world_file_basename # Keep track + else: + print("Failed to load most recent world, starting empty.") + with world_objects_lock: world_objects.clear() # Ensure clean state on failure + st.session_state.current_world_file = None # Ensure no file is marked as loaded + else: + print("No saved worlds found, starting with empty state.") + with world_objects_lock: world_objects.clear() # Ensure clean state + st.session_state.current_world_file = None + + # Mark initial load as complete, regardless of success st.session_state.initial_world_state_loaded = True print("Initial world load process complete.") if __name__ == "__main__": - # 1. Initialize session state + # 1. Initialize session state (must be first) init_session_state() - # 2. Start WebSocket server thread if needed - server_thread = st.session_state.get('server_task'); server_alive = server_thread is not None and server_thread.is_alive() - if not st.session_state.get('server_running_flag', False) and not server_alive: start_websocket_server_thread() - elif server_alive and not st.session_state.get('server_running_flag', False): st.session_state.server_running_flag = True - - # 3. Load initial world state (once per session) + # 2. Start WebSocket server thread if not running + # Check both flag and thread aliveness for robustness + server_thread = st.session_state.get('server_task') + server_thread_alive = server_thread is not None and server_thread.is_alive() + if not st.session_state.get('server_running_flag', False) or not server_thread_alive: + print("Main: Server detected as not running or thread dead, attempting start...") + start_websocket_server_thread() + else: + print("Main: Server appears to be running.") + + # 3. Load initial world state (once per session, after server thread start attempt) initialize_world() - # 4. Render UI + # 4. Render UI components render_sidebar() - render_main_content() \ No newline at end of file + render_main_content() + + # 5. Optional: Auto-refresh mechanism (if needed, be cautious of performance) + if st.session_state.get('auto_refresh', False): + refresh_interval = st.session_state.get('refresh_rate', 30) + time.sleep(refresh_interval) + st.rerun() \ No newline at end of file
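# --- Illustrative sketch: thread-safe stop signal for the WebSocket server ---
# The "Stop Server" button in the Files & Settings tab sets an asyncio.Event from the Streamlit
# script thread, but asyncio primitives are not thread-safe: setting the event from another
# thread may never wake the server loop awaiting it. A more robust variant (hypothetical names;
# assumes Python 3.10+, where asyncio.Event is loop-agnostic) keeps a reference to the server
# thread's event loop and schedules Event.set() on that loop:
def start_server_with_safe_stop(handler, host="0.0.0.0", port=8765):
    """Run a websockets server in a daemon thread; returns (thread, stop_fn)."""
    loop = asyncio.new_event_loop()
    stop_event = asyncio.Event()

    async def _serve():
        async with websockets.serve(handler, host, port):
            await stop_event.wait()  # Serve until a stop is requested

    def _run():
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(_serve())
        finally:
            loop.close()

    thread = threading.Thread(target=_run, daemon=True)
    thread.start()

    def stop():
        # Safe to call from any thread: schedules Event.set() on the server's own loop.
        loop.call_soon_threadsafe(stop_event.set)

    return thread, stop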