import gradio as gr
import json
import importlib
import os
import sys
from pathlib import Path
import concurrent.futures
import multiprocessing
import time
import threading
import queue
import uuid
import numpy as np
from datetime import datetime
from tqdm.auto import tqdm
import redis
import pickle

from src.containerized_eval import eval_string_script

# Add current directory and src directory to module search path
current_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.join(current_dir, "src")
if current_dir not in sys.path:
    sys.path.append(current_dir)
if src_dir not in sys.path:
    sys.path.append(src_dir)

# Initialize Redis connection (will use environment variables in Hugging Face Space)
REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379/0')
redis_client = redis.from_url(REDIS_URL)

# Keys for Redis
QUEUE_KEY = 'eval_task_queue'
STATUS_KEY = 'eval_task_status'
HISTORY_KEY = 'eval_task_history'
TASK_TIMES_KEY = 'eval_task_times'

# Local queue for worker threads
local_task_queue = queue.Queue()

# Lock for shared resources
lock = threading.Lock()

# Number of worker threads
worker_threads = max(1, multiprocessing.cpu_count() // 2)  # Use half the available cores for better stability

# Flag for running background threads
running = True


def redis_queue_monitor():
    """Monitor Redis queue and add tasks to local queue"""
    last_check = 0
    while running:
        try:
            # Check Redis queue every second
            if time.time() - last_check >= 1:
                last_check = time.time()
                # Get all tasks in the queue
                task_list = redis_client.lrange(QUEUE_KEY, 0, -1)
                for task_data in task_list:
                    task = pickle.loads(task_data)
                    task_id = task['id']
                    # Check if task is already in processing
                    status_data = redis_client.hget(STATUS_KEY, task_id)
                    if status_data:
                        status = pickle.loads(status_data)
                        if status['status'] == 'queued':
                            # Add to local queue if not already processing
                            local_task_queue.put((task_id, task['input_data'], task['request_time']))
                            # Update status to processing
                            with lock:
                                status['status'] = 'processing'
                                status['start_time'] = time.time()
                                redis_client.hset(STATUS_KEY, task_id, pickle.dumps(status))
                            # Remove from Redis queue
                            redis_client.lrem(QUEUE_KEY, 1, task_data)
            time.sleep(0.1)
        except Exception as e:
            print(f"Redis queue monitor error: {e}")
            time.sleep(1)


def queue_processor():
    """Process tasks in the local queue"""
    while running:
        try:
            task_id, input_data, request_time = local_task_queue.get(timeout=0.1)

            # Get current status
            status_data = redis_client.hget(STATUS_KEY, task_id)
            if status_data:
                task_status = pickle.loads(status_data)
            else:
                task_status = {
                    'status': 'processing',
                    'queued_time': request_time,
                    'start_time': time.time()
                }

            # Update status
            task_status['status'] = 'processing'
            task_status['start_time'] = time.time()
            redis_client.hset(STATUS_KEY, task_id, pickle.dumps(task_status))

            if isinstance(input_data, list) and len(input_data) > 0:
                sample_task = input_data[0]
                language = sample_task.get('language', 'unknown') if isinstance(sample_task, dict) else 'unknown'
                task_size = len(input_data)
                task_complexity = _estimate_task_complexity(input_data)
                estimated_factors = {
                    'language': language,
                    'size': task_size,
                    'complexity': task_complexity
                }
                task_status['estimated_factors'] = estimated_factors
                redis_client.hset(STATUS_KEY, task_id, pickle.dumps(task_status))

            result = evaluate(input_data)

            end_time = time.time()
            process_time = end_time - task_status['start_time']

            # Update status
            task_status['status'] = 'completed'
            task_status['result'] = result
            task_status['end_time'] = end_time
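            # Record completion details; process_time feeds the per-language /
            # per-complexity running estimates stored under TASK_TIMES_KEY.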
            task_status['process_time'] = process_time
            redis_client.hset(STATUS_KEY, task_id, pickle.dumps(task_status))

            # Update task type times
            if 'estimated_factors' in task_status:
                factors = task_status['estimated_factors']
                key = f"{factors['language']}_{factors['complexity']}"

                # Update task times in Redis
                times_data = redis_client.hget(TASK_TIMES_KEY, key)
                if times_data:
                    times = pickle.loads(times_data)
                else:
                    times = []

                times.append(process_time / factors['size'])
                if len(times) > 10:
                    times = times[-10:]
                redis_client.hset(TASK_TIMES_KEY, key, pickle.dumps(times))

            # Add to history
            history_item = {
                'task_id': task_id,
                'request_time': request_time,
                'process_time': process_time,
                'status': 'completed',
                'factors': task_status.get('estimated_factors', {})
            }

            # Get current history
            history_data = redis_client.get(HISTORY_KEY)
            if history_data:
                history = pickle.loads(history_data)
            else:
                history = []
            history.append(history_item)
            while len(history) > 200:
                history.pop(0)
            redis_client.set(HISTORY_KEY, pickle.dumps(history))

            local_task_queue.task_done()
        except queue.Empty:
            continue
        except Exception as e:
            if 'task_id' in locals():
                status_data = redis_client.hget(STATUS_KEY, task_id)
                if status_data:
                    task_status = pickle.loads(status_data)
                else:
                    task_status = {}
                task_status['status'] = 'error'
                task_status['error'] = str(e)
                task_status['end_time'] = time.time()
                redis_client.hset(STATUS_KEY, task_id, pickle.dumps(task_status))
                local_task_queue.task_done()


def _estimate_task_complexity(tasks):
    """Estimate task complexity.

    Returns: 'simple', 'medium', or 'complex'
    """
    total_code_length = 0
    count = 0

    for task in tasks:
        if isinstance(task, dict):
            prompt = task.get('prompt', '')
            tests = task.get('tests', '')
            completions = task.get('processed_completions', [])

            code_length = len(prompt) + len(tests)
            if completions:
                code_length += sum(len(comp) for comp in completions)

            total_code_length += code_length
            count += 1

    if count == 0:
        return 'medium'

    avg_length = total_code_length / count
    if avg_length < 1000:
        return 'simple'
    elif avg_length < 5000:
        return 'medium'
    else:
        return 'complex'


def evaluate(input_data):
    """Main function for code evaluation"""
    try:
        if not isinstance(input_data, list):
            return {"status": "Exception", "error": "Input must be a list"}

        results = []
        # Use a moderate number of workers for all language tests to ensure stability;
        # this prevents resource contention regardless of language.
        max_workers = max(1, min(multiprocessing.cpu_count() // 2, 4))

        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_item = {executor.submit(evaluate_single_case, item): item for item in input_data}
            for future in concurrent.futures.as_completed(future_to_item):
                item = future_to_item[future]
                try:
                    result = future.result()
                    item.update(result)
                    results.append(item)
                except Exception as e:
                    item.update({"status": "Exception", "error": str(e)})
                    results.append(item)
        return results
    except Exception as e:
        return {"status": "Exception", "error": str(e)}


def evaluate_single_case(input_data):
    """Evaluate a single code case"""
    try:
        if not isinstance(input_data, dict):
            return {"status": "Exception", "error": "Input item must be a dictionary"}

        language = input_data.get('language')
        completions = input_data.get('processed_completions', [])
        if not completions:
            return {"status": "Exception", "error": "No code provided"}

        # Use a retry mechanism for all languages for better reliability
        max_retries = 2  # Up to two retries per test case

        results = []
        for comp in completions:
            code = input_data.get('prompt') + comp + '\n' + input_data.get('tests')
            # Try up to max_retries + 1 times for all test cases
            for attempt in range(max_retries + 1):
                result = evaluate_code(code, language)
                # If success or last attempt, return/record the result
                if result["status"] == "OK" or attempt == max_retries:
                    if result["status"] == "OK":
                        return result
                    results.append(result)
                    break
                # For retries, briefly wait to allow resources to stabilize
                time.sleep(0.3)
        return results[0]
    except Exception as e:
        return {"status": "Exception", "error": str(e)}


def evaluate_code(code, language):
    """Evaluate code in a specific language"""
    try:
        result = eval_string_script(language, code)
        return result
    except Exception as e:
        return {"status": "Exception", "error": str(e)}


def synchronous_evaluate(input_data):
    """Synchronously evaluate code, compatible with original interface"""
    if isinstance(input_data, list) and len(input_data) > 0:
        sample_task = input_data[0]
        language = sample_task.get('language', 'unknown') if isinstance(sample_task, dict) else 'unknown'
        task_size = len(input_data)
        task_complexity = _estimate_task_complexity(input_data)
    else:
        language = 'unknown'
        task_size = 1
        task_complexity = 'medium'

    estimated_time_per_task = _get_estimated_time_for_task(language, task_complexity)
    estimated_total_time = estimated_time_per_task * task_size

    queue_info = get_queue_status()
    waiting_tasks = queue_info['waiting_tasks']

    task_id = str(uuid.uuid4())
    request_time = time.time()

    task_status = {
        'status': 'queued',
        'queued_time': request_time,
        'queue_position': queue_info['queue_size'] + 1,
        'synchronous': True,
        'estimated_factors': {
            'language': language,
            'size': task_size,
            'complexity': task_complexity
        },
        'estimated_time': estimated_total_time
    }
    redis_client.hset(STATUS_KEY, task_id, pickle.dumps(task_status))

    # Add to queue
    task = {
        'id': task_id,
        'input_data': input_data,
        'request_time': request_time
    }
    redis_client.rpush(QUEUE_KEY, pickle.dumps(task))

    while True:
        status_data = redis_client.hget(STATUS_KEY, task_id)
        if status_data:
            status_info = pickle.loads(status_data)
            if status_info['status'] == 'completed':
                result = status_info.get('result', {"status": "Exception", "error": "No result found"})
                redis_client.hdel(STATUS_KEY, task_id)
                return result
            elif status_info['status'] == 'error':
                error = status_info.get('error', 'Unknown error')
                redis_client.hdel(STATUS_KEY, task_id)
                return {"status": "Exception", "error": error}
        time.sleep(0.1)


def _get_estimated_time_for_task(language, complexity):
    """Get estimated processing time for a specific task type"""
    key = f"{language}_{complexity}"
    times_data = redis_client.hget(TASK_TIMES_KEY, key)
    if times_data:
        times = pickle.loads(times_data)
        if times:
            return np.median(times)

    if complexity == 'simple':
        return 1.0
    elif complexity == 'medium':
        return 3.0
    else:  # complex
        return 8.0


def enqueue_task(input_data):
    """Add task to queue"""
    if isinstance(input_data, list) and len(input_data) > 0:
        sample_task = input_data[0]
        language = sample_task.get('language', 'unknown') if isinstance(sample_task, dict) else 'unknown'
        task_size = len(input_data)
        task_complexity = _estimate_task_complexity(input_data)
    else:
        language = 'unknown'
        task_size = 1
        task_complexity = 'medium'

    estimated_time_per_task = _get_estimated_time_for_task(language, task_complexity)
    estimated_total_time = estimated_time_per_task * task_size

    task_id = str(uuid.uuid4())
    request_time = time.time()
    queue_info = get_queue_status()

    task_status = {
        'status': 'queued',
        'queued_time': request_time,
        'queue_position': queue_info['queue_size'] + 1,
        'estimated_factors': {
            'language': language,
            'size': task_size,
            'complexity': task_complexity
        },
        'estimated_time': estimated_total_time
    }
    redis_client.hset(STATUS_KEY, task_id, pickle.dumps(task_status))

    # Add to queue
    task = {
        'id': task_id,
        'input_data': input_data,
        'request_time': request_time
    }
    redis_client.rpush(QUEUE_KEY, pickle.dumps(task))

    est_wait = queue_info['estimated_wait']
    return {
        'task_id': task_id,
        'status': 'queued',
        'queue_position': task_status['queue_position'],
        'estimated_wait': est_wait,
        'estimated_processing': estimated_total_time
    }


def check_status(task_id):
    """Check task status"""
    status_data = redis_client.hget(STATUS_KEY, task_id)
    if not status_data:
        return {'status': 'not_found'}

    status_info = pickle.loads(status_data)
    if status_info['status'] in ['completed', 'error'] and time.time() - status_info.get('end_time', 0) > 3600:
        redis_client.hdel(STATUS_KEY, task_id)
    return status_info


def get_queue_status():
    """Get queue status"""
    # Get all task statuses
    all_statuses = redis_client.hgetall(STATUS_KEY)
    queued_tasks = []
    processing_tasks = []
    for task_id, status_data in all_statuses.items():
        status_info = pickle.loads(status_data)
        if status_info['status'] == 'queued':
            queued_tasks.append(status_info)
        elif status_info['status'] == 'processing':
            processing_tasks.append(status_info)

    queue_size = redis_client.llen(QUEUE_KEY)
    active_tasks = len(processing_tasks)
    waiting_tasks = len(queued_tasks)

    remaining_processing_time = 0
    for task in processing_tasks:
        if 'start_time' in task and 'estimated_time' in task:
            elapsed = time.time() - task['start_time']
            remaining = max(0, task['estimated_time'] - elapsed)
            remaining_processing_time += remaining
        else:
            remaining_processing_time += 2
    if active_tasks > 0:
        remaining_processing_time = remaining_processing_time / min(active_tasks, worker_threads)

    queued_processing_time = 0
    for task in queued_tasks:
        if 'estimated_time' in task:
            queued_processing_time += task['estimated_time']
        else:
            queued_processing_time += 5
    if worker_threads > 0 and queued_processing_time > 0:
        queued_processing_time = queued_processing_time / worker_threads

    estimated_wait = remaining_processing_time + queued_processing_time

    # Get task history
    history_data = redis_client.get(HISTORY_KEY)
    if history_data:
        task_history = pickle.loads(history_data)
    else:
        task_history = []

    if task_history:
        prediction_ratios = []
        for task in task_history:
            if 'factors' in task and 'estimated_time' in task:
                prediction_ratios.append(task['process_time'] / task['estimated_time'])
        if prediction_ratios:
            correction_factor = np.median(prediction_ratios)
            correction_factor = max(0.5, min(2.0, correction_factor))
            estimated_wait *= correction_factor

    estimated_wait = max(0.1, estimated_wait)
    if waiting_tasks == 0 and active_tasks == 0:
        estimated_wait = 0

    recent_tasks = task_history[-5:] if task_history else []

    return {
        'queue_size': queue_size,
        'active_tasks': active_tasks,
        'waiting_tasks': waiting_tasks,
        'worker_threads': worker_threads,
        'estimated_wait': estimated_wait,
        'recent_tasks': recent_tasks
    }


def format_time(seconds):
    """Format time into readable format"""
    if seconds < 60:
        return f"{seconds:.1f} seconds"
    elif seconds < 3600:
        minutes = int(seconds / 60)
        seconds = seconds % 60
        return f"{minutes}m {seconds:.1f}s"
    else:
        hours = int(seconds / 3600)
        minutes = int((seconds % 3600) / 60)
        return f"{hours}h {minutes}m"


def ui_get_queue_info():
    """Get queue info for UI"""
    queue_info = get_queue_status()

    tasks_html = ""
    for task in reversed(queue_info['recent_tasks']):
        tasks_html += f"""
        <tr>
            <td>{task['task_id']}</td>
            <td>{datetime.fromtimestamp(task['request_time']).strftime('%H:%M:%S')}</td>
            <td>{format_time(task['process_time'])}</td>
        </tr>
        """

    # Summary panel (minimal reconstructed markup): estimated wait, last update,
    # and the recent-task table.
    return f"""
    <div>
        <p><strong>Current Estimated Wait Time:</strong> {format_time(queue_info['estimated_wait'])}</p>
        <p>Last update: {datetime.now().strftime('%H:%M:%S')}</p>
        <table>
            <tr><th>Task ID</th><th>Request Time</th><th>Processing Time</th></tr>
            {tasks_html}
        </table>
    </div>
    """
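

# Illustrative usage sketch (not part of the original app). The payload keys
# mirror those this module reads: 'language', 'prompt', 'processed_completions',
# and 'tests'; eval_string_script returns a dict whose "status" field is "OK"
# on success.
#
#   sample_batch = [{
#       'language': 'python',
#       'prompt': 'def add(a, b):\n',
#       'processed_completions': ['    return a + b\n'],
#       'tests': 'assert add(1, 2) == 3\n',
#   }]
#
#   # Blocking path: waits until a worker picks the task off the Redis queue.
#   result = synchronous_evaluate(sample_batch)
#
#   # Non-blocking path: enqueue, then poll with the returned task id.
#   ticket = enqueue_task(sample_batch)
#   status = check_status(ticket['task_id'])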