"""
WORKING AUDIO WORKER - No RQ cleanup bug
FIXED VERSION - Redis connection scope issue resolved
"""
import os
import sys
import time
import redis
import json
from datetime import datetime
import signal
import threading

# --- Path / working-directory setup ------------------------------------------
# NOTE(review): project_root, script_dir and current_dir below all compute the
# same directory, and it is inserted into sys.path twice — redundant but
# harmless.  Kept as-is to avoid changing import resolution order.
project_root = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, project_root)
sys.path.insert(0, os.path.join(project_root, '..'))


# chdir so any relative paths used downstream resolve next to this script.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
print(f"📁 Working directory set to: {os.getcwd()}")


print("=" * 60)
print("🎵 WORKING AUDIO WORKER - FIXED VERSION")
print("=" * 60)

# Add project root
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, current_dir)

# Global Redis connection - FIX: Make it global or pass properly
# Lazily created by get_redis_connection(); replaced by reconnect_redis().
redis_connection = None

def get_redis_connection():
    """Return the shared Redis client, creating it on first use.

    Returns:
        The live ``redis.Redis`` client, or ``None`` when Redis is
        unreachable.

    FIX: the original assigned the global *before* ``ping()``, so a failed
    ping left a broken client cached in ``redis_connection`` and every
    subsequent call returned it as if it were healthy.  The global is now
    only set after a successful ping.
    """
    global redis_connection
    if redis_connection is None:
        try:
            # redis.Redis() does not connect by itself; ping() forces a
            # round-trip so failures surface here instead of at first use.
            conn = redis.Redis(host='localhost', port=6379, db=0)
            conn.ping()
        except Exception as e:
            print(f"❌ Redis error: {e}")
            print("💡 Make sure Redis is running: redis-server")
            return None
        redis_connection = conn
        print("✅ Redis connected")
    return redis_connection

def reconnect_redis():
    """Replace the cached Redis connection with a freshly verified one.

    Returns:
        The new ``redis.Redis`` client on success, or ``None`` on failure.

    FIX: on failure the original left the *broken* new client stored in the
    global, so ``get_redis_connection()`` kept handing it out and the worker
    never recovered.  The global is now cleared on failure so the next
    ``get_redis_connection()`` call retries from scratch.
    """
    global redis_connection
    print("🔄 Reconnecting to Redis...")
    try:
        conn = redis.Redis(host='localhost', port=6379, db=0)
        conn.ping()  # verify before publishing the connection to callers
    except Exception as e:
        redis_connection = None  # force a clean retry on the next call
        print(f"❌ Redis reconnection failed: {e}")
        return None
    redis_connection = conn
    print("✅ Redis reconnected")
    return redis_connection

# Initial connection — the worker refuses to start without Redis.
r = get_redis_connection()
if r is None:
    sys.exit(1)

# QUEUE SETTINGS
QUEUE_NAME = 'audio_queue_working'  # Fresh queue name (the list BLPOP'd by worker_loop)
JOB_PREFIX = 'audio_job:'  # Job storage prefix (payloads stored at audio_job:<job_id>)

print(f"📦 Queue: {QUEUE_NAME}")
print(f"🔑 Job prefix: {JOB_PREFIX}")
print("-" * 60)

def add_job_to_queue(hashid, data):
    """Enqueue an audio job (testing helper).

    Stores the job payload under JOB_PREFIX with a 24h TTL, then pushes the
    job id onto QUEUE_NAME for the worker loop to pick up.

    Returns the generated job id, or None when Redis is unavailable.
    """
    conn = get_redis_connection()
    if conn is None:
        return None

    job_id = f"audio_{hashid}_{int(time.time())}"

    payload = {
        'job_id': job_id,
        'hashid': hashid,
        'data': data,
        'status': 'queued',
        'created': datetime.now().isoformat(),
    }

    # Payload first, then the queue entry — the worker looks the payload up
    # by the id it pops, so this order avoids a missing-data race.
    conn.setex(f"{JOB_PREFIX}{job_id}", 86400, json.dumps(payload))
    conn.rpush(QUEUE_NAME, job_id)

    print(f"➕ Added job: {job_id}")
    return job_id


def process_job(job_id):
    """Process a single audio job.

    Loads the job payload from Redis, locates the content markdown file for
    the job's hashid (probing several candidate directories), runs
    ContentCreationService.run_audio_creation(), and writes the final
    completed/failed status back to the job's Redis key (24h TTL).

    Returns the service's result on success, or a
    ``{'status': 'failed', 'error': ...}`` dict on any failure.
    """
    # Re-resolve the shared connection so a reconnect elsewhere is picked up.
    r = get_redis_connection()
    if r is None:
        return {'status': 'failed', 'error': 'Redis not connected'}
    
    print(f"\n🎵 [{datetime.now().strftime('%H:%M:%S')}] Processing: {job_id}")
    
    # Get job data (stored by add_job_to_queue under JOB_PREFIX)
    job_key = f"{JOB_PREFIX}{job_id}"
    job_data = r.get(job_key)
    
    if not job_data:
        # Queue entry outlived its payload (24h TTL) — nothing to process.
        print(f"❌ No data for job: {job_id}")
        return {'status': 'failed', 'error': 'Job data missing'}
    
    job = json.loads(job_data)
    
    # Update status to processing (also refreshes the 24h TTL)
    job['status'] = 'processing'
    job['started_at'] = datetime.now().isoformat()
    r.setex(job_key, 86400, json.dumps(job))
    
    try:
        # Import service lazily so a broken service module fails one job
        # instead of preventing the worker from starting.
        print("📦 Importing ContentCreationService...")
        from services.content_creation_service import ContentCreationService
        import os  # NOTE(review): redundant — os is already imported at module level
        
        # Get the project structure
        # working_worker.py is in user_journey_service/
        # output/ is outside user_journey_service/ (one level up)
        script_dir = os.path.dirname(os.path.abspath(__file__))  # user_journey_service/
        parent_dir = os.path.dirname(script_dir)  # Go up one level
        output_dir = os.path.join(parent_dir, "output")

        # Create input object
        # Minimal duck-typed stand-in for the service's expected input model:
        # exposes each payload key as an attribute plus a .dict() accessor
        # (presumably mimicking a pydantic model — TODO confirm).
        class SimpleInput:
            def __init__(self, data):
                for k, v in data.items():
                    setattr(self, k, v)
                # Ensure hashid is set
                self.hashid = job['hashid']
            def dict(self):
                return self.__dict__
        
        # Create service
        input_obj = SimpleInput(job['data'])
        print(f"Input object created with hashid: {input_obj.hashid}")
        
        service = ContentCreationService(input_obj)
        print("service object created")
        
        # DEBUG: Check what path service is using
        print(f"🔍 Service output_file path: {service.output_file}")
        
        # Check if the file exists in the correct location (outside user_journey_service)
        hashid = job['hashid']
        correct_output_path = os.path.join(parent_dir, "output", f"{hashid}.md")
        
        print(f"🔍 Expected file at: {correct_output_path}")
        print(f"🔍 File exists: {os.path.exists(correct_output_path)}")
        
        if not os.path.exists(correct_output_path):
            error = f"Content file not found: {correct_output_path}"
            print(f"❌ {error}")
            
            # Try alternative paths — first match wins, in this order.
            alternative_paths = [
                os.path.join(script_dir, "output", f"{hashid}.md"),  # Inside user_journey_service
                os.path.join("..", "output", f"{hashid}.md"),  # Relative path
                os.path.join(parent_dir, "user_journey_service", "output", f"{hashid}.md"),
            ]
            
            for alt_path in alternative_paths:
                abs_alt = os.path.abspath(alt_path)
                if os.path.exists(abs_alt):
                    print(f"✅ Found file at alternative location: {abs_alt}")
                    correct_output_path = abs_alt
                    break
            
            if not os.path.exists(correct_output_path):
                raise FileNotFoundError(error)
        
        # IMPORTANT: Update the service's output_file path to the correct location
        service.output_file = correct_output_path
        print(f"✅ Using output file: {service.output_file}")
        
        # Check if content exists (final check)
        # NOTE(review): can only fail if the file vanished since the probe
        # above — kept as a last-line-of-defense guard.
        if not os.path.exists(service.output_file):
            error = f"Content file not found: {service.output_file}"
            print(f"❌ {error}")
            raise FileNotFoundError(error)
        
        # Run audio creation
        print(f"🔊 Creating audio for {job['hashid']}...")
        result = service.run_audio_creation()
        
        # Update job status
        job['status'] = 'completed'
        job['result'] = result
        job['completed_at'] = datetime.now().isoformat()
        job['processing_time'] = time.time() - datetime.fromisoformat(job['started_at']).timestamp()
        
        r.setex(job_key, 86400, json.dumps(job))
        print(f"✅ Completed: {job_id}")
        
        return result
        
    except Exception as e:
        error_msg = f"{type(e).__name__}: {str(e)}"
        print(f"❌ Failed: {error_msg}")
        
        # Import traceback for debugging
        import traceback
        traceback.print_exc()
        
        # Update as failed
        job['status'] = 'failed'
        job['error'] = error_msg
        job['failed_at'] = datetime.now().isoformat()
        
        # NOTE(review): if the original exception came from Redis itself this
        # setex may raise again and propagate to the caller's handler.
        r.setex(job_key, 86400, json.dumps(job))
        
        return {'status': 'failed', 'error': error_msg}
    

def worker_loop():
    """Main worker loop - runs until SIGINT/SIGTERM.

    Blocks on QUEUE_NAME via BLPOP (5s timeout so shutdown stays responsive),
    dispatches each popped job id to process_job(), and keeps running stats.
    Reconnects to Redis on connection loss.
    """
    print("\n👷 Worker started. Press Ctrl+C to stop.")
    print("-" * 60)
    
    processed_count = 0
    failed_count = 0
    
    # Graceful shutdown: the handler only sets the event; the loop notices it
    # on the next iteration (at most one BLPOP timeout later).
    stop_event = threading.Event()
    
    def signal_handler(signum, frame):
        print(f"\n🛑 Received shutdown signal")
        stop_event.set()
    
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    
    while not stop_event.is_set():
        try:
            # Get Redis connection for this iteration
            r = get_redis_connection()
            if r is None:
                print("❌ Redis not available, waiting...")
                time.sleep(5)
                continue
            
            # Get job from queue with timeout (returns (queue, value) or None)
            job_id = r.blpop(QUEUE_NAME, timeout=5)
            
            if job_id:
                job_id = job_id[1].decode('utf-8')
                result = process_job(job_id)
                
                # FIX: on success process_job returns the *service's* result,
                # which is not guaranteed to carry a 'status' key; only
                # failures are guaranteed {'status': 'failed', ...}.  The old
                # check (status == 'completed') miscounted successes as
                # failures, so count as failed only on an explicit failure
                # marker (or a falsy result).
                if not result or (isinstance(result, dict)
                                  and result.get('status') == 'failed'):
                    failed_count += 1
                else:
                    processed_count += 1
                
                # Show stats
                print(f"📊 Stats: {processed_count} ✅ | {failed_count} ❌")
                
            else:
                # No jobs, show status
                if processed_count == 0 and failed_count == 0:
                    print("⏳ Waiting for jobs...", end='\r')
                else:
                    print(f"🔄 Idle... Processed: {processed_count}, Failed: {failed_count}", end='\r')
                
        except redis.exceptions.ConnectionError:
            print("❌ Redis connection lost. Reconnecting...")
            time.sleep(5)
            reconnect_redis()
            
        except Exception as e:
            print(f"\n⚠️ Worker error: {e}")
            import traceback
            traceback.print_exc()
            time.sleep(10)
    
    print(f"\n📊 Final stats: {processed_count} processed, {failed_count} failed")
    print("👋 Worker stopped gracefully")

def start_worker():
    """Register this worker in Redis, keep its heartbeat alive, then run the
    main processing loop.

    The heartbeat is a daemon thread that refreshes the worker status key
    (300s TTL) every 60s, so a crashed worker disappears automatically.
    """
    print("🚀 Starting audio worker...")

    # Abort early when Redis is unreachable — nothing to consume without it.
    conn = get_redis_connection()
    if conn is None:
        print("❌ Cannot start worker - Redis not available")
        return

    worker_key = 'worker:audio:status'
    worker_id = f"worker_{os.getpid()}_{int(time.time())}"

    # Initial registration under a 300s TTL.
    registration = {
        'id': worker_id,
        'pid': os.getpid(),
        'started': datetime.now().isoformat(),
        'status': 'running',
    }
    conn.setex(worker_key, 300, json.dumps(registration))

    def _beat_forever():
        # Refresh the status key once a minute for as long as the process lives.
        while True:
            live = get_redis_connection()
            if live:
                live.setex(worker_key, 300, json.dumps({
                    'id': worker_id,
                    'pid': os.getpid(),
                    'last_heartbeat': datetime.now().isoformat(),
                    'status': 'running',
                }))
            time.sleep(60)

    threading.Thread(target=_beat_forever, daemon=True).start()

    # Blocks until a shutdown signal arrives.
    worker_loop()


if __name__ == "__main__":
    # Script entry point: register the worker and block in the main loop.
    start_worker()