"""
UNIFIED QUEUE WORKER - Handles both content creation and audio creation jobs
Jobs are processed in order: content jobs first, then audio jobs for same hashid
"""
import os
import sys
import time
import redis
import json
from datetime import datetime
import signal
import threading

# Add project root to path
# NOTE(review): both this directory and its parent are prepended so that
# `services.*` imports resolve from either location — confirm against layout.
project_root = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, project_root)
sys.path.insert(0, os.path.join(project_root, '..'))

# Set working directory
# Relative paths used later by the services resolve against this directory.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
print(f"📁 Working directory set to: {os.getcwd()}")

print("=" * 60)
print("🎵 UNIFIED QUEUE WORKER - Handles Content + Audio")
print("=" * 60)

# Global Redis connection
# Lazily created by get_redis_connection(); replaced by reconnect_redis().
redis_connection = None

def get_redis_connection():
    """Return the cached Redis connection, creating and pinging it on demand.

    Returns:
        redis.Redis | None: a verified connection, or None when the server
        is unreachable (the cache stays empty so the next call retries).
    """
    global redis_connection
    if redis_connection is None:
        try:
            conn = redis.Redis(host='localhost', port=6379, db=0)
            conn.ping()  # fail fast: redis.Redis() alone does not connect
            redis_connection = conn  # cache only a verified connection
            print("✅ Redis connected")
        except Exception as e:
            # BUG FIX: the client used to be assigned to the global *before*
            # ping(), so a failed ping left a broken client cached and every
            # later call returned it as if connected. Keep the cache empty.
            redis_connection = None
            print(f"❌ Redis error: {e}")
            print("💡 Make sure Redis is running: redis-server")
            return None
    return redis_connection

def reconnect_redis():
    """Replace the cached Redis connection with a fresh, verified one.

    Returns:
        redis.Redis | None: the new connection, or None on failure. On
        failure the cached global is cleared so get_redis_connection()
        retries instead of reusing a broken client.
    """
    global redis_connection
    print("🔄 Reconnecting to Redis...")
    try:
        conn = redis.Redis(host='localhost', port=6379, db=0)
        conn.ping()  # verify before caching
        redis_connection = conn
        print("✅ Redis reconnected")
        return redis_connection
    except Exception as e:
        # BUG FIX: a client whose ping() failed used to remain cached in the
        # global; clear it so later calls attempt a real reconnect.
        redis_connection = None
        print(f"❌ Redis reconnection failed: {e}")
        return None

# Initial connection
# Exit immediately if Redis is unavailable at startup; the worker cannot
# operate without its queue backend.
r = get_redis_connection()
if r is None:
    sys.exit(1)

# QUEUE SETTINGS
QUEUE_NAME = 'content_audio_queue'  # Unified queue name
JOB_PREFIX = 'job:'  # Job storage prefix

print(f"📦 Queue: {QUEUE_NAME}")
print(f"🔑 Job prefix: {JOB_PREFIX}")
print("-" * 60)





def process_content_job(job_id, job):
    """Run the content-creation service for one queued job.

    Args:
        job_id: job identifier (used for logging only).
        job: decoded job record; must contain 'hashid' and 'data'.

    Returns:
        dict: {'status': 'completed', 'result': ...} on success,
              {'status': 'failed', 'error': ...} on any exception.
    """
    timestamp = datetime.now().strftime('%H:%M:%S')
    print(f"\n📝 [{timestamp}] Processing CONTENT job: {job_id}")

    try:
        print("📦 Importing ContentCreationService...")
        from services.content_creation_service import ContentCreationService

        # Project root is one level above this script's directory.
        this_dir = os.path.dirname(os.path.abspath(__file__))
        project_root = os.path.dirname(this_dir)
        print(f"📁 Project root: {project_root}")

        hashid = job['hashid']

        # Lightweight stand-in for the service's expected input object:
        # each payload key becomes an attribute, plus the job's hashid.
        class SimpleInput:
            def __init__(self, data, project_root):
                self._project_root = project_root
                for key, value in data.items():
                    setattr(self, key, value)
                self.hashid = hashid

            def dict(self):
                public = {}
                for key, value in self.__dict__.items():
                    if not key.startswith('_'):
                        public[key] = value
                return public

        input_obj = SimpleInput(job['data'], project_root)
        print(f"Input object created with hashid: {input_obj.hashid}")

        service = ContentCreationService(input_obj)
        # Redirect the service's output to <project_root>/output/<hashid>.md
        service.output_file = os.path.join(project_root, "output", f"{hashid}.md")
        print(f"✅ Using output file: {service.output_file}")

        print(f"📝 Creating content for {job['hashid']}...")
        result = service.run_content_creation_only()
        return {'status': 'completed', 'result': result}

    except Exception as e:
        import traceback
        error_msg = f"{type(e).__name__}: {str(e)}"
        print(f"❌ Content job failed: {error_msg}")
        traceback.print_exc()
        return {'status': 'failed', 'error': error_msg}

def process_audio_job(job_id, job):
    """Run the audio-creation service for one queued job.

    Requires the content file produced by the matching content job to
    already exist on disk; fails otherwise.

    Args:
        job_id: job identifier (used for logging only).
        job: decoded job record; must contain 'hashid' and 'data'.

    Returns:
        dict: {'status': 'completed', 'result': ...} on success,
              {'status': 'failed', 'error': ...} on any failure.
    """
    timestamp = datetime.now().strftime('%H:%M:%S')
    print(f"\n🔊 [{timestamp}] Processing AUDIO job: {job_id}")

    try:
        print("📦 Importing ContentCreationService...")
        from services.content_creation_service import ContentCreationService

        # Project root is one level above this script's directory.
        this_dir = os.path.dirname(os.path.abspath(__file__))
        project_root = os.path.dirname(this_dir)
        print(f"📁 Project root: {project_root}")

        hashid = job['hashid']

        # Lightweight stand-in for the service's expected input object:
        # each payload key becomes an attribute, plus the job's hashid.
        class SimpleInput:
            def __init__(self, data, project_root):
                self._project_root = project_root
                for key, value in data.items():
                    setattr(self, key, value)
                self.hashid = hashid

            def dict(self):
                public = {}
                for key, value in self.__dict__.items():
                    if not key.startswith('_'):
                        public[key] = value
                return public

        input_obj = SimpleInput(job['data'], project_root)
        print(f"Input object created with hashid: {input_obj.hashid}")

        service = ContentCreationService(input_obj)
        # The audio step reads from <project_root>/output/<hashid>.md
        service.output_file = os.path.join(project_root, "output", f"{hashid}.md")
        print(f"✅ Using output file: {service.output_file}")

        # Audio is derived from the content file; bail out if it is missing.
        if not os.path.exists(service.output_file):
            error = f"Content file not found: {service.output_file}. Audio creation requires content to exist."
            print(f"❌ {error}")
            return {'status': 'failed', 'error': error}

        print(f"🔊 Creating audio for {job['hashid']}...")
        result = service.run_audio_creation_only()
        return {'status': 'completed', 'result': result}

    except Exception as e:
        import traceback
        error_msg = f"{type(e).__name__}: {str(e)}"
        print(f"❌ Audio job failed: {error_msg}")
        traceback.print_exc()
        return {'status': 'failed', 'error': error_msg}

def process_job(job_id):
    """Load a stored job record, dispatch it by type, and persist the outcome.

    The job record lives at '{JOB_PREFIX}{job_id}' as JSON with a 24h TTL,
    refreshed on every status transition.

    Args:
        job_id: identifier of the job popped from the queue.

    Returns:
        dict: the handler's result, or {'status': 'failed', 'error': ...}
        when Redis is down, the record is missing, or the type is unknown.
    """
    r = get_redis_connection()
    if r is None:
        return {'status': 'failed', 'error': 'Redis not connected'}

    # Get job data
    job_key = f"{JOB_PREFIX}{job_id}"
    job_data = r.get(job_key)

    if not job_data:
        print(f"❌ No data for job: {job_id}")
        return {'status': 'failed', 'error': 'Job data missing'}

    job = json.loads(job_data)

    # Mark as processing and capture the start time once.
    started_ts = time.time()
    job['status'] = 'processing'
    job['started_at'] = datetime.now().isoformat()
    r.setex(job_key, 86400, json.dumps(job))

    # Dispatch by job type.
    job_type = job.get('job_type', 'unknown')

    if job_type == 'content_creation':
        result = process_content_job(job_id, job)
    elif job_type == 'audio_creation':
        result = process_audio_job(job_id, job)
    else:
        print(f"❌ Unknown job type: {job_type}")
        result = {'status': 'failed', 'error': f'Unknown job type: {job_type}'}

    # Persist the final status.
    if result['status'] == 'completed':
        job['status'] = 'completed'
        job['result'] = result.get('result')
        job['completed_at'] = datetime.now().isoformat()
        # BUG FIX: elapsed time used to be re-derived by parsing the naive
        # local-time ISO string written above (fromisoformat(...).timestamp(),
        # fragile across DST/timezone changes); use the captured epoch instead.
        job['processing_time'] = time.time() - started_ts
        print(f"✅ Completed {job_type} job: {job_id}")
    else:
        job['status'] = 'failed'
        job['error'] = result.get('error', 'Unknown error')
        job['failed_at'] = datetime.now().isoformat()
        print(f"❌ Failed {job_type} job: {job_id}")

    r.setex(job_key, 86400, json.dumps(job))
    return result

def worker_loop():
    """Main worker loop — runs until SIGINT/SIGTERM.

    Blocks on the unified queue with a 5s timeout (so the stop flag is
    re-checked regularly), processes each popped job via process_job(),
    and tracks per-type success/failure counters.
    """
    print("\n👷 Unified Worker started. Press Ctrl+C to stop.")
    print("-" * 60)

    processed_count = 0
    failed_count = 0
    content_count = 0
    audio_count = 0

    # Handle graceful shutdown: the event flips on SIGINT/SIGTERM and the
    # loop condition notices within one blpop timeout.
    stop_event = threading.Event()

    def signal_handler(signum, frame):
        print(f"\n🛑 Received shutdown signal")
        stop_event.set()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    while not stop_event.is_set():
        try:
            # Get Redis connection for this iteration
            r = get_redis_connection()
            if r is None:
                print("❌ Redis not available, waiting...")
                time.sleep(5)
                continue

            # Get job from queue with timeout
            job_id = r.blpop(QUEUE_NAME, timeout=5)

            if job_id:
                job_id = job_id[1].decode('utf-8')  # blpop returns (queue, value) bytes

                # Peek at the job record to learn its type for the counters.
                job_key = f"{JOB_PREFIX}{job_id}"
                job_data = r.get(job_key)
                if job_data:
                    job = json.loads(job_data)
                    job_type = job.get('job_type', 'unknown')

                    result = process_job(job_id)

                    if result and result.get('status') == 'completed':
                        processed_count += 1
                        if job_type == 'content_creation':
                            content_count += 1
                        elif job_type == 'audio_creation':
                            audio_count += 1
                    else:
                        failed_count += 1

                    # Show stats
                    print(f"📊 Stats: {processed_count} ✅ ({content_count} content, {audio_count} audio) | {failed_count} ❌")
                else:
                    # BUG FIX: a popped job whose record expired or was never
                    # stored used to be dropped silently (no log line, no
                    # counter update); record it as a failure instead.
                    failed_count += 1
                    print(f"❌ No data for popped job: {job_id}")

            else:
                # No jobs, show status
                if processed_count == 0 and failed_count == 0:
                    print("⏳ Waiting for jobs...", end='\r')
                else:
                    print(f"🔄 Idle... Processed: {processed_count} (C:{content_count}, A:{audio_count}), Failed: {failed_count}", end='\r')

        except redis.exceptions.ConnectionError:
            print("❌ Redis connection lost. Reconnecting...")
            time.sleep(5)
            reconnect_redis()

        except Exception as e:
            print(f"\n⚠️ Worker error: {e}")
            import traceback
            traceback.print_exc()
            time.sleep(10)

    print(f"\n📊 Final stats: {processed_count} processed (C:{content_count}, A:{audio_count}), {failed_count} failed")
    print("👋 Worker stopped gracefully")

def start_worker():
    """Register this worker in Redis, start a heartbeat thread, and run the loop.

    The status key 'worker:unified:status' carries a 5-minute TTL that the
    heartbeat refreshes every minute, so monitors can detect a dead worker
    via key expiry.
    """
    print("🚀 Starting unified queue worker...")

    r = get_redis_connection()
    if r is None:
        print("❌ Cannot start worker - Redis not available")
        return

    worker_key = 'worker:unified:status'
    pid = os.getpid()
    worker_id = f"worker_{pid}_{int(time.time())}"

    # Announce this worker (TTL refreshed by the heartbeat below).
    registration = {
        'id': worker_id,
        'pid': pid,
        'started': datetime.now().isoformat(),
        'status': 'running',
    }
    r.setex(worker_key, 300, json.dumps(registration))

    def heartbeat():
        # Daemon thread: refreshes the status key once a minute and dies
        # automatically with the process.
        while True:
            conn = get_redis_connection()
            if conn:
                conn.setex(worker_key, 300, json.dumps({
                    'id': worker_id,
                    'pid': os.getpid(),
                    'last_heartbeat': datetime.now().isoformat(),
                    'status': 'running',
                }))
            time.sleep(60)

    threading.Thread(target=heartbeat, daemon=True).start()

    # Block in the processing loop until shutdown.
    worker_loop()

if __name__ == "__main__":
    # Entry point: start the worker only when executed directly.
    start_worker()