# user_journey_service/processors/duration_estimator.py
import os
import yaml
from pathlib import Path
from crewai import LLM


# Absolute path to the prompt-template YAML, resolved relative to this file:
# <package root>/config/prompts.yaml. Module-level so it is computed once and
# shared by every estimator instance.
prompts_path = Path(__file__).parent.parent / "config" / "prompts.yaml"

class MicrolearningDurationEstimator:
    """Estimate an appropriate microlearning duration for a user via an LLM.

    Prompt templates are loaded from ``config/prompts.yaml`` (with built-in
    fallback templates when the file is missing or invalid), and the LLM
    backend is configured from environment variables:

    - ``PROVIDER``: ``openai`` (default) or ``ollama``
    - ``LLM1``: model name (default ``gpt-3.5-turbo``)
    - ``TEMPERATURE`` / ``SEED`` / ``TIMEOUT``: sampling/runtime settings
    - ``OPENAI_API_KEY``: required (env var, or project settings fallback)

    Raises:
        ValueError: if no OpenAI API key can be resolved.
    """

    def __init__(self):
        # Prompts must be loaded before anything else: the debug line below
        # and estimate_duration() both rely on self.prompts existing.
        self._load_prompts()

        print(f"DEBUG: Prompts loaded, keys: {list(self.prompts.keys())}")

        api_key = self._resolve_api_key()
        if not api_key:
            raise ValueError("OPENAI_API_KEY is required")

        self._build_llm(api_key)

    def _resolve_api_key(self):
        """Return the OpenAI API key from the environment or settings, else None.

        SECURITY: never echoes any portion of the key into logs — the debug
        output reports presence only. (The original printed the first 20
        characters, which leaks credential material into log files.)
        """
        api_key = os.getenv("OPENAI_API_KEY")
        print(f"DEBUG: API Key from os.getenv: {'set' if api_key else 'None'}")

        if not api_key:
            # Fallback: project settings module, if importable and populated.
            try:
                from user_journey_service.core.config import settings
                api_key = settings.OPENAI_API_KEY
                print(f"DEBUG: API Key from settings: {'set' if api_key else 'None'}")
            except (ImportError, AttributeError):
                # Settings module absent, or it lacks OPENAI_API_KEY;
                # the caller decides whether a missing key is fatal.
                pass

        return api_key

    def _build_llm(self, api_key):
        """Create ``self.llm`` for the provider selected via environment vars.

        Args:
            api_key: OpenAI API key (unused by the Ollama branch, which talks
                to a local server via ``OLLAMA_URL`` instead).
        """
        provider = os.getenv("PROVIDER", "openai")
        model = os.getenv("LLM1", "gpt-3.5-turbo")
        # Read/convert each setting once instead of per-branch.
        temperature = float(os.getenv("TEMPERATURE", "0.7"))
        seed = int(os.getenv("SEED", "42"))
        timeout = int(os.getenv("TIMEOUT", "6000"))

        if provider == "openai":
            self.llm = LLM(
                model=f"openai/{model}",
                temperature=temperature,
                api_key=api_key,
                seed=seed,
                timeout=timeout,
            )
            print("✓ LLM initialized with OpenAI")

        elif provider == "ollama":
            self.llm = LLM(
                model=f"ollama/{model}",
                temperature=temperature,
                base_url=os.getenv("OLLAMA_URL", "http://localhost:11434"),
                seed=seed,
                timeout=timeout,
            )
            print("✓ LLM initialized with Ollama")

        else:
            # Unknown provider: hard-coded OpenAI defaults (note the shorter
            # 30s timeout, kept from the original fallback behaviour).
            self.llm = LLM(
                model="openai/gpt-3.5-turbo",
                temperature=0.7,
                api_key=api_key,
                seed=42,
                timeout=30,
            )
            print("⚠️ LLM initialized with fallback settings")

    def _load_prompts(self):
        """Load prompt templates from the YAML file, or install defaults.

        Always leaves ``self.prompts`` as a dict. ``yaml.safe_load`` returns
        None for an empty file (and a scalar for non-mapping YAML), which
        previously crashed later ``self.prompts.keys()`` / ``.get()`` calls —
        such results now fall back to the built-in default templates.
        """
        try:
            if prompts_path.exists():
                with open(prompts_path, "r", encoding="utf-8") as f:
                    loaded = yaml.safe_load(f)
                if isinstance(loaded, dict):
                    self.prompts = loaded
                    print(f"✓ Loaded prompts from {prompts_path}")
                    return
                # Empty or non-mapping YAML: treat as missing and use defaults.
                print(f"⚠️ Prompts file at {prompts_path} is empty or not a mapping, using defaults")
            else:
                print(f"⚠️ Prompts file not found at {prompts_path}, using defaults")
            self.prompts = {
                "duration_estimator": """
                Estimate the appropriate learning duration for the following user based on their profile and the topic.
                
                User Profile:
                - Job Title: {Job_Title}
                - Experience: {Experience}
                - Skills: {Skills}
                - Learning Level: {Level}
                
                Topic: {topic}
                
                Consider:
                - Beginner level with 1-3 years experience: 30-45 minutes
                - Intermediate level with 3-7 years experience: 45-60 minutes  
                - Advanced level with 7+ years experience: 60-90 minutes
                
                Return only the estimated duration in minutes (e.g., "60 minutes").
                """
            }
        except Exception as e:
            # Unreadable/corrupt file: fall back to a minimal default prompt.
            print(f"⚠️ Error loading prompts: {e}, using defaults")
            self.prompts = {
                "duration_estimator": """
                Estimate learning duration for {topic} for a {Job_Title} with {Experience} experience and {Level} level.
                Skills: {Skills}
                Return duration in minutes.
                """
            }

    def estimate_duration(self, input_data):
        """Estimate the learning duration for ``input_data`` via the LLM.

        Args:
            input_data: object exposing ``Job_Title``, ``Experience``,
                ``Skills``, ``topic`` and ``Level`` attributes (e.g. a
                Pydantic model from the request layer).

        Returns:
            The stripped LLM response text, or the safe default
            ``"60 minutes"`` if anything goes wrong (best-effort by design).
        """
        try:
            print(f"DEBUG: In estimate_duration, prompts exists: {hasattr(self, 'prompts')}")
            print(f"DEBUG: Prompts keys: {list(self.prompts.keys()) if hasattr(self, 'prompts') else 'No prompts'}")

            # Template from config, with a terse inline default as last resort.
            prompt_template = self.prompts.get("duration_estimator",
                "Estimate duration for {topic} at {Level} level. Return minutes.")

            prompt = prompt_template.format(
                Job_Title=input_data.Job_Title,
                Experience=input_data.Experience,
                Skills=input_data.Skills,
                topic=input_data.topic,
                Level=input_data.Level
            )

            print(f"DEBUG: Prompt prepared: {prompt[:100]}...")

            messages = [
                {
                    'role': 'user',
                    'content': prompt,
                },
            ]

            response = self.llm.call(messages)
            print(f"DEBUG: LLM response: {response}")

            # str() guards against providers returning non-string payloads.
            return str(response).strip()

        except Exception as e:
            # Deliberate best-effort: log the full traceback, return a sane
            # default so the calling journey pipeline keeps working.
            print(f"Error in estimate_duration: {e}")
            import traceback
            traceback.print_exc()
            return "60 minutes"  # Default fallback