from pathlib import Path

import yaml
from crewai import LLM

from user_journey_service.core.config import settings

# Path to the prompt templates, resolved relative to the process working directory.
PROMPTS_PATH = Path("user_journey_service/config/prompts.yaml")

    
# Earlier tool-based implementation, kept for reference:
#
# from user_journey_service.tools.custom_topic_validator_tool import TopicValidationTool
#
# class MicrolearningTopicValidator:
#     def __init__(self):
#         self.tool = TopicValidationTool()
#
#     def validate_topic(self, input_data):
#         return self.tool._run(topic=input_data.topic)

class MicrolearningTopicValidator:
    """Validates a proposed microlearning topic with a single LLM call."""

    def __init__(self):
        # crewai expects the model as "<provider>/<model-name>".
        self.llm = LLM(
            model=f"{settings.PROVIDER}/{settings.LLM1}",
            temperature=settings.TEMPERATURE,
            api_key=settings.OPENAI_KEY1,
            api_base=settings.ENDPOINT1,
            api_version=settings.API_VERSION1,
            seed=settings.SEED,        # For reproducible results
            timeout=settings.TIMEOUT,
        )
        # Alternative backend: a local Ollama server.
        # self.llm = LLM(
        #     model="ollama/llama3.1:latest",
        #     temperature=settings.TEMPERATURE,
        #     base_url="http://localhost:11434",
        #     seed=settings.SEED,        # For reproducible results
        #     timeout=settings.TIMEOUT,
        # )
        self.prompts = self.load_prompts()

    def load_prompts(self):
        """Loads the prompt templates from the YAML config file."""
        with open(PROMPTS_PATH, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)
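
    # For reference, a minimal prompts.yaml compatible with this loader might
    # look like the sketch below (an assumed shape, not the actual file): a
    # top-level "topic_validator" key holding a template with a {topic}
    # placeholder, exactly as consumed by validate_topic() below.
    #
    # topic_validator: |
    #   You are reviewing a proposed microlearning topic.
    #   Topic: {topic}
    #   Reply with whether the topic is suitable and why.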
        
    def validate_topic(self, input_data):
        """Formats the topic into the validation prompt and returns the LLM's verdict."""
        prompt_template = self.prompts["topic_validator"]
        print(f"Topic: {input_data.topic}")
        prompt = prompt_template.format(topic=input_data.topic)
        print(f"Prompt: {prompt}")

        # LLM.call accepts chat-style message dicts and returns the completion text.
        messages = [
            {
                "role": "user",
                "content": prompt,
            },
        ]

        response = self.llm.call(messages)
        return response.strip()
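

# Minimal usage sketch (illustrative assumption, not part of the service):
# validate_topic() only needs an object exposing a `.topic` attribute, so a
# simple dataclass stands in for the real input schema here. Running this
# requires valid provider settings and the prompts.yaml file to be in place.
if __name__ == "__main__":
    from dataclasses import dataclass

    @dataclass
    class TopicInput:  # hypothetical stand-in for the actual request model
        topic: str

    validator = MicrolearningTopicValidator()
    verdict = validator.validate_topic(TopicInput(topic="Basics of Python decorators"))
    print(verdict)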