from fastapi import HTTPException
import os
from ..utils.hashing import HashGenerator
from user_journey_service.crew import UserJourney 
from user_journey_service.processors.duration_estimator import MicrolearningDurationEstimator
from user_journey_service.processors.StagewiseCourseParser import CourseOutlineParser
from user_journey_service.processors.content_reviewer import ContentReviewer
from user_journey_service.processors.user_journey_synthesizer import Synthesizer
from user_journey_service.tools.custom_stt_tool import LiveWhisperSTTTool
# from user_journey_service.tools.custom_tts_tool import RealTime_TTS
# Module-level singletons, constructed once at import time (import side effects).
# NOTE(review): only `content_reviewer` is referenced in this file;
# `duration_estimator`, `synthesizer` and `stt_tool` appear unused here —
# confirm other modules import them from this module before removing.
duration_estimator = MicrolearningDurationEstimator()
content_reviewer = ContentReviewer()
synthesizer = Synthesizer()
stt_tool = LiveWhisperSTTTool()
# tts_tool = RealTime_TTS()
from pathlib import Path
import re
import whisper
from gtts import gTTS
from pydub import AudioSegment


class ContentCreationService:
    """Create per-stage course content, assessment questions and audio files.

    All artefacts for one request are keyed by a hash of the input payload so
    that re-running the same request reuses previously generated files instead
    of regenerating them.
    """

    def __init__(self, input_data):
        """Set up crew instance and derive all artefact paths from the input hash.

        Args:
            input_data: request payload object; must expose ``feedback``,
                ``Level`` and ``motive`` attributes and a ``.dict()`` method
                (pydantic model — TODO confirm).
        """
        self.crew_instance = UserJourney()
        self.input_data = input_data
        self.input_hash = HashGenerator.generate_input_hash(input_data)
        # Every output directory below is namespaced by the input hash.
        self.research_file = f"research/{self.input_hash}.md"
        self.output_file = f"output/{self.input_hash}.md"
        self.output_file_1 = f"output1/{self.input_hash}.md"
        self.output_file_2 = f"output2/{self.input_hash}.md"
        self.json_output_path = f'parsed_course_content/{self.input_hash}.json'

    def run_content_creation(self):
        """Create content and audio for each stage of a previously built user journey.

        Returns a status dict; raises HTTPException(500) on any processing error.
        """
        try:
            # The user journey outline must have been created by an earlier step.
            if not os.path.exists(self.output_file):
                return {"status": "failure", "message": "User journey is not created."}

            print("The user journey is created")
            parser = CourseOutlineParser(self.output_file)
            parsed_result = parser.parse_content()
            os.makedirs(os.path.dirname(self.json_output_path), exist_ok=True)
            parser.export_to_json(self.json_output_path, parsed_result)

            print(f"Main Heading: {parsed_result['main_heading']}\n")
            main_heading = parsed_result['main_heading']

            # Stage numbering is 1-based everywhere (file names, prints).
            for idx, stage in enumerate(parsed_result['stages'], start=1):
                print(f"The iteration : {idx}")
                if self._is_first_iteration():
                    if self._has_updated_content_questions(idx):
                        if self._has_audio_content_and_questions(idx):
                            print("Audio files for content and questions are available")
                        else:
                            print("Audio files are not available")
                            self._create_audio_files(idx)
                    else:
                        print("content and question is not available")
                        self._create_new_stage(idx, stage, main_heading)
                        self._create_audio_files(idx)
                else:
                    # Later iterations always regenerate, skipping existence checks.
                    print("content and question is not available")
                    self._create_new_stage(idx, stage, main_heading)
                    self._create_audio_files_without_check(idx)

            return {"status": "success", "message": "contents are created.", "hashid": self.input_hash}

        except Exception as e:
            raise HTTPException(status_code=500, detail=f"Error parsing result: {e}")

    def _is_first_iteration(self):
        """Return True when the request marks this as the first iteration."""
        print(f"Inside feedback test : {self.input_data.feedback}")
        return self.input_data.feedback == "First iteration"

    def _has_updated_content_questions(self, idx):
        """Return True when both reviewed content and questions exist for stage *idx*."""
        updated_content_file = Path(f'updated_content/{self.input_hash}/stage{idx}.md')
        question_file = Path(f"question/{self.input_hash}/stage{idx}.md")
        return updated_content_file.exists() and question_file.exists()

    def _has_audio_content_and_questions(self, idx):
        """Return True when the stage audio and all three question audios exist."""
        print("Inside audio files availability check function")
        stage_audio = Path(f'audio/stage/{self.input_hash}/stage{idx}.wav')
        # Exactly three questions are generated per stage (see _create_audio_question).
        question_audios = [
            Path(f'audio/questions/{self.input_hash}/stage{idx}/{n}.wav') for n in (1, 2, 3)
        ]
        available = stage_audio.exists() and all(p.exists() for p in question_audios)
        print(f"The output is : {available}")
        return available

    def _run_evaluation(self, idx):
        """Interactively quiz the user on stage *idx* and run evaluation crews.

        Transcribes each question audio, collects typed answers from stdin,
        evaluates each answer, then runs an overall assessment.
        """
        all_qns_answer = {}
        audio_content = Path(f'audio/stage/{self.input_hash}/stage{idx}.wav')
        print(f"The audio content is available at : {audio_content}")
        audio_question = Path(f'audio/questions/{self.input_hash}/stage{idx}/')
        wav_files = sorted(audio_question.glob('*.wav'))
        for wav_file in wav_files:
            print(wav_file)
            question = self.wav_to_text_whisper(wav_file)
            input("🎤 Press Enter when you're ready to answer...")
            print("📢 Listening to your answer...")
            answer = str(input("Enter your answer: "))
            inputs = {"question": question, "answer": answer}
            all_qns_answer[question] = answer
            crew = self.crew_instance.evaluator_crew()
            crew.kickoff(inputs=inputs)
        inputs = {"users_response": all_qns_answer}
        crew = self.crew_instance.assessment_crew()
        crew.kickoff(inputs=inputs)

    def _create_audio_files(self, idx):
        """Create stage/question audio for stage *idx*, skipping files that exist."""
        audio_content = Path(f'audio/stage/{self.input_hash}/stage{idx}.wav')
        if audio_content.exists():
            print(f"Audio for the content is already available at :{audio_content}")
        else:
            audio_content.parent.mkdir(parents=True, exist_ok=True)
            content_file = Path(f'updated_content/{self.input_hash}/stage{idx}.md')
            with open(content_file, 'r', encoding='utf-8') as f:
                text_content = f.read()
            self.text_to_wav(text_content, audio_content)
        audio_question = Path(f'audio/questions/{self.input_hash}/stage{idx}/')
        wav_files = sorted(audio_question.glob('*.wav'))
        if not wav_files:
            audio_question.mkdir(parents=True, exist_ok=True)
            print("question audio is not available")
            self._create_audio_question(idx)

    def _create_audio_files_without_check(self, idx):
        """Unconditionally (re)create stage and question audio for stage *idx*."""
        audio_content = Path(f'audio/stage/{self.input_hash}/stage{idx}.wav')
        audio_content.parent.mkdir(parents=True, exist_ok=True)
        content_file = Path(f'updated_content/{self.input_hash}/stage{idx}.md')
        with open(content_file, 'r', encoding='utf-8') as f:
            text_content = f.read()
        self.text_to_wav(text_content, audio_content)
        question_dir = Path(f"audio/questions/{self.input_hash}/stage{idx}/")
        question_dir.mkdir(parents=True, exist_ok=True)
        print("question audio is not available")
        self._create_audio_question(idx)

    def _create_audio_question(self, idx):
        """Render each parsed question of stage *idx* to audio/questions/<hash>/stage<idx>/<n>.wav."""
        question_file = Path(f"question/{self.input_hash}/stage{idx}.md")
        with open(question_file, 'r', encoding='utf-8') as f:
            questions = f.read()
        for index, question in enumerate(self.parse_questions(questions), start=1):
            # BUGFIX: was `index+1`, which printed numbers shifted by one
            # relative to the 1-based file names written below.
            print(f"\n🧠 Asking Question {index}: {question}\n")
            audio_question = Path(f'audio/questions/{self.input_hash}/stage{idx}/{index}.wav')
            self.text_to_wav(question, audio_question)

    def parse_questions(self, md_text):
        """Extract question bodies from markdown sections headed `### Q<n>:`.

        Returns a list of question strings, one per `### Q<n>:` heading.
        """
        pattern = r"### Q\d+:\s*\n?(.*?)(?=\n### Q\d+:|\Z)"
        return re.findall(pattern, md_text, re.DOTALL)

    def _create_new_stage(self, idx, stage, main_heading):
        """Generate, review and persist content + questions for one stage.

        Args:
            idx: 1-based stage number.
            stage: parsed stage dict with 'stage_title', 'focus', 'outcome',
                'duration' and 'topics_covered' keys.
            main_heading: course-level heading from the parsed outline.

        Raises:
            ValueError: when the stage duration contains no digits.
        """
        inputs = self.input_data.dict()
        print(f"Stage {idx}: {stage['stage_title']}")
        stage_no = stage['stage_title']
        print(f"The stage value for content creation is : {stage_no}")
        print(f"  Focus: {stage['focus']}")
        focus = stage['focus']
        print(f"  Outcome: {stage['outcome']}")
        outcome = stage['outcome']
        print(f"  Duration: {stage['duration']}")
        duration = stage['duration']
        print(f"  Topics Covered: {stage['topics_covered']}")
        topics = stage['topics_covered']
        print(f"The topics for content creation is : {topics}")
        complete_data = {"Stage": f"Stage {idx}: {stage_no}",
                         "Focus": focus,
                         "Outcome": outcome,
                         "Duration": duration,
                         "Topics Covered": topics
                         }
        print(f"The user journey for content creation is : {complete_data}")
        content_file = f"content/{self.input_hash}/stage{idx}.md"
        question_file = f"question/{self.input_hash}/stage{idx}.md"
        match = re.search(r'\d+', duration)
        if not match:
            # BUGFIX: previously fell through and crashed with NameError on
            # lower_range/upper_range; fail with an actionable message instead.
            raise ValueError(f"Cannot extract a minute count from duration: {duration!r}")
        minutes = int(match.group())
        # Target word count assumes ~120-130 spoken words per minute.
        lower_range = minutes * 120
        upper_range = minutes * 130
        print(f"The word count for content creation is : {lower_range} to {upper_range}")
        # BUGFIX: `self.input_data.motive` was outside the braces and printed literally.
        print(f"the level and motive for content creation is : {self.input_data.Level} and {self.input_data.motive}")
        inputs["main_heading"] = main_heading
        inputs["stage"] = stage_no
        inputs["topics"] = topics
        inputs["user_journey"] = complete_data
        inputs["word_count"] = f"{lower_range} to {upper_range}"
        crew = self.crew_instance.second_stage_crew(output_file=content_file)
        crew.kickoff(inputs=inputs)
        with open(content_file, 'r', encoding='utf-8') as f:
            text = f.read()
        topic_sections = ",and ".join([f"'{topic}'" for topic in topics])
        print(f"The topic sections are: {topic_sections}")
        updated_data = content_reviewer.review_and_enrich_content(
            topic_sections, main_heading, complete_data, text, f"{lower_range} to {upper_range}")
        crew = self.crew_instance.qa_gen_crew(output_file=question_file)
        inputs["content"] = updated_data
        crew.kickoff(inputs=inputs)
        updated_content_path = f'updated_content/{self.input_hash}/stage{idx}.md'
        os.makedirs(os.path.dirname(updated_content_path), exist_ok=True)
        with open(updated_content_path, 'w', encoding='utf-8') as out_file:
            out_file.write(updated_data)
        return "content and questions created successfully.."

    def text_to_wav(self, text, output_file):
        """Synthesize *text* with gTTS and save it as a WAV file at *output_file*."""
        import tempfile

        tts = gTTS(text=text, lang='en')
        # BUGFIX: a fixed "temp_audio.mp3" in the CWD was never deleted and
        # raced between concurrent calls; use a unique temp file and clean up.
        fd, temp_mp3 = tempfile.mkstemp(suffix=".mp3")
        os.close(fd)
        try:
            tts.save(temp_mp3)
            # gTTS only emits mp3; transcode to wav via pydub.
            AudioSegment.from_mp3(temp_mp3).export(output_file, format="wav")
        finally:
            os.remove(temp_mp3)
        print(f"Audio saved as {output_file}")

    def wav_to_text_whisper(self, wav_file):
        """Transcribe *wav_file* to text using the Whisper "base" model."""
        print("inside wave to text conversion")
        # Loading the model is expensive; cache it on the instance so repeated
        # transcriptions (one per question) reuse it.
        model = getattr(self, "_whisper_model", None)
        if model is None:
            model = whisper.load_model("base")
            self._whisper_model = model
        result = model.transcribe(str(wav_file))
        print("\U0001F3A4 Transcription:", result["text"])
        return result["text"]






        
