from typing import Type
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
import dspy
import os
from dotenv import load_dotenv

# === Load secrets ===
# NOTE(review): absolute, machine-specific dotenv path — consider making it
# relative to this file or configurable via an environment variable.
load_dotenv(dotenv_path="/home/azureuser/microlearn/backend/prompt_opt_dspy/.env")

# Azure OpenAI credentials/endpoint, read from the environment loaded above.
azure_api_key = os.getenv("AZURE_API_KEY")
azure_api_host = os.getenv("AZURE_API_HOST")
azure_api_version = os.getenv("AZURE_API_VERSION")

# === Configure Azure OpenAI LM ===
# NOTE(review): the deployment name "Csqr-gpt-4o-mini" is hard-coded here,
# while the commented-out legacy version near the bottom of this file read
# AZURE_DEPLOYMENT_ID from the environment — confirm which is intended.
lm = dspy.LM(
    "azure/Csqr-gpt-4o-mini",
    api_key=azure_api_key,
    api_base=azure_api_host,
    api_version=azure_api_version,
    temperature=0.0,  # minimize sampling variance for consistent feedback
)
# Register this LM as the process-wide default for all DSPy modules.
dspy.configure(lm=lm)


# === DSPy Signature ===
class FinalAssessmentSignature(dspy.Signature):
    """
    You are an Overall User Performance Assessor.

    Review every question-answer pair provided in `qa_text`, taking into
    account the company profile provided in `company_context` (company name,
    industry, size, business model, top use cases, learning focus, tech
    stack, and compliance requirements).

    Produce exactly one comprehensive sentence that summarizes the user's
    overall performance across all questions.
    """

    # NOTE(review): the previous docstring referenced a `{users_response}`
    # placeholder, but the actual input field is `qa_text`.  DSPy uses this
    # docstring as the task instructions verbatim, so the unfilled `{...}`
    # placeholders were reaching the LM as literal text.
    company_context: str = dspy.InputField()
    qa_text: str = dspy.InputField()
    final_feedback: str = dspy.OutputField(
        desc="A single comprehensive sentence summarizing overall performance"
    )


# === DSPy Module ===
class FinalAssessmentModule(dspy.Module):
    """DSPy module that produces a one-sentence overall performance summary.

    On first construction it bootstrap-optimizes the underlying predictor
    with a small few-shot trainset; the optimized predictor is cached at
    class level, so the (LM-calling) compilation runs only once per process
    even though the tool constructs a module per invocation.
    """

    # Class-level cache: BootstrapFewShot.compile() issues real LM calls,
    # so recompiling on every instantiation is slow and costly.
    _compiled_predictor = None

    def __init__(self):
        super().__init__()
        self.predictor = dspy.Predict(FinalAssessmentSignature)

        if FinalAssessmentModule._compiled_predictor is None:
            # --- Few-shot training examples ---
            trainset = [
                dspy.Example(
                    company_context="""
                    Company Name: FinSight, 
                    Industry: Financial Services, 
                    Company Size: 250 employees, 
                    Business Model: B2B, 
                    Top Use Cases: Financial modeling, 
                    Learning Focus: Data Analysis, 
                    Tech Stack: Excel, Tableau, SQL Server, 
                    Compliance: SOX, PCI DSS
                    """,
                    qa_text="""
                    "What is SQL used for?":"SQL is useful for querying structured data, though I struggle with advanced joins."
                    "How does regression help in finance?":"Regression detects trends but I need practice applying it to forecasting."
                    "Why is visualization important?":"It communicates insights, though my design skills are limited."
                    """,
                    final_feedback="The learner demonstrates a fair grasp of fundamentals but needs improvement in applying advanced concepts and communication clarity."
                ).with_inputs("company_context", "qa_text"),
            ]

            # --- Custom metric: ensure concise single-sentence feedback ---
            def feedback_metric(gold, pred, trace):
                """Score 1 when the feedback is one concise sentence (<= 40 words)."""
                output = (getattr(pred, "final_feedback", "") or "").strip()
                # A single sentence ends with a period and contains no earlier
                # sentence-ending period.  (The previous check used `or`, which
                # accepted any multi-sentence text that merely ended in ".".)
                is_single_sentence = output.endswith(".") and output.count(".") == 1
                return int(is_single_sentence and len(output.split()) <= 40)

            # --- Bootstrap optimization (runs once, then cached) ---
            optimizer = dspy.BootstrapFewShot(metric=feedback_metric)
            FinalAssessmentModule._compiled_predictor = optimizer.compile(
                self.predictor, trainset=trainset
            )

        self.optimized_predictor = FinalAssessmentModule._compiled_predictor

    def forward(self, company_context, qa_text):
        """Run the optimized predictor over the context and Q&A transcript."""
        return self.optimized_predictor(
            company_context=company_context,
            qa_text=qa_text,
        )


# === Tool Input Schema ===
class FinalAssessmentInput(BaseModel):
    """Input schema for FinalAssessmentTool.

    Field names (including the mixed casing of ``Tech_stack``) intentionally
    mirror the keyword arguments of ``FinalAssessmentTool._run``.
    """
    Company_Name: str = Field(..., description="Name of the company")
    Industry: str = Field(..., description="Industry of the company")
    Company_Size: str = Field(..., description="Size of the company")
    Business_Model: str = Field(..., description="Company's business model")
    Top_Use_Cases: str = Field(..., description="Top use cases the company focuses on")
    Learning_Focus: str = Field(..., description="Learning focus for the user")
    Tech_stack: str = Field(..., description="Company's tech stack")
    Compliance: str = Field(..., description="Relevant compliance requirements")
    # Each dict is read with pair['question'] / pair['answer'] in _run.
    qa_pairs: list[dict] = Field(..., description="List of Q&A pairs, each dict must have {question, answer}")


# === Custom DSPy Tool ===
# Shared, lazily-built assessor.  Constructing FinalAssessmentModule runs
# BootstrapFewShot compilation (which issues LM calls), so build it once per
# process instead of once per tool invocation.
_ASSESSOR = None


def _get_assessor():
    """Return the cached FinalAssessmentModule, creating it on first use."""
    global _ASSESSOR
    if _ASSESSOR is None:
        _ASSESSOR = FinalAssessmentModule()
    return _ASSESSOR


class FinalAssessmentTool(BaseTool):
    """CrewAI tool that summarizes a learner's overall performance.

    Formats the company profile and the question/answer transcript, then asks
    the DSPy assessor for a single-sentence feedback summary.
    """

    name: str = "Final Assessment Tool"
    description: str = (
        "Reviews all user answers and generates a one-sentence overall performance feedback summary."
    )
    args_schema: Type[BaseModel] = FinalAssessmentInput

    def _run(
        self,
        Company_Name: str,
        Industry: str,
        Company_Size: str,
        Business_Model: str,
        Top_Use_Cases: str,
        Learning_Focus: str,
        Tech_stack: str,
        Compliance: str,
        qa_pairs: list[dict]
    ) -> str:
        """Generate the one-sentence overall feedback summary.

        Arguments mirror ``FinalAssessmentInput``; each item of ``qa_pairs``
        must be a dict with 'question' and 'answer' keys.

        Returns:
            The assessor's single-sentence feedback string.

        Raises:
            KeyError: if a ``qa_pairs`` entry lacks 'question' or 'answer'.
        """
        # Build the company-context block fed to the LM.
        company_context = f"""
        Company Name: {Company_Name}
        Industry: {Industry}
        Company Size: {Company_Size}
        Business Model: {Business_Model}
        Top Use Cases: {Top_Use_Cases}
        Learning Focus: {Learning_Focus}
        Tech Stack: {Tech_stack}
        Compliance: {Compliance}
        """

        # Flatten the Q&A pairs into a readable transcript.
        qa_text = "\n".join(
            f"Q: {pair['question']}\nA: {pair['answer']}" for pair in qa_pairs
        )

        result = _get_assessor().forward(
            company_context=company_context,
            qa_text=qa_text
        )

        return result.final_feedback

# # === CLI Test Usage ===
# if __name__ == "__main__":
#     tool = FinalAssessmentTool()
#     output = tool._run(
#         Company_Name="DeepCore AI",
#         Industry="Technology",
#         Company_Size="500 employees",
#         Business_Model="SaaS",
#         Top_Use_Cases="Predictive analytics",
#         Learning_Focus="Machine Learning",
#         Tech_stack="Python, AWS, PostgreSQL",
#         Compliance="SOC 2",
#         qa_pairs=[
#             {"question": "What is SQL used for?", "answer": "SQL helps analysts extract and analyze data efficiently."},
#             {"question": "How does regression help in business?", "answer": "It helps identify purchasing trends but I need to practice more."},
#             {"question": "Why is storytelling important?", "answer": "It enhances communication with executives but I am still learning narrative flow."}
#         ]
#     )
#     print("=== Final Assessment ===")
#     print(output)

########### Final Assessment ---------------->>

# from typing import Type
# from crewai.tools import BaseTool
# from pydantic import BaseModel, Field
# import dspy
# import os
# from dotenv import load_dotenv

# # === Load secrets ===
# load_dotenv(dotenv_path="/home/azureuser/microlearn/backend/prompt_opt_dspy/.env")

# azure_api_key = os.getenv("AZURE_API_KEY")
# azure_api_host = os.getenv("AZURE_API_HOST")
# azure_deployment_id = os.getenv("AZURE_DEPLOYMENT_ID")
# azure_api_version = os.getenv("AZURE_API_VERSION")

# # === Configure Azure OpenAI LM ===
# lm = dspy.LM(
#     "azure/Csqr-gpt-4o-mini",
#     api_key=azure_api_key,
#     api_base=azure_api_host,
#     api_version=azure_api_version,
#     temperature=0.0,
# )
# dspy.configure(lm=lm)


# # === DSPy Signature ===
# class FinalAssessmentSignature(dspy.Signature):
#     """
#     You are an Overall User Performance Assessor.
#     Review all question-answer pairs and generate an overall feedback summary.

#     The user's company context is:
#       Company Name : {Company_Name}
#       Industry : {Industry}
#       Company Size : {Company_Size}
#       Business Model : {Business_Model}
#       Top Use Cases : {Top_Use_Cases}
#       Learning Focus : {Learning_Focus}
#       Tech Stack : {Tech_stack}
#       Compliance : {Compliance}

#     Task:
#     - Review all the user's answers provided in {users_response}.
#     - Assess clarity, correctness, and completeness across answers.
#     - Provide one concise summary sentence that reflects the user's overall performance.
#     """

#     company_context: str = dspy.InputField()
#     users_response: str = dspy.InputField()
#     final_feedback: str = dspy.OutputField(
#         desc="A single comprehensive sentence summarizing overall performance"
#     )


# # === DSPy Module ===
# class FinalAssessmentModule(dspy.Module):
#     def __init__(self):
#         super().__init__()
#         self.predictor = dspy.Predict(FinalAssessmentSignature)

#     def forward(self, company_context, users_response):
#         return self.predictor(
#             company_context=company_context,
#             users_response=users_response
#         )


# # === Tool Input Schema ===
# class FinalAssessmentInput(BaseModel):
#     """Input schema for FinalAssessmentTool."""
#     Company_Name: str = Field(..., description="Name of the company")
#     Industry: str = Field(..., description="Industry of the company")
#     Company_Size: str = Field(..., description="Size of the company")
#     Business_Model: str = Field(..., description="Company's business model")
#     Top_Use_Cases: str = Field(..., description="Top use cases the company focuses on")
#     Learning_Focus: str = Field(..., description="Learning focus for the user")
#     Tech_stack: str = Field(..., description="Company's tech stack")
#     Compliance: str = Field(..., description="Relevant compliance requirements")
#     users_response: str = Field(..., description="All answers provided by the user")


# # === Custom DSPy Tool ===
# class FinalAssessmentTool(BaseTool):
#     name: str = "Final Assessment Tool"
#     description: str = (
#         "Reviews all user answers and generates a one-sentence overall performance feedback summary."
#     )
#     args_schema: Type[BaseModel] = FinalAssessmentInput

#     def _run(
#         self,
#         Company_Name: str,
#         Industry: str,
#         Company_Size: str,
#         Business_Model: str,
#         Top_Use_Cases: str,
#         Learning_Focus: str,
#         Tech_stack: str,
#         Compliance: str,
#         users_response: str
#     ) -> str:

#         # Build company context
#         company_context = f"""
#         Company Name: {Company_Name}
#         Industry: {Industry}
#         Company Size: {Company_Size}
#         Business Model: {Business_Model}
#         Top Use Cases: {Top_Use_Cases}
#         Learning Focus: {Learning_Focus}
#         Tech Stack: {Tech_stack}
#         Compliance: {Compliance}
#         """

#         assessor = FinalAssessmentModule()
#         result = assessor.forward(
#             company_context=company_context,
#             users_response=users_response
#         )

#         return result.final_feedback


# # # === CLI Test Usage ===
# # if __name__ == "__main__":
# #     tool = FinalAssessmentTool()
# #     output = tool._run(
# #         Company_Name="DeepCore AI",
# #         Industry="Technology",
# #         Company_Size="500 employees",
# #         Business_Model="SaaS",
# #         Top_Use_Cases="Predictive analytics",
# #         Learning_Focus="Machine Learning",
# #         Tech_stack="Python, AWS, PostgreSQL",
# #         Compliance="SOC 2",
# #         users_response="""
# #         Q1: SQL helps analysts extract and analyze data efficiently.
# #         Q2: Regression analysis allows identifying purchasing trends but I need to practice more.
# #         Q3: Storytelling should adapt visuals for executives but I am still learning narrative flow.
# #         """
# #     )
# #     print("=== Final Assessment ===")
# #     print(output)
