from typing import Type
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
import dspy
import os
from dotenv import load_dotenv

# === Load secrets ===
# NOTE(review): absolute, machine-specific path — consider making this
# configurable (env var or relative path) before deploying elsewhere.
load_dotenv(dotenv_path="/home/azureuser/microlearn/backend/prompt_opt_dspy/.env")

azure_api_key = os.getenv("AZURE_API_KEY")
azure_api_host = os.getenv("AZURE_API_HOST")
azure_deployment_id = os.getenv("AZURE_DEPLOYMENT_ID")
azure_api_version = os.getenv("AZURE_API_VERSION")

# === Configure Azure OpenAI LM ===
# Fix: AZURE_DEPLOYMENT_ID was loaded but never used — the deployment name was
# hard-coded. Use the env var when present; keep the old literal as fallback so
# behavior is unchanged when the variable is not set.
lm = dspy.LM(
    f"azure/{azure_deployment_id or 'Csqr-gpt-4o-mini'}",
    api_key=azure_api_key,
    api_base=azure_api_host,
    api_version=azure_api_version,
    temperature=0.0,  # deterministic output for evaluation use
)
dspy.configure(lm=lm)


# === DSPy Signature ===
class AnswerEvaluationSignature(dspy.Signature):
    """
    You are a Response Evaluator.
    Assess user understanding of a question based on their answer.

    The user's company context (company name, industry, company size,
    business model, top use cases, learning focus, tech stack, and
    compliance requirements) is provided in the `company_context` input.

    Task:
    - Evaluate the given user answer against the provided question and context.
    - Identify correctness, completeness, and clarity.
    - Highlight strengths in the response.
    - Suggest improvements if necessary.
    - Keep the feedback concise and constructive.

    Expected Output:
    Structured evaluation covering:
      - **Understanding Level** (Poor / Fair / Good / Excellent)
      - **Strengths** (bullet points)
      - **Areas to Improve** (bullet points)
    """
    # Fix: the previous docstring embedded `{Company_Name}`-style placeholders
    # that were never substituted — a dspy.Signature docstring is emitted
    # verbatim as the prompt instructions, so literal unfilled braces leaked
    # into every LLM call. The real context arrives via `company_context`.

    company_context: str = dspy.InputField()  # flattened company profile string
    question: str = dspy.InputField()         # the question that was asked
    answer: str = dspy.InputField()           # the user's answer to evaluate
    evaluation: str = dspy.OutputField(
        desc="Evaluation including understanding level, strengths, and improvement areas"
    )


# === DSPy Module with Bootstrap ===
class AnswerEvaluatorModule(dspy.Module):
    """Evaluates a user's answer using a few-shot bootstrapped predictor.

    Fix: `BootstrapFewShot.compile()` issues live LM calls, and the previous
    version re-ran it inside every `__init__` — one full compilation per tool
    invocation. The compiled predictor is now cached at class level and built
    lazily on first use, so the cost is paid once per process.
    """

    # Shared compiled predictor; populated lazily on first instantiation.
    _compiled_predictor = None

    def __init__(self):
        super().__init__()
        # Compile once, then reuse across all instances.
        if AnswerEvaluatorModule._compiled_predictor is None:
            AnswerEvaluatorModule._compiled_predictor = self._build_predictor()
        self.predictor = AnswerEvaluatorModule._compiled_predictor

    @staticmethod
    def _build_predictor():
        """Create the base predictor and bootstrap it on the seed trainset."""
        base_predictor = dspy.Predict(AnswerEvaluationSignature)

        # === Training examples for bootstrap ===
        trainset = [
            dspy.Example(
                company_context="""
                    
                    Company Name: FinSight
                    Industry: Financial Services
                    Company Size: 250 employees
                    Business Model: B2B
                    Top Use Cases: Financial modeling
                    Learning Focus: Data Analysis
                    Tech Stack: Excel, Tableau, SQL Server
                    Compliance: SOX, PCI DSS
                                    """,

                question="What is the role of SQL in analyzing structured data?",
                answer="SQL helps analysts query databases, filter records, and summarize large datasets to support decision-making.",
                evaluation="""
                
                **Understanding Level**: Good

                **Strengths**:
                - Correctly explains SQL's role in querying and summarizing data.
                - Focuses on structured data analysis.

                **Areas to Improve**:
                - Could mention SQL's ability to join multiple tables for deeper insights.
                        
                """
            ).with_inputs("company_context", "question", "answer"),
        ]

        # === Custom Metric ===
        def evaluation_metric(gold, pred, trace=None):
            """Score 1 if the output contains all required sections
            (Understanding Level, Strengths, Areas to Improve), else 0."""
            output = getattr(pred, "evaluation", "") or ""
            required_parts = ["Understanding Level", "Strengths", "Areas to Improve"]
            return int(all(part in output for part in required_parts))

        # === Bootstrap Few-Shot Optimizer ===
        optimizer = dspy.BootstrapFewShot(metric=evaluation_metric)
        return optimizer.compile(
            student=base_predictor,
            trainset=trainset
        )

    def forward(self, company_context, question, answer):
        """Run the compiled predictor on one (context, question, answer) triple."""
        return self.predictor(
            company_context=company_context,
            question=question,
            answer=answer
        )


# === Tool Input Schema ===
class AnswerEvaluatorInput(BaseModel):
    """Input schema for AnswerEvaluationTool."""
    # NOTE: field names deliberately use PascalCase-with-underscores rather
    # than PEP 8 snake_case — they are part of the tool's public argument
    # schema seen by the agent, so renaming them would break callers.
    # The eight company-profile fields below are flattened into one
    # `company_context` string by AnswerEvaluationTool._run.
    Company_Name: str = Field(..., description="Name of the company")
    Industry: str = Field(..., description="Industry of the company")
    Company_Size: str = Field(..., description="Size of the company")
    Business_Model: str = Field(..., description="Company's business model")
    Top_Use_Cases: str = Field(..., description="Top use cases the company focuses on")
    Learning_Focus: str = Field(..., description="Learning focus for the user")
    Tech_stack: str = Field(..., description="Company's tech stack")
    Compliance: str = Field(..., description="Relevant compliance requirements")
    question: str = Field(..., description="The question being answered")
    answer: str = Field(..., description="The user's provided answer")


# === Custom DSPy Tool ===
class AnswerEvaluationTool(BaseTool):
    """CrewAI tool that delegates answer assessment to AnswerEvaluatorModule."""

    name: str = "Answer Evaluation Tool"
    description: str = (
        "Evaluates a user's answer to a given question, "
        "assessing correctness, completeness, and clarity, "
        "while providing constructive feedback."
    )
    args_schema: Type[BaseModel] = AnswerEvaluatorInput

    def _run(
        self,
        Company_Name: str,
        Industry: str,
        Company_Size: str,
        Business_Model: str,
        Top_Use_Cases: str,
        Learning_Focus: str,
        Tech_stack: str,
        Compliance: str,
        question: str,
        answer: str
    ) -> str:
        # Flatten the individual company fields into the single context
        # string that the DSPy signature expects as `company_context`.
        context_block = f"""
        Company Name: {Company_Name}
        Industry: {Industry}
        Company Size: {Company_Size}
        Business Model: {Business_Model}
        Top Use Cases: {Top_Use_Cases}
        Learning Focus: {Learning_Focus}
        Tech Stack: {Tech_stack}
        Compliance: {Compliance}
        """

        # Hand the evaluation off to the bootstrapped DSPy module and
        # return just the evaluation text to the calling agent.
        module = AnswerEvaluatorModule()
        prediction = module.forward(
            company_context=context_block,
            question=question,
            answer=answer,
        )

        return prediction.evaluation


# # === CLI Test Usage ===
# if __name__ == "__main__":
#     tool = AnswerEvaluationTool()
#     output = tool._run(
#         Company_Name="DeepCore AI",
#         Industry="Technology",
#         Company_Size="500 employees",
#         Business_Model="SaaS",
#         Top_Use_Cases="Predictive analytics",
#         Learning_Focus="Machine Learning",
#         Tech_stack="Python, AWS, PostgreSQL",
#         Compliance="SOC 2",
#         question="What is the purpose of SQL in the context of data analysis for business analysts?",
#         # answer="SQL helps analysts pull raw data from databases and summarize it into reports, but I am not sure how it supports predictive analytics."
#         answer="SQL helps analysts query databases, filter records, and summarize large datasets to support decision-making.",

#     )
#     print("=== Answer Evaluation ===")
#     print(output)



##### Older Version Without Bootstrap (dead code kept for reference only — prefer deleting; version control preserves history) #####

# from typing import Type
# from crewai.tools import BaseTool
# from pydantic import BaseModel, Field
# import dspy
# import os
# from dotenv import load_dotenv

# # === Load secrets ===
# load_dotenv(dotenv_path="/home/azureuser/microlearn/backend/prompt_opt_dspy/.env")

# azure_api_key = os.getenv("AZURE_API_KEY")
# azure_api_host = os.getenv("AZURE_API_HOST")
# azure_deployment_id = os.getenv("AZURE_DEPLOYMENT_ID")
# azure_api_version = os.getenv("AZURE_API_VERSION")

# # === Configure Azure OpenAI LM ===
# lm = dspy.LM(
#     "azure/Csqr-gpt-4o-mini",
#     api_key=azure_api_key,
#     api_base=azure_api_host,
#     api_version=azure_api_version,
#     temperature=0.0,
# )
# dspy.configure(lm=lm)


# # === DSPy Signature ===
# class AnswerEvaluationSignature(dspy.Signature):
#     """
#     You are a Response Evaluator.
#     Assess user understanding of a question based on their answer.

#     The user's company context is:
#       Company Name : {Company_Name}
#       Industry : {Industry}
#       Company Size : {Company_Size}
#       Business Model : {Business_Model}
#       Top Use Cases : {Top_Use_Cases}
#       Learning Focus : {Learning_Focus}
#       Tech Stack : {Tech_stack}
#       Compliance : {Compliance}

#     Task:
#     - Evaluate the given user answer against the provided question and context.
#     - Identify correctness, completeness, and clarity.
#     - Highlight strengths in the response.
#     - Suggest improvements if necessary.
#     - Keep the feedback concise and constructive.
    
#     Expected Output:
#     Structured evaluation covering:
#       - **Understanding Level** (Poor / Fair / Good / Excellent)
#       - **Strengths** (bullet points)
#       - **Areas to Improve** (bullet points)
#     """

#     company_context: str = dspy.InputField()
#     question: str = dspy.InputField()
#     answer: str = dspy.InputField()
#     evaluation: str = dspy.OutputField(
#         desc="Evaluation including understanding level, strengths, and improvement areas"
#     )


# # === DSPy Module ===
# class AnswerEvaluatorModule(dspy.Module):
#     def __init__(self):
#         super().__init__()
#         self.predictor = dspy.Predict(AnswerEvaluationSignature)

#     def forward(self, company_context, question, answer):
#         return self.predictor(
#             company_context=company_context,
#             question=question,
#             answer=answer
#         )


# # === Tool Input Schema ===
# class AnswerEvaluatorInput(BaseModel):
#     """Input schema for AnswerEvaluationTool."""
#     Company_Name: str = Field(..., description="Name of the company")
#     Industry: str = Field(..., description="Industry of the company")
#     Company_Size: str = Field(..., description="Size of the company")
#     Business_Model: str = Field(..., description="Company's business model")
#     Top_Use_Cases: str = Field(..., description="Top use cases the company focuses on")
#     Learning_Focus: str = Field(..., description="Learning focus for the user")
#     Tech_stack: str = Field(..., description="Company's tech stack")
#     Compliance: str = Field(..., description="Relevant compliance requirements")
#     question: str = Field(..., description="The question being answered")
#     answer: str = Field(..., description="The user's provided answer")


# # === Custom DSPy Tool ===
# class AnswerEvaluationTool(BaseTool):
#     name: str = "Answer Evaluation Tool"
#     description: str = (
#         "Evaluates a user's answer to a given question, "
#         "assessing correctness, completeness, and clarity, "
#         "while providing constructive feedback."
#     )
#     args_schema: Type[BaseModel] = AnswerEvaluatorInput

#     def _run(
#         self,
#         Company_Name: str,
#         Industry: str,
#         Company_Size: str,
#         Business_Model: str,
#         Top_Use_Cases: str,
#         Learning_Focus: str,
#         Tech_stack: str,
#         Compliance: str,
#         question: str,
#         answer: str
#     ) -> str:

#         # Build company context string
#         company_context = f"""
#         Company Name: {Company_Name}
#         Industry: {Industry}
#         Company Size: {Company_Size}
#         Business Model: {Business_Model}
#         Top Use Cases: {Top_Use_Cases}
#         Learning Focus: {Learning_Focus}
#         Tech Stack: {Tech_stack}
#         Compliance: {Compliance}
#         """

#         evaluator = AnswerEvaluatorModule()
#         result = evaluator.forward(
#             company_context=company_context,
#             question=question,
#             answer=answer
#         )

#         return result.evaluation


# # # === CLI Test Usage ===
# # if __name__ == "__main__":
# #     tool = AnswerEvaluationTool()
# #     output = tool._run(
# #         Company_Name="DeepCore AI",
# #         Industry="Technology",
# #         Company_Size="500 employees",
# #         Business_Model="SaaS",
# #         Top_Use_Cases="Predictive analytics",
# #         Learning_Focus="Machine Learning",
# #         Tech_stack="Python, AWS, PostgreSQL",
# #         Compliance="SOC 2",
# #         question="What is the purpose of SQL in the context of data analysis for business analysts?",
# #         answer="SQL helps analysts pull raw data from databases and summarize it into reports, but I am not sure how it supports predictive analytics."
# #     )
# #     print("=== Answer Evaluation ===")
# #     print(output)
