import os
import logging
from typing import Dict, List, Any, Optional

# Logging is configured before the FastAPI imports so that any log output
# emitted at import time is already formatted.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("skill_assessment_api")

from fastapi import FastAPI, HTTPException, status
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field

# Import the question-generation processor. When the module is executed as a
# script (rather than as part of a package), the sibling module may not be on
# sys.path, so fall back to adding this file's directory and retrying.
try:
    from skill_assessment_processor import generate_skill_assessment
except ImportError:
    import sys
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
    from skill_assessment_processor import generate_skill_assessment


# ── Skill Assessment models ────────────────────────────────────────────────

class UserDetails(BaseModel):
    """Caller-supplied context about the user whose skills are assessed.

    All fields are optional; the designation and job-profile names fall back
    to generic software-engineering defaults when omitted.
    """

    # Name of the skill gap the assessment targets.
    skill_gap_name: Optional[str] = None
    # Current designation / role title of the user.
    designation_name: Optional[str] = "Senior Software Engineer"
    # Job profile the user is mapped to.
    job_profile_name: Optional[str] = "Software Engineer"


class PreviouslyGeneratedQuestion(BaseModel):
    """
    Represents a single question that was already generated for the same
    skill and difficulty level in a previous attempt.  Only `question_name`
    is strictly required by the processor; all other fields are stored for
    reference and forwarded as-is.
    """
    # ── LMS record identifiers (opaque to this service, forwarded as-is) ──
    lms_user_skill_assessment_question_id: Optional[str] = None
    Client_Id:           Optional[str] = None
    user_id:             Optional[str] = None
    lms_user_skill_assessment_id: Optional[str] = None
    # ── Question content — the only field the processor requires ──
    question_name:       str
    question_type:       Optional[str] = None
    # ── Answer options and recorded answers ──
    option_1:            Optional[str] = None
    option_2:            Optional[str] = None
    option_3:            Optional[str] = None
    option_4:            Optional[str] = None
    correct_answer:      Optional[str] = None
    user_answer:         Optional[str] = None
    # ── Skill / attempt bookkeeping ──
    skill_id:            Optional[str] = None
    skill_type:          Optional[str] = None
    attempt:             Optional[str] = None
    # ── Audit metadata ──
    created_at:          Optional[str] = None
    created_by:          Optional[str] = None


class SkillAssessmentRequest(BaseModel):
    """Request payload for the `/skill_assessment_data` endpoint."""

    # Context about the user being assessed (skill, designation, profile).
    userDetails:          UserDetails
    # Company name used to contextualise the generated questions.
    companyName:          str

    # Difficulty tier for the 3 questions; validated to the 1..3 range.
    questions_difficulty_level: int = Field(
        default=1,
        ge=1,
        le=3,
        description=(
            "Difficulty level for the 3 questions to generate. "
            "1 = Beginner | 2 = Intermediate | 3 = Advanced"
        ),
    )

    # 1-based retry counter within a single difficulty tier.
    attempt: int = Field(
        default=1,
        ge=1,
        description=(
            "Which attempt for this difficulty level (1-based). "
            "Increments each time the user retakes the same difficulty tier, "
            "ensuring fresh questions are generated on every retry."
        ),
    )

    # Dedup source: questions from earlier attempts at this difficulty level.
    previously_generated_questions: List[PreviouslyGeneratedQuestion] = Field(
        default_factory=list,
        description=(
            "All questions already generated for the *same* difficulty level "
            "across prior attempts.  Pass an empty list for attempt 1. "
            "The processor uses these to guarantee no repeated questions."
        ),
    )


class SkillOption(BaseModel):
    """A single answer option attached to an assessment question."""

    # Stable identifier for the option.
    optionId: str
    # Text shown to the user.
    optionText: str
    # Whether this option is the correct answer.
    isCorrect: bool


class SkillData(BaseModel):
    """Skill metadata attached to a generated question."""

    # Identifier of the skill the question exercises.
    skillId: str
    # Human-readable skill name.
    skillName: str
    # Difficulty label for this question (e.g. Beginner/Intermediate/Advanced).
    skillLevel: str
    # 1-based attempt number the question was generated for.
    attempt: int


class AssessmentQuestion(BaseModel):
    """One generated question with its skill metadata and answer options."""

    # Unique identifier for the question.
    questionId: str
    # The question text itself.
    questionText: str
    # Difficulty label of the question.
    difficulty: str
    # Numeric question-type code (semantics defined by the processor).
    questionType: int
    # Skills this question is associated with.
    skills: List[SkillData]
    # Selectable answer options.
    options: List[SkillOption]


class SkillAssessmentResponse(BaseModel):
    """Response payload for `/skill_assessment_data`."""

    # Identifier of the generated assessment.
    assessmentId: str
    # Display title for the assessment.
    assessmentTitle: str
    # Number of questions in `questions` (the endpoint generates 3 per call).
    totalQuestions: int
    # The generated questions.
    questions: List[AssessmentQuestion]


# ── App ────────────────────────────────────────────────────────────────────

# Application instance with OpenAPI metadata describing the single endpoint.
app = FastAPI(
    title="Skill Assessment API",
    description=(
        "Skill Assessment question generation powered by Ollama.\n\n"
        "The `/skill_assessment_data` endpoint generates **exactly 3 questions** "
        "per call, scoped to a single difficulty level and attempt number, "
        "with built-in deduplication against previously generated questions."
    ),
    version="2.0.0",
)

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers under the CORS spec (a wildcard origin is not allowed
# on credentialed requests) — confirm whether credentials are actually needed
# and, if so, list explicit origins instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# ── Endpoints ──────────────────────────────────────────────────────────────

@app.post("/skill_assessment_data", response_model=SkillAssessmentResponse)
def skill_assessment_data_endpoint(payload: SkillAssessmentRequest):
    """
    Generate **3 skill-assessment questions** for a specific difficulty level
    and attempt number.

    ### Difficulty levels
    | `questions_difficulty_level` | Label        |
    |------------------------------|--------------|
    | 1                            | Beginner     |
    | 2                            | Intermediate |
    | 3                            | Advanced     |

    ### Attempt logic
    - `attempt=1` with `previously_generated_questions=[]` → first set of 3 questions.
    - `attempt=2` with all attempt-1 questions in `previously_generated_questions`
      → a completely fresh set of 3, guaranteed not to repeat earlier questions.
    - Each subsequent attempt follows the same pattern.

    Powered by **Ollama (gemma3:12b)** — no external API calls, no API keys needed.

    Raises `HTTPException` (500) when the processor reports an error.

    Implementation note: this is a plain ``def`` (not ``async def``) on
    purpose — ``generate_skill_assessment`` performs a blocking local model
    call, and FastAPI runs sync endpoints in a worker thread, so the event
    loop is not stalled while the model generates questions.
    """
    # Map numeric level → human-readable label. Pydantic already constrains
    # the value to 1..3; "Beginner" is a defensive fallback only.
    difficulty_label_map = {1: "Beginner", 2: "Intermediate", 3: "Advanced"}
    difficulty_label = difficulty_label_map.get(
        payload.questions_difficulty_level, "Beginner"
    )

    # Lazy %-style arguments: formatting is skipped entirely when the INFO
    # level is disabled.
    logger.info(
        "SkillAssessment | company=%s | role=%s | skill=%s "
        "| difficulty=%s (level=%d) | attempt=%d | prev_questions=%d",
        payload.companyName,
        payload.userDetails.designation_name,
        payload.userDetails.skill_gap_name,
        difficulty_label,
        payload.questions_difficulty_level,
        payload.attempt,
        len(payload.previously_generated_questions),
    )

    # Convert Pydantic models → plain dicts for the processor.
    prev_questions_dicts = [q.dict() for q in payload.previously_generated_questions]

    result = generate_skill_assessment(
        user_details=payload.userDetails.dict(),
        company_name=payload.companyName,
        questions_difficulty_level=payload.questions_difficulty_level,
        attempt=payload.attempt,
        previously_generated_questions=prev_questions_dicts,
    )

    # The processor signals failure via an "error" key instead of raising;
    # translate that into a proper HTTP 500 for the client.
    if "error" in result:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=result["error"],
        )

    return result