import json
import logging
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from difflib import SequenceMatcher
import re
from datetime import datetime
from langchain_ollama import ChatOllama
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class RecommendedSkill:
    """Data class for recommended skill from Problem Statement 2."""
    skill_id: int  # Numeric identifier of the skill (e.g. 121 in the sample data)
    skill_name: str  # Human-readable skill name; used for semantic matching against courses
    skill_level: str  # Target proficiency, e.g. "Beginner"/"Intermediate" per the sample data
    priority: str  # Recommendation priority, e.g. "Low"/"Medium" per the sample data
    source: str  # Where the recommendation came from, e.g. "organizational_relevant" or "external"
    justification: str  # Free-text reason the upstream system recommended this skill

@dataclass
class Course:
    """Data class for course information."""
    course_id: str  # Catalog identifier; a string (e.g. "73" in the sample data)
    course_name: str  # Display title of the course
    short_description: str  # One-line summary, fed to the LLM prompts alongside the full description
    description: str  # Full course description used for LLM analysis
    skills: List[str]  # Existing skill tags; may be empty or partial

@dataclass
class CourseRecommendationRequest:
    """Data class for course recommendation request."""
    user_prompt: str  # Free-text goal from the user; carried through but not read by the matching pipeline in this file
    department: str  # Target department/domain used for relevance filtering
    recommended_skills: List[RecommendedSkill]  # Skills produced by Problem Statement 2
    offline_courses: List[Course]  # Candidate course catalog to evaluate

@dataclass
class InferredSkill:
    """Data class for LLM-inferred course skills."""
    skill_name: str  # Skill the LLM infers the course teaches
    skill_level: str  # LLM-assigned level; prompt requests Beginner/Intermediate/Advanced

@dataclass
class SkillMatch:
    """Data class for skill matching results."""
    user_skill: str  # Skill name from the user's recommended skills
    course_skill: str  # Skill name inferred from the course
    confidence: float  # Match confidence; pairs below 0.5 are discarded by the parser

@dataclass
class CourseRecommendation:
    """Data class for course recommendation result."""
    course_id: str  # Echoed from the input Course
    course_name: str  # Echoed from the input Course
    short_description: str  # Echoed from the input Course
    description: str  # Echoed from the input Course
    matched_skills: List[str]  # Distinct user skill names matched by this course
    relevance_score: float  # Combined score, capped at 1.0 (skill matches + domain confidence + bonuses)
    llm_justification: str  # Short LLM-written rationale for the recommendation
    matched_using: str  # 'llm' or 'existing_tags' (the current pipeline always sets 'llm')
    domain_relevance: bool  # Whether the LLM judged the course relevant to the department
    domain_confidence: float  # Confidence of the domain-relevance judgement
    inferred_skills: List[InferredSkill]  # Skills the LLM inferred the course teaches
    skill_matches: List[SkillMatch]  # Detailed user-skill/course-skill pairings with confidences

@dataclass
class CourseRecommendationResponse:
    """Data class for complete course recommendation response."""
    recommended_courses: List[CourseRecommendation]  # Sorted by relevance_score, descending
    total_courses_analyzed: int  # Number of courses in the incoming request
    domain_relevant_courses: int  # Courses that passed the domain-relevance filter
    courses_with_skill_matches: int  # Domain-relevant courses with at least one skill match
    analysis_summary: str  # One-line human-readable summary of the run

class CourseInferenceEngine:
    """Handles course analysis using LLM.

    Wraps a ChatOllama model behind the per-course pipeline steps:
    domain-relevance filtering, course-skill extraction, semantic skill
    matching, and recommendation justification. Every LLM-backed method
    catches its own errors and returns a safe fallback value so a single
    bad course or malformed response cannot abort a whole run.
    """

    def __init__(self, model_name: str = "gemma3:12b"):
        """Initialize the LLM model.

        Args:
            model_name: Ollama model identifier to instantiate.

        Raises:
            Exception: re-raised from ChatOllama if initialization fails
                (logged first).
        """
        try:
            self.llm = ChatOllama(model=model_name)
            logger.info(f"Initialized Course LLM with model: {model_name}")
        except Exception as e:
            logger.error(f"Failed to initialize Course LLM: {e}")
            raise

    def evaluate_domain_relevance(self, course: Course, department: str) -> Tuple[bool, float, str]:
        """
        Step 1: Evaluate if course is relevant to the department/domain.

        Args:
            course: Course to evaluate
            department: Target department/domain

        Returns:
            Tuple of (is_relevant, confidence_score, justification); on any
            error returns (False, 0.0, "Error in evaluation").
        """
        prompt_template = PromptTemplate(
            input_variables=["course_name", "short_description", "description", "department"],
            template="""
            You are an expert training analyst specializing in course relevance evaluation.
            
            Course Information:
            - Course Name: {course_name}
            - Short Description: {short_description}
            - Full Description: {description}
            
            Target Department/Domain: {department}
            
            Evaluate if this course is relevant to the '{department}' domain.
            
            Instructions:
            1. Analyze the course content, objectives, and target audience
            2. Determine if the course would benefit professionals in the {department} field
            3. Consider both direct relevance and transferable skills
            4. Be selective but not overly restrictive
            5. Consider modern workplace cross-functional needs
            
            Return your analysis in the following format:
            Is_Match: [Yes/No]
            Confidence: [0.0 to 1.0]
            Justification: [One clear sentence explaining your decision]
            
            Analysis:
            """
        )

        try:
            formatted_prompt = prompt_template.format(
                course_name=course.course_name,
                short_description=course.short_description,
                description=course.description,
                department=department
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])

            # Parse the structured Is_Match / Confidence / Justification reply.
            is_match, confidence, justification = self._parse_domain_response(response.content)

            logger.info(f"Domain relevance for '{course.course_name}': {is_match} (confidence: {confidence:.2f})")
            return is_match, confidence, justification

        except Exception as e:
            logger.error(f"Error in domain relevance evaluation: {e}")
            return False, 0.0, "Error in evaluation"

    def extract_course_skills(self, course: Course) -> List[InferredSkill]:
        """
        Step 2: Extract skills taught by the course.

        Args:
            course: Course to analyze

        Returns:
            List of inferred skills with levels; empty list on any error.
        """
        prompt_template = PromptTemplate(
            input_variables=["course_name", "short_description", "description", "existing_skills"],
            template="""
            You are an expert curriculum analyst specializing in identifying learning outcomes.
            
            Course Information:
            - Course Name: {course_name}
            - Short Description: {short_description}
            - Full Description: {description}
            - Existing Skill Tags: {existing_skills}
            
            Based on the course content, identify 3-5 key skills that learners will likely develop or improve.
            
            Instructions:
            1. Focus on concrete, applicable skills rather than vague concepts
            2. Include both technical and soft skills as appropriate
            3. Specify skill level (Beginner/Intermediate/Advanced) based on course complexity
            4. Consider the existing skill tags but don't be limited by them
            5. Be specific and practical
            
            Return skills in the following format:
            - Skill Name 1 | Skill Level
            - Skill Name 2 | Skill Level
            - Skill Name 3 | Skill Level
            ...
            
            Skills:
            """
        )

        try:
            existing_skills_text = ", ".join(course.skills) if course.skills else "None specified"

            formatted_prompt = prompt_template.format(
                course_name=course.course_name,
                short_description=course.short_description,
                description=course.description,
                existing_skills=existing_skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])

            # Parse "Name | Level" bullet lines into InferredSkill objects.
            inferred_skills = self._parse_skills_response(response.content)

            logger.info(f"Extracted {len(inferred_skills)} skills from '{course.course_name}'")
            return inferred_skills

        except Exception as e:
            logger.error(f"Error in skill extraction: {e}")
            return []

    def match_skills_semantically(self, user_skills: List[str],
                                course_skills: List[InferredSkill]) -> List[SkillMatch]:
        """
        Step 3: Semantically match user-needed skills with course skills.

        Args:
            user_skills: Skills needed by user (from Problem Statement 2)
            course_skills: Skills taught by course (inferred)

        Returns:
            List of skill matches with confidence scores; empty list when
            either input is empty or on any error.
        """
        if not user_skills or not course_skills:
            logger.warning(f"Empty inputs - User skills: {len(user_skills)}, Course skills: {len(course_skills)}")
            return []

        user_skills_text = ", ".join(user_skills)
        course_skills_text = ", ".join([f"{cs.skill_name} ({cs.skill_level})" for cs in course_skills])

        # Diagnostic chatter belongs at DEBUG level, not INFO.
        logger.debug("User skills: %s", user_skills_text)
        logger.debug("Course skills: %s", course_skills_text)

        prompt_template = PromptTemplate(
            input_variables=["user_skills", "course_skills"],
            template="""
            You are an expert skills matching analyst specializing in semantic skill alignment.
            
            User-Needed Skills: {user_skills}
            Course-Taught Skills: {course_skills}
            
            Match semantically related skill pairs between user needs and course offerings.
            
            Instructions:
            1. Look for direct matches, partial matches, and semantically related skills
            2. Consider synonyms, related concepts, and skill hierarchies
            3. Assign confidence scores (0.0 to 1.0) based on match quality
            4. Include matches with confidence >= 0.5
            5. A skill can match multiple course skills if relevant
            
            Return matches in the following format:
            User_Skill: [skill name] | Course_Skill: [skill name] | Confidence: [0.0-1.0]
            User_Skill: [skill name] | Course_Skill: [skill name] | Confidence: [0.0-1.0]
            ...
            
            Skill Matches:
            """
        )

        try:
            formatted_prompt = prompt_template.format(
                user_skills=user_skills_text,
                course_skills=course_skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])

            logger.debug("Raw LLM response: %s", response.content)

            # Parse matches from response (tolerates several output formats).
            skill_matches = self._parse_matches_response(response.content)

            logger.debug("Parsed matches: %s",
                         [(m.user_skill, m.course_skill, m.confidence) for m in skill_matches])

            logger.info(f"Found {len(skill_matches)} semantic skill matches")
            return skill_matches

        except Exception as e:
            logger.error(f"Error in semantic skill matching: {e}")
            return []

    def generate_course_justification(self, course: Course, matched_skills: List[str],
                                    relevance_score: float, department: str) -> str:
        """
        Generate justification for course recommendation.

        Args:
            course: Course being recommended
            matched_skills: Skills that matched user needs
            relevance_score: Calculated relevance score
            department: Target department

        Returns:
            Justification string; a generic fallback sentence on any error.
        """
        prompt_template = PromptTemplate(
            input_variables=["course_name", "matched_skills", "relevance_score", "department"],
            template="""
            You are an expert training advisor writing course recommendations.
            
            Course: {course_name}
            Matched Skills: {matched_skills}
            Relevance Score: {relevance_score}
            Department: {department}
            
            Write a concise, compelling justification (1-2 sentences) for why this course is recommended.
            
            Instructions:
            1. Focus on practical benefits and skill development
            2. Mention specific matched skills if relevant
            3. Keep it professional and actionable
            4. Highlight value proposition
            
            Justification:
            """
        )

        try:
            matched_skills_text = ", ".join(matched_skills) if matched_skills else "general professional development"

            formatted_prompt = prompt_template.format(
                course_name=course.course_name,
                matched_skills=matched_skills_text,
                relevance_score=relevance_score,
                department=department
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])

            justification = response.content.strip()
            # Strip the "Justification:" label if the model echoed it back.
            justification = justification.replace("Justification:", "").strip()

            return justification

        except Exception as e:
            logger.error(f"Error generating justification: {e}")
            return f"Recommended for {department} professionals to develop relevant skills."

    def _extract_confidence(self, text: str) -> Optional[float]:
        """Extract a confidence score from free text and normalize it to [0, 1].

        Values above 1.0 are treated as percentages (e.g. "85" -> 0.85); the
        result is capped at 1.0. Centralizes logic that was previously
        duplicated across the match parser (and absent from the domain
        parser, which let raw percentages like 90.0 through unnormalized).

        Args:
            text: Arbitrary text that may contain a number.

        Returns:
            Normalized confidence, or None when no number is present.
        """
        number = re.search(r'(\d+\.?\d*)', text)
        if not number:
            return None
        confidence = float(number.group(1))
        if confidence > 1.0:  # Convert percentage to decimal
            confidence /= 100.0
        return min(confidence, 1.0)

    def _parse_domain_response(self, response_text: str) -> Tuple[bool, float, str]:
        """Parse the Is_Match / Confidence / Justification domain reply.

        Returns:
            Tuple of (is_match, confidence, justification) with safe defaults
            (False, 0.0, "No justification provided") for missing fields and
            0.5 when a Confidence line is present but non-numeric.
        """
        is_match = False
        confidence = 0.0
        justification = "No justification provided"

        for line in response_text.strip().split('\n'):
            line = line.strip()
            if line.lower().startswith('is_match:'):
                match_text = line.split(':', 1)[1].strip().lower()
                is_match = 'yes' in match_text
            elif line.lower().startswith('confidence:'):
                parsed = self._extract_confidence(line.split(':', 1)[1])
                # Neutral default when the model emits a non-numeric value.
                confidence = parsed if parsed is not None else 0.5
            elif line.lower().startswith('justification:'):
                justification = line.split(':', 1)[1].strip()

        return is_match, confidence, justification

    def _parse_skills_response(self, response_text: str) -> List[InferredSkill]:
        """Parse "Skill Name | Level" lines into InferredSkill objects.

        Lines may be prefixed with a bullet ('-', '•', '*') or an enumeration
        marker ('1.', '2)'); those are stripped before splitting on '|'.
        """
        skills = []

        for line in response_text.strip().split('\n'):
            line = line.strip()
            # Strip a leading bullet OR a complete numeric marker such as
            # "3." / "3)". The previous character-class regex removed only a
            # single character, leaving e.g. ". Skill" behind for numbered lists.
            line = re.sub(r'^(?:[-•*]|\d+[.)])\s*', '', line)

            if '|' in line:
                parts = line.split('|')
                if len(parts) >= 2:
                    skill_name = parts[0].strip()
                    skill_level = parts[1].strip()

                    if skill_name and skill_level:
                        skills.append(InferredSkill(
                            skill_name=skill_name,
                            skill_level=skill_level
                        ))

        return skills

    def _parse_matches_response(self, response_text: str) -> List[SkillMatch]:
        """Parse skill matching response with robust parsing for different formats.

        Tries three formats in order per line:
          1. "User_Skill: X | Course_Skill: Y | Confidence: Z"
          2. "X: Y | Confidence: Z"
          3. Any line containing "confidence" and at least one colon.
        Confidence values are normalized via _extract_confidence; pairs below
        the 0.5 threshold are rejected by _add_match_if_valid.
        """
        matches = []
        lines = response_text.strip().split('\n')

        logger.debug("Parsing response with %d lines", len(lines))

        for line in lines:
            line = line.strip()

            # Skip empty lines and section headers.
            if not line or line.lower() in ['skill matches:', 'matches:', '']:
                continue

            try:
                # Format 1: the exact layout requested in the prompt.
                if 'User_Skill:' in line and 'Course_Skill:' in line and 'Confidence:' in line:
                    parts = line.split('|')
                    if len(parts) >= 3:
                        user_skill = parts[0].split(':', 1)[1].strip()
                        course_skill = parts[1].split(':', 1)[1].strip()
                        confidence = self._extract_confidence(parts[2].split(':', 1)[1])
                        if confidence is not None:
                            self._add_match_if_valid(matches, user_skill, course_skill, confidence)

                # Format 2: "Skill: Course_Skill | Confidence: X".
                elif ':' in line and 'Confidence:' in line and '|' in line:
                    parts = line.split('|')
                    if len(parts) >= 2:
                        skill_part = parts[0].strip()
                        confidence_part = ''

                        # Locate the segment carrying the confidence value.
                        for part in parts[1:]:
                            if 'confidence' in part.lower():
                                confidence_part = part
                                break

                        if ':' in skill_part:
                            user_skill = skill_part.split(':', 1)[0].strip()
                            course_skill_raw = skill_part.split(':', 1)[1].strip()

                            # Drop brackets and trailing parenthetical notes.
                            course_skill = re.sub(r'\[|\]', '', course_skill_raw)
                            course_skill = re.sub(r'\([^)]*\)$', '', course_skill).strip()

                            confidence = self._extract_confidence(confidence_part)
                            if confidence is not None:
                                self._add_match_if_valid(matches, user_skill, course_skill, confidence)
                                logger.debug("Parsed alternative format: %s -> %s (%s)",
                                             user_skill, course_skill, confidence)

                # Format 3: most lenient — any "... : ... Confidence: X" line.
                elif 'confidence' in line.lower() and ':' in line:
                    conf_text = re.search(r'confidence:?\s*(\d+\.?\d*)', line, flags=re.IGNORECASE)
                    if conf_text:
                        confidence = self._extract_confidence(conf_text.group(1))

                        colon_parts = line.split(':')
                        if confidence is not None and len(colon_parts) >= 2:
                            user_skill = colon_parts[0].strip()
                            # Everything after the first colon, up to the confidence marker.
                            rest = ':'.join(colon_parts[1:])
                            before_confidence = re.split(r'\|\s*confidence', rest, flags=re.IGNORECASE)[0].strip()
                            course_skill = re.sub(r'\[|\]|\([^)]*\)', '', before_confidence).strip()

                            if user_skill and course_skill:
                                self._add_match_if_valid(matches, user_skill, course_skill, confidence)
                                logger.debug("Parsed flexible format: %s -> %s (%s)",
                                             user_skill, course_skill, confidence)

            except (ValueError, IndexError) as e:
                logger.warning(f"Failed to parse line: '{line}' - Error: {e}")
                continue

        logger.debug("Final matches count: %d", len(matches))
        return matches

    def _add_match_if_valid(self, matches: List[SkillMatch], user_skill: str, course_skill: str, confidence: float):
        """Append a SkillMatch when both names are non-empty and confidence >= 0.5."""
        if confidence >= 0.5 and user_skill and course_skill:
            matches.append(SkillMatch(
                user_skill=user_skill,
                course_skill=course_skill,
                confidence=confidence
            ))
            logger.debug("Added match: %s -> %s (%s)", user_skill, course_skill, confidence)
        else:
            logger.debug("Rejected match: %s -> %s (%s) - Below threshold or invalid",
                         user_skill, course_skill, confidence)

class CourseRecommendationSystem:
    """Main Course Recommendation System.

    Orchestrates the full pipeline for a single request: parse the payload,
    run each offline course through CourseInferenceEngine (domain filter ->
    skill extraction -> semantic matching), score and justify the courses
    that matched at least one needed skill, and package the results.
    """

    def __init__(self, model_name: str = "gemma3:12b",
                 domain_confidence_threshold: float = 0.6):
        """
        Initialize the course recommendation system.

        Args:
            model_name: LLM model name
            domain_confidence_threshold: Minimum confidence for domain relevance
        """
        self.course_engine = CourseInferenceEngine(model_name)
        self.domain_threshold = domain_confidence_threshold

        logger.info(f"Course Recommendation System initialized with domain threshold: {domain_confidence_threshold}")

    def recommend_courses(self, request_data: Dict) -> CourseRecommendationResponse:
        """
        Main method to recommend courses based on skills from Problem Statement 2.

        Args:
            request_data: Dictionary containing user prompt, department, skills, and courses

        Returns:
            CourseRecommendationResponse with recommendations; on any error an
            empty response whose summary carries the error message.
        """
        try:
            request = self._parse_request(request_data)

            # Names of the skills the user needs, used for semantic matching.
            user_skill_names = [skill.skill_name for skill in request.recommended_skills]

            course_recommendations = []
            domain_relevant_count = 0
            courses_with_matches = 0

            for course in request.offline_courses:
                logger.info(f"Processing course: {course.course_name}")

                # Step 1: domain filter — drop courses judged irrelevant or
                # whose confidence falls below the configured threshold.
                is_relevant, domain_confidence, domain_justification = self.course_engine.evaluate_domain_relevance(
                    course, request.department
                )
                if not (is_relevant and domain_confidence >= self.domain_threshold):
                    continue
                domain_relevant_count += 1

                # Step 2: infer which skills the course teaches.
                inferred_skills = self.course_engine.extract_course_skills(course)

                # Step 3: semantically match needed skills against taught skills.
                skill_matches = self.course_engine.match_skills_semantically(
                    user_skill_names, inferred_skills
                )
                if not skill_matches:
                    # Only courses with at least one skill match are recommended;
                    # skipping here also avoids a wasted justification LLM call.
                    continue
                courses_with_matches += 1

                # Step 4: combine match quality, domain confidence, and bonuses.
                relevance_score = self._calculate_relevance_score(
                    skill_matches, domain_confidence, inferred_skills
                )

                # Deduplicate while preserving first-seen order. list(set(...))
                # produced a nondeterministic ordering between runs.
                matched_skill_names = list(dict.fromkeys(match.user_skill for match in skill_matches))

                # Step 5: generate the human-facing justification.
                justification = self.course_engine.generate_course_justification(
                    course, matched_skill_names, relevance_score, request.department
                )

                course_recommendations.append(CourseRecommendation(
                    course_id=course.course_id,
                    course_name=course.course_name,
                    short_description=course.short_description,
                    description=course.description,
                    matched_skills=matched_skill_names,
                    relevance_score=relevance_score,
                    llm_justification=justification,
                    matched_using="llm",
                    domain_relevance=is_relevant,
                    domain_confidence=domain_confidence,
                    inferred_skills=inferred_skills,
                    skill_matches=skill_matches
                ))

            # Best-scoring courses first.
            course_recommendations.sort(key=lambda x: x.relevance_score, reverse=True)

            analysis_summary = self._generate_analysis_summary(
                len(request.offline_courses),
                domain_relevant_count,
                courses_with_matches,
                len(course_recommendations),
                request.department
            )

            return CourseRecommendationResponse(
                recommended_courses=course_recommendations,
                total_courses_analyzed=len(request.offline_courses),
                domain_relevant_courses=domain_relevant_count,
                courses_with_skill_matches=courses_with_matches,
                analysis_summary=analysis_summary
            )

        except Exception as e:
            logger.error(f"Error in course recommendation: {e}")
            return CourseRecommendationResponse(
                recommended_courses=[],
                total_courses_analyzed=0,
                domain_relevant_courses=0,
                courses_with_skill_matches=0,
                analysis_summary=f"Error in analysis: {str(e)}"
            )

    def _parse_request(self, request_data: Dict) -> CourseRecommendationRequest:
        """Parse request data into a CourseRecommendationRequest object.

        'recommended_skills' and 'offline_courses' are optional (default to
        empty); 'user_prompt' and 'department' are required.

        Raises:
            KeyError: when a required top-level or per-item field is missing.
        """
        recommended_skills = [
            RecommendedSkill(
                skill_id=skill_data['skill_id'],
                skill_name=skill_data['skill_name'],
                skill_level=skill_data['skill_level'],
                priority=skill_data['priority'],
                source=skill_data['source'],
                justification=skill_data['justification']
            )
            for skill_data in request_data.get('recommended_skills', [])
        ]

        offline_courses = [
            Course(
                course_id=course_data['course_id'],
                course_name=course_data['course_name'],
                short_description=course_data['short_description'],
                description=course_data['description'],
                skills=course_data.get('skills', [])  # tags are optional
            )
            for course_data in request_data.get('offline_courses', [])
        ]

        return CourseRecommendationRequest(
            user_prompt=request_data['user_prompt'],
            department=request_data['department'],
            recommended_skills=recommended_skills,
            offline_courses=offline_courses
        )

    def _calculate_relevance_score(self, skill_matches: List[SkillMatch],
                                 domain_confidence: float,
                                 inferred_skills: List[InferredSkill]) -> float:
        """
        Calculate overall relevance score for a course.

        Weighted blend: 60% average match confidence, 30% domain confidence,
        plus a bonus of 0.1 per match (capped at 0.3) and 0.05 per
        Advanced/Expert inferred skill (capped at 0.2); result capped at 1.0.

        Args:
            skill_matches: List of skill matches
            domain_confidence: Domain relevance confidence
            inferred_skills: Skills inferred from course

        Returns:
            Relevance score between 0.0 and 1.0 (0.0 when there are no matches).
        """
        if not skill_matches:
            return 0.0

        avg_skill_confidence = sum(match.confidence for match in skill_matches) / len(skill_matches)

        # Reward courses that cover several needed skills.
        match_count_bonus = min(len(skill_matches) * 0.1, 0.3)

        # Reward depth: advanced-level inferred skills add a small bonus.
        level_bonus = 0.0
        for skill in inferred_skills:
            if skill.skill_level.lower() in ['advanced', 'expert']:
                level_bonus += 0.05
        level_bonus = min(level_bonus, 0.2)

        relevance_score = (
            avg_skill_confidence * 0.6 +  # 60% weight on skill matching
            domain_confidence * 0.3 +     # 30% weight on domain relevance
            match_count_bonus +            # Bonus for multiple matches
            level_bonus                    # Bonus for advanced skills
        )

        return min(relevance_score, 1.0)  # Cap at 1.0

    def _generate_analysis_summary(self, total_courses: int, domain_relevant: int,
                                 with_matches: int, recommended: int, department: str) -> str:
        """Generate a one-line, human-readable summary of the run."""
        return (f"Analyzed {total_courses} courses for {department} domain. "
                f"{domain_relevant} courses were domain-relevant, "
                f"{with_matches} had skill matches, "
                f"and {recommended} courses are recommended.")

    def process_json_request(self, json_data: str) -> Dict:
        """
        Process JSON request and return a JSON-serializable response.

        Args:
            json_data: JSON string with request data

        Returns:
            Dictionary with course recommendations; on failure a dict with
            'success': False and an 'error' message.
        """
        try:
            request_data = json.loads(json_data)
            response = self.recommend_courses(request_data)

            # Flatten dataclasses into plain dicts, rounding floats for output.
            recommendations_json = []
            for rec in response.recommended_courses:
                recommendations_json.append({
                    'course_id': rec.course_id,
                    'course_name': rec.course_name,
                    'short_description': rec.short_description,
                    'description': rec.description,
                    'matched_skills': rec.matched_skills,
                    'relevance_score': round(rec.relevance_score, 4),
                    'llm_justification': rec.llm_justification,
                    'matched_using': rec.matched_using,
                    'domain_relevance': rec.domain_relevance,
                    'domain_confidence': round(rec.domain_confidence, 4),
                    'inferred_skills': [
                        {'skill_name': skill.skill_name, 'skill_level': skill.skill_level}
                        for skill in rec.inferred_skills
                    ],
                    'skill_matches': [
                        {
                            'user_skill': match.user_skill,
                            'course_skill': match.course_skill,
                            'confidence': round(match.confidence, 4)
                        }
                        for match in rec.skill_matches
                    ]
                })

            return {
                'success': True,
                'recommended_courses': recommendations_json,
                'total_courses_analyzed': response.total_courses_analyzed,
                'domain_relevant_courses': response.domain_relevant_courses,
                'courses_with_skill_matches': response.courses_with_skill_matches,
                'analysis_summary': response.analysis_summary
            }

        except json.JSONDecodeError as e:
            logger.error(f"JSON decode error: {e}")
            return {
                'success': False,
                'error': f'Invalid JSON format: {e}',
                'recommended_courses': []
            }
        except Exception as e:
            logger.error(f"Processing error: {e}")
            return {
                'success': False,
                'error': str(e),
                'recommended_courses': []
            }

# Example usage
def main():
    """Example usage of the Course Recommendation System.

    Builds a sample request (skills from Problem Statement 2 plus a small
    offline course catalog), runs it through CourseRecommendationSystem,
    and pretty-prints the resulting recommendations to stdout.

    Returns:
        None. Output is printed; on a failed request the error is printed
        and the function returns early.
    """

    # Initialize system
    course_system = CourseRecommendationSystem(domain_confidence_threshold=0.6)

    # Sample request data (using skills from Problem Statement 2)
    sample_request = {
        "user_prompt": "Want to improve my tech ability",
        "department": "Web Development",
        "recommended_skills": [
            {
                "skill_id": 121,
                "skill_name": "JavaScript",
                "skill_level": "Intermediate",
                "priority": "Medium",
                "source": "organizational_relevant",
                "justification": "Relevant to Web Development field and available in organization"
            },
            {
                "skill_id": 122,
                "skill_name": "React",
                "skill_level": "Intermediate",
                "priority": "Medium",
                "source": "organizational_relevant",
                "justification": "Relevant to Web Development field and available in organization"
            },
            {
                "skill_id": 9000,
                "skill_name": "TypeScript",
                "skill_level": "Beginner",
                "priority": "Low",
                "source": "external",
                "justification": "External Web Development skill to fill capability gap"
            }
        ],
        "offline_courses": [
            {
                "course_id": "73",
                "course_name": "Java Course",
                "short_description": "Java Course",
                "description": "Comprehensive Java programming course covering fundamentals to advanced concepts",
                "skills": ["java", "javascript", "coding"]
            },
            {
                "course_id": "101",
                "course_name": "Advanced JavaScript Development",
                "short_description": "Master modern JavaScript and ES6+ features",
                "description": "Deep dive into JavaScript, covering ES6+, async programming, and modern development practices",
                "skills": ["javascript", "programming"]
            },
            {
                "course_id": "102",
                "course_name": "React Fundamentals",
                "short_description": "Learn React for modern web applications",
                "description": "Complete guide to React including components, hooks, state management, and deployment",
                "skills": ["react", "frontend", "web development"]
            },
            {
                "course_id": "103",
                "course_name": "Digital Marketing Basics",
                "short_description": "Introduction to digital marketing strategies",
                "description": "Learn SEO, social media marketing, and digital advertising fundamentals",
                "skills": ["marketing", "seo", "advertising"]
            }
        ]
    }

    # Process request
    print("Processing Course Recommendation request...")
    result = course_system.process_json_request(json.dumps(sample_request))

    # Display results
    print("\nCourse Recommendation Results:")
    print(f"Success: {result['success']}")

    # Error responses carry only 'success', 'error', and 'recommended_courses'
    # (see the except handlers in process_json_request), so the summary keys
    # below would raise KeyError on failure — bail out early instead.
    if not result['success']:
        print(f"Error: {result.get('error', 'Unknown error')}")
        return

    print(f"Total Courses Analyzed: {result['total_courses_analyzed']}")
    print(f"Domain Relevant Courses: {result['domain_relevant_courses']}")
    print(f"Courses with Skill Matches: {result['courses_with_skill_matches']}")
    print(f"Analysis Summary: {result['analysis_summary']}")

    print("\nRecommended Courses:")
    for i, course in enumerate(result['recommended_courses'], 1):
        print(f"\n{i}. {course['course_name']} (ID: {course['course_id']})")
        print(f"   Relevance Score: {course['relevance_score']}")
        print(f"   Domain Confidence: {course['domain_confidence']}")
        print(f"   Matched Skills: {', '.join(course['matched_skills'])}")
        print(f"   Justification: {course['llm_justification']}")
        print(f"   Inferred Skills: {', '.join([s['skill_name'] for s in course['inferred_skills']])}")

        print("   Skill Matches:")
        for match in course['skill_matches']:
            print(f"     - {match['user_skill']} → {match['course_skill']} ({match['confidence']:.2f})")

# Run the example only when this file is executed as a script, not when imported.
if __name__ == "__main__":
    main()