# tna_core.py - Enhanced core business logic for the TNA (Training Needs Analysis) system with specificity detection

import json
import logging
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from difflib import SequenceMatcher
import re
from datetime import datetime
from langchain_ollama import ChatOllama
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# =====================================
# DATA CLASSES
# =====================================

@dataclass
class Department:
    id: int
    name: str

@dataclass
class TNARequest:
    user_prompt: str
    organization_departments: List[Department]

@dataclass
class TNAResponse:
    department_id: int
    department_name: str
    inferred_department: str
    similarity_score: float
    matched: bool

@dataclass
class Skill:
    skill_id: int
    skill_name: str
    skill_level: Optional[str] = None
    skill_count: Optional[int] = None

@dataclass
class SkillRecommendation:
    skill_id: int
    skill_name: str
    skill_level: str
    priority: str
    source: str
    justification: str

@dataclass
class SkillAnalysisResponse:
    recommended_skills: List[SkillRecommendation]
    analysis_summary: str
    department_coverage: float
    total_skills_recommended: int
    case_applied: str
    identified_field: str
    field_analysis: Optional[Dict] = None

@dataclass
class Course:
    course_id: str
    course_name: str
    short_description: str
    description: str
    skills: List[str]

@dataclass
class InferredSkillInternal:
    skill_name: str
    skill_level: str

@dataclass
class SkillMatchInternal:
    user_skill: str
    course_skill: str
    confidence: float

@dataclass
class CourseRecommendationInternal:
    course_id: str
    course_name: str
    short_description: str
    description: str
    matched_skills: List[str]
    relevance_score: float
    llm_justification: str
    matched_using: str
    domain_relevance: bool
    domain_confidence: float
    inferred_skills: List[InferredSkillInternal]
    skill_matches: List[SkillMatchInternal]

# =====================================
# ENHANCED SKILL INFERENCE ENGINE
# =====================================

class EnhancedSkillInferenceEngine:
    """Enhanced skill inference with specificity-aware analysis"""
    
    def __init__(self, model_name: str = "gemma3:12b"):
        try:
            self.llm = ChatOllama(model=model_name)
            logger.info(f"Initialized Enhanced Skill LLM with model: {model_name}")
        except Exception as e:
            logger.error(f"Failed to initialize Enhanced Skill LLM: {e}")
            raise
    
    def identify_field_domain(self, user_query: str, department_name: str) -> dict:
        """Enhanced field identification with specificity analysis"""
        prompt_template = PromptTemplate(
            input_variables=["user_query", "department_name"],
            template="""
            You are an expert business analyst specializing in identifying professional fields and domains.
            
            User Goal/Challenge: {user_query}
            Department: {department_name}
            
            Analyze the user's goal and provide a detailed field analysis including specificity level.
            
            SPECIFICITY LEVELS:
            - GENERAL: Broad, vague requests like "improve tech skills", "better at development", "enhance coding"
            - SPECIFIC: Domain-focused like "improve frontend skills", "better at web development", "enhance mobile development"  
            - HIGHLY_SPECIFIC: Technology-specific like "improve Angular skills", "better at React development", "enhance AWS cloud skills"
            
            Return your analysis in this EXACT JSON format:
            {{
                "primary_field": "specific field name",
                "specificity_level": "GENERAL|SPECIFIC|HIGHLY_SPECIFIC", 
                "key_technologies": ["tech1", "tech2"],
                "skill_focus": "broad|targeted",
                "learning_approach": "exploration|specialization|mastery"
            }}
            
            Field Analysis:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                user_query=user_query,
                department_name=department_name
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            field_analysis = self._parse_field_analysis(response.content)
            logger.info(f"Enhanced field analysis: {field_analysis}")
            return field_analysis
            
        except Exception as e:
            logger.error(f"Error in enhanced field identification: {e}")
            return {
                "primary_field": "General",
                "specificity_level": "GENERAL",
                "key_technologies": [],
                "skill_focus": "broad",
                "learning_approach": "exploration"
            }
    
    def filter_relevant_organizational_skills(self, field_analysis: dict, 
                                            all_skills_data: List[Dict]) -> List[Dict]:
        """Enhanced skill filtering based on specificity level"""
        primary_field = field_analysis["primary_field"]
        specificity_level = field_analysis["specificity_level"]
        key_technologies = field_analysis["key_technologies"]
        
        skills_text = "\n".join([
            f"- {skill['skill_name']} (ID: {skill['skill_id']})"
            for skill in all_skills_data
        ])
        
        if specificity_level == "HIGHLY_SPECIFIC":
            return self._filter_highly_specific_skills(primary_field, key_technologies, skills_text, all_skills_data)
        elif specificity_level == "SPECIFIC":
            return self._filter_specific_skills(primary_field, skills_text, all_skills_data)
        else:  # GENERAL
            return self._filter_general_skills(primary_field, skills_text, all_skills_data)
    
    def _filter_highly_specific_skills(self, primary_field: str, key_technologies: List[str], 
                                     skills_text: str, all_skills_data: List[Dict]) -> List[Dict]:
        """Filter skills for highly specific technology requests"""
        key_tech_str = ", ".join(key_technologies)
        
        prompt_template = PromptTemplate(
            input_variables=["primary_field", "key_technologies", "skills_text"],
            template="""
            HIGHLY SPECIFIC SKILL FILTERING for {primary_field}
            Target Technologies: {key_technologies}
            
            Available Skills:
            {skills_text}
            
            Since this is a HIGHLY SPECIFIC request focusing on {key_technologies}, filter skills that are:
            1. DIRECTLY related to {key_technologies} 
            2. Essential prerequisites for {key_technologies}
            3. Advanced concepts that build upon {key_technologies}
            
            Be VERY SELECTIVE - only include skills that have clear relevance to {key_technologies}.
            
            Return ONLY the skill names that are relevant, one per line:
            - Skill Name 1
            - Skill Name 2
            
            Relevant Skills for {key_technologies}:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                primary_field=primary_field,
                key_technologies=key_tech_str,
                skills_text=skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            relevant_skill_names = self._parse_skills_from_response(response.content)
            relevant_skills = self._match_skills_to_data(relevant_skill_names, all_skills_data)
            
            logger.info(f"Filtered {len(relevant_skills)} highly specific skills for {key_tech_str}")
            return relevant_skills
            
        except Exception as e:
            logger.error(f"Error in highly specific skill filtering: {e}")
            return []
    
    def _filter_specific_skills(self, primary_field: str, skills_text: str, 
                              all_skills_data: List[Dict]) -> List[Dict]:
        """Filter skills for specific domain requests"""
        prompt_template = PromptTemplate(
            input_variables=["primary_field", "skills_text"],
            template="""
            SPECIFIC SKILL FILTERING for {primary_field}
            
            Available Skills:
            {skills_text}
            
            Since this is a SPECIFIC domain request for {primary_field}, filter skills that are:
            1. Core skills essential to {primary_field}
            2. Common tools and technologies used in {primary_field}
            3. Industry-standard technologies for {primary_field}
            
            Be moderately selective - include skills that are commonly needed in {primary_field}.
            
            Return ONLY the skill names that are relevant, one per line:
            - Skill Name 1
            - Skill Name 2
            
            Relevant Skills for {primary_field}:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                primary_field=primary_field,
                skills_text=skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            relevant_skill_names = self._parse_skills_from_response(response.content)
            relevant_skills = self._match_skills_to_data(relevant_skill_names, all_skills_data)
            
            logger.info(f"Filtered {len(relevant_skills)} specific domain skills for {primary_field}")
            return relevant_skills
            
        except Exception as e:
            logger.error(f"Error in specific skill filtering: {e}")
            return []
    
    def _filter_general_skills(self, primary_field: str, skills_text: str, 
                             all_skills_data: List[Dict]) -> List[Dict]:
        """Filter skills for general broad requests"""
        prompt_template = PromptTemplate(
            input_variables=["primary_field", "skills_text"],
            template="""
            GENERAL SKILL FILTERING for {primary_field}
            
            Available Skills:
            {skills_text}
            
            Since this is a GENERAL request for improving skills in {primary_field}, filter skills that are:
            1. Foundational skills that provide broad value in {primary_field}
            2. Popular tools and technologies in {primary_field}
            3. Skills that provide good career foundation in {primary_field}
            
            Be INCLUSIVE - include skills that could be valuable for someone wanting to improve broadly in {primary_field}.
            
            Return ONLY the skill names that are relevant, one per line:
            - Skill Name 1
            - Skill Name 2
            
            Relevant Skills for {primary_field}:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                primary_field=primary_field,
                skills_text=skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            relevant_skill_names = self._parse_skills_from_response(response.content)
            relevant_skills = self._match_skills_to_data(relevant_skill_names, all_skills_data)
            
            logger.info(f"Filtered {len(relevant_skills)} general foundation skills for {primary_field}")
            return relevant_skills
            
        except Exception as e:
            logger.error(f"Error in general skill filtering: {e}")
            return []
    
    def suggest_external_skills(self, user_query: str, field_analysis: dict, 
                              existing_skills: List[str], needed_count: int) -> List[str]:
        """Enhanced external skill suggestion based on specificity level"""
        primary_field = field_analysis["primary_field"]
        specificity_level = field_analysis["specificity_level"]
        key_technologies = field_analysis["key_technologies"]
        
        existing_skills_text = ", ".join(existing_skills) if existing_skills else "None"
        
        if specificity_level == "HIGHLY_SPECIFIC":
            return self._suggest_highly_specific_external_skills(
                user_query, primary_field, key_technologies, existing_skills_text, needed_count
            )
        elif specificity_level == "SPECIFIC":
            return self._suggest_specific_external_skills(
                user_query, primary_field, existing_skills_text, needed_count
            )
        else:  # GENERAL
            return self._suggest_general_external_skills(
                user_query, primary_field, existing_skills_text, needed_count
            )
    
    def _suggest_highly_specific_external_skills(self, user_query: str, primary_field: str, 
                                               key_technologies: List[str], existing_skills: str, 
                                               needed_count: int) -> List[str]:
        """Suggest external skills for highly specific technology requests"""
        key_tech_str = ", ".join(key_technologies)
        
        prompt_template = PromptTemplate(
            input_variables=["user_query", "primary_field", "key_technologies", "existing_skills", "needed_count"],
            template="""
            User Goal: {user_query}
            Field: {primary_field}
            Target Technologies: {key_technologies}
            Already Covered: {existing_skills}
            
            Suggest {needed_count} ADVANCED external skills specifically for {key_technologies}.
            
            Focus on advanced {key_technologies} concepts, testing tools, and deployment.
            
            Return exactly {needed_count} skills in this format:
            - External Skill 1
            - External Skill 2
            
            Advanced {key_technologies} Skills:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                user_query=user_query,
                primary_field=primary_field,
                key_technologies=key_tech_str,
                existing_skills=existing_skills,
                needed_count=needed_count
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            skills = self._parse_skills_from_response(response.content)
            return skills[:needed_count]
            
        except Exception as e:
            logger.error(f"Error in highly specific external skill suggestion: {e}")
            return []
    
    def _suggest_specific_external_skills(self, user_query: str, primary_field: str, 
                                        existing_skills: str, needed_count: int) -> List[str]:
        """Suggest external skills for specific domain requests"""
        prompt_template = PromptTemplate(
            input_variables=["user_query", "primary_field", "existing_skills", "needed_count"],
            template="""
            User Goal: {user_query}
            Field: {primary_field}
            Already Covered: {existing_skills}
            
            Suggest {needed_count} external skills that are trending in the {primary_field} domain.
            
            Return exactly {needed_count} skills in this format:
            - External Skill 1
            - External Skill 2
            
            Trending {primary_field} Skills:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                user_query=user_query,
                primary_field=primary_field,
                existing_skills=existing_skills,
                needed_count=needed_count
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            skills = self._parse_skills_from_response(response.content)
            return skills[:needed_count]
            
        except Exception as e:
            logger.error(f"Error in specific external skill suggestion: {e}")
            return []
    
    def _suggest_general_external_skills(self, user_query: str, primary_field: str, 
                                       existing_skills: str, needed_count: int) -> List[str]:
        """Suggest external skills for general broad requests"""
        prompt_template = PromptTemplate(
            input_variables=["user_query", "primary_field", "existing_skills", "needed_count"],
            template="""
            User Goal: {user_query}
            Field: {primary_field}
            Already Covered: {existing_skills}
            
            Suggest {needed_count} diverse external skills for a strong foundation in {primary_field}.
            
            Return exactly {needed_count} skills in this format:
            - External Skill 1
            - External Skill 2
            
            Foundation {primary_field} Skills:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                user_query=user_query,
                primary_field=primary_field,
                existing_skills=existing_skills,
                needed_count=needed_count
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            skills = self._parse_skills_from_response(response.content)
            return skills[:needed_count]
            
        except Exception as e:
            logger.error(f"Error in general external skill suggestion: {e}")
            return []
    
    # Helper methods
    def _parse_field_analysis(self, response_content: str) -> dict:
        """Parse field analysis from LLM response"""
        try:
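            # Greedy DOTALL match: spans from the first '{' to the last '}',
            # so any stray brace after the JSON makes json.loads raise and
            # the except below returns the default analysis.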
            json_match = re.search(r'\{.*\}', response_content, re.DOTALL)
            if json_match:
                field_analysis = json.loads(json_match.group())
                
                required_fields = ["primary_field", "specificity_level", "key_technologies", "skill_focus", "learning_approach"]
                for field in required_fields:
                    if field not in field_analysis:
                        field_analysis[field] = self._get_default_value(field)
                
                return field_analysis
            else:
                return self._fallback_field_analysis(response_content)
                
        except Exception as e:
            logger.error(f"Error parsing field analysis: {e}")
            return {
                "primary_field": "General",
                "specificity_level": "GENERAL",
                "key_technologies": [],
                "skill_focus": "broad",
                "learning_approach": "exploration"
            }
    
    def _get_default_value(self, field_name: str):
        """Get default values for missing fields"""
        defaults = {
            "primary_field": "General",
            "specificity_level": "GENERAL",
            "key_technologies": [],
            "skill_focus": "broad",
            "learning_approach": "exploration"
        }
        return defaults.get(field_name, "")
    
    def _fallback_field_analysis(self, response_content: str) -> dict:
        """Fallback parsing when JSON extraction fails"""
        content_lower = response_content.lower()
        
        if any(tech in content_lower for tech in ["angular", "react", "vue", "python", "java", "aws", "docker"]):
            specificity = "HIGHLY_SPECIFIC"
            focus = "targeted"
            approach = "mastery"
        elif any(domain in content_lower for domain in ["web development", "mobile development", "data science", "frontend", "backend"]):
            specificity = "SPECIFIC"
            focus = "broad"
            approach = "specialization"
        else:
            specificity = "GENERAL"
            focus = "broad"
            approach = "exploration"
        
        field_name = self._clean_field_name(response_content)
        
        return {
            "primary_field": field_name,
            "specificity_level": specificity,
            "key_technologies": [],
            "skill_focus": focus,
            "learning_approach": approach
        }
    
    def _match_skills_to_data(self, skill_names: List[str], all_skills_data: List[Dict]) -> List[Dict]:
        """Match skill names back to original skill data"""
        relevant_skills = []
        for skill_name in skill_names:
            for org_skill in all_skills_data:
                if self._skills_match(skill_name, org_skill['skill_name']):
                    relevant_skills.append(org_skill)
                    break
        return relevant_skills
    
    def _clean_field_name(self, field_text: str) -> str:
        """Clean and normalize field name"""
        field_text = field_text.strip().split('\n')[0]
        field_text = field_text.replace("Field/Domain:", "").replace("Field:", "").replace("Domain:", "")
        field_text = field_text.strip(' "\'')
        return field_text
    
    def _parse_skills_from_response(self, response_text: str) -> List[str]:
        """Parse skills from LLM response"""
        skills = []
        lines = response_text.strip().split('\n')
        
        for line in lines:
            line = line.strip()
            line = re.sub(r'^(?:[-•*]+|\d+[.)]+)\s*', '', line).strip()
            
            if line and len(line) > 2 and not line.endswith(':'):
                skills.append(line)
        
        return skills
    
    def _skills_match(self, skill1: str, skill2: str) -> bool:
        """Check if two skill names match with reasonable similarity"""
        skill1_norm = re.sub(r'[^a-zA-Z0-9\s]', '', skill1.lower()).strip()
        skill2_norm = re.sub(r'[^a-zA-Z0-9\s]', '', skill2.lower()).strip()
        
        if skill1_norm == skill2_norm:
            return True
        
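        # Heuristic cutoff: a SequenceMatcher ratio above 0.8 counts as the
        # same skill; raise it if near-synonyms get conflated.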
        similarity = SequenceMatcher(None, skill1_norm, skill2_norm).ratio()
        if similarity > 0.8:
            return True
        
        if skill1_norm in skill2_norm or skill2_norm in skill1_norm:
            return True
        
        return False

# =====================================
# DEPARTMENT INFERENCE ENGINE
# =====================================

class DepartmentInferenceEngine:
    def __init__(self, model_name: str = "gemma3:12b"):
        try:
            self.llm = ChatOllama(model=model_name)
            logger.info(f"Initialized Department LLM with model: {model_name}")
        except Exception as e:
            logger.error(f"Failed to initialize Department LLM: {e}")
            raise
    
    def infer_department(self, user_prompt: str) -> str:
        prompt_template = PromptTemplate(
            input_variables=["user_prompt"],
            template="""
            You are an expert business analyst specializing in organizational structure and training needs.
            
            Based on the following business goal or challenge, identify the SINGLE most relevant department.
            
            User Goal/Challenge: {user_prompt}
            
            Common departments include:
            - Sales
            - Marketing
            - HR (Human Resources)
            - Operations
            - Finance
            - Product Management
            - Development/Engineering
            - Customer Service
            - Quality Assurance
            - Research and Development
            
            Return ONLY the department name (e.g., "Sales", "Marketing", "HR")
            
            Department:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(user_prompt=user_prompt)
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            inferred_dept = response.content.strip()
            inferred_dept = self._clean_department_name(inferred_dept)
            
            logger.info(f"Inferred department: {inferred_dept}")
            return inferred_dept
            
        except Exception as e:
            logger.error(f"Error in department inference: {e}")
            return "Unknown"
    
    def _clean_department_name(self, dept_name: str) -> str:
        dept_name = dept_name.replace("Department:", "").replace("Department", "")
        dept_name = dept_name.replace("Team:", "").replace("Team", "")
        dept_name = dept_name.split('\n')[0]
        dept_name = dept_name.strip(' "\'')
        return dept_name

class SimilarityMatcher:
    @staticmethod
    def calculate_similarity(str1: str, str2: str) -> float:
        str1_norm = SimilarityMatcher._normalize_string(str1)
        str2_norm = SimilarityMatcher._normalize_string(str2)
        
        seq_similarity = SequenceMatcher(None, str1_norm, str2_norm).ratio()
        exact_match = 1.0 if str1_norm == str2_norm else 0.0
        substring_match = 0.9 if (str1_norm in str2_norm or str2_norm in str1_norm) else 0.0
        
        words1 = set(str1_norm.split())
        words2 = set(str2_norm.split())
        word_similarity = len(words1.intersection(words2)) / len(words1.union(words2)) if words1 and words2 else 0.0
        
        final_score = max(seq_similarity, exact_match, substring_match, word_similarity)
        return final_score
    
    @staticmethod
    def _normalize_string(s: str) -> str:
        s = s.lower()
        s = re.sub(r'[^a-zA-Z0-9\s]', '', s)
        s = re.sub(r'\s+', ' ', s)
        
        replacements = {
            'human resources': 'hr',
            'human resource': 'hr',
            'information technology': 'it',
            'research and development': 'rd',
            'research & development': 'rd',
            'customer service': 'customer support',
            'customer care': 'customer support',
            'product management': 'product',
            'product manager': 'product',
            'software development': 'development',
            'software engineering': 'development',
            'engineering': 'development',
            'programmer': 'developer',
            'programming': 'development'
        }
        
        for old, new in replacements.items():
            s = s.replace(old, new)
        
        return s.strip()
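
# Illustrative behaviour (assuming the replacements above): "Software
# Engineering" and "Development" both normalize to "development", so
# SimilarityMatcher.calculate_similarity returns 1.0 for that pair, while
# unrelated names fall back to the lower SequenceMatcher/word-overlap scores.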

# =====================================
# COURSE INFERENCE ENGINE
# =====================================

class CourseInferenceEngine:
    def __init__(self, model_name: str = "gemma3:12b"):
        try:
            self.llm = ChatOllama(model=model_name)
            logger.info(f"Initialized Course LLM with model: {model_name}")
        except Exception as e:
            logger.error(f"Failed to initialize Course LLM: {e}")
            raise
    
    def evaluate_domain_relevance(self, course: Course, department: str) -> Tuple[bool, float, str]:
        prompt_template = PromptTemplate(
            input_variables=["course_name", "short_description", "description", "department"],
            template="""
            Course Information:
            - Course Name: {course_name}
            - Short Description: {short_description}
            - Full Description: {description}
            
            Target Department/Domain: {department}
            
            STRICT EVALUATION RULES:
            1. Return "No" if course is generic (demo, test, sample, author courses)
            2. Return "No" if course has no clear domain-specific content
            3. Only return "Yes" if course teaches SPECIFIC skills relevant to {department}
            
            For {department} domain, look for:
            - Technical skills, programming, development tools (if Developer/Technical)
            - Sales techniques, client management, revenue (if Sales)
            - HR processes, recruitment, management (if HR)
            
            Is this course specifically relevant and valuable for {department} professionals?
            
            Return EXACTLY this format:
            Is_Match: [Yes/No]
            Confidence: [0.0 to 1.0]
            Justification: [One clear sentence explaining your decision]
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                course_name=course.course_name,
                short_description=course.short_description,
                description=course.description,
                department=department
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            is_match, confidence, justification = self._parse_domain_response(response.content)
            logger.info(f"Domain relevance for '{course.course_name}': {is_match} (confidence: {confidence:.2f})")
            return is_match, confidence, justification
            
        except Exception as e:
            logger.error(f"Error in domain relevance evaluation: {e}")
            return False, 0.0, "Error in evaluation"
    
    def extract_course_skills(self, course: Course) -> List[InferredSkillInternal]:
        prompt_template = PromptTemplate(
            input_variables=["course_name", "short_description", "description", "existing_skills"],
            template="""
            Course Information:
            - Course Name: {course_name}
            - Short Description: {short_description}
            - Full Description: {description}
            - Existing Skill Tags: {existing_skills}
            
            Based on the course content, identify 3-5 key skills that learners will likely develop or improve.
            
            Return skills in the following format:
            - Skill Name 1 | Skill Level
            - Skill Name 2 | Skill Level
            - Skill Name 3 | Skill Level
            
            Skills:
            """
        )
        
        try:
            existing_skills_text = ", ".join(course.skills) if course.skills else "None specified"
            
            formatted_prompt = prompt_template.format(
                course_name=course.course_name,
                short_description=course.short_description,
                description=course.description,
                existing_skills=existing_skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            inferred_skills = self._parse_skills_response(response.content)
            logger.info(f"Extracted {len(inferred_skills)} skills from '{course.course_name}'")
            return inferred_skills
            
        except Exception as e:
            logger.error(f"Error in skill extraction: {e}")
            return []
    
    def match_skills_semantically(self, user_skills: List[str], 
                                course_skills: List[InferredSkillInternal]) -> List[SkillMatchInternal]:
        if not user_skills or not course_skills:
            return []
        
        user_skills_text = ", ".join(user_skills)
        course_skills_text = ", ".join([f"{cs.skill_name} ({cs.skill_level})" for cs in course_skills])
        
        prompt_template = PromptTemplate(
            input_variables=["user_skills", "course_skills"],
            template="""
            User-Needed Skills: {user_skills}
            Course-Taught Skills: {course_skills}
            
            Match semantically related skill pairs between user needs and course offerings.
            
            Return matches in the following format:
            User_Skill: [skill name] | Course_Skill: [skill name] | Confidence: [0.0-1.0]
            
            Skill Matches:
            """
        )
        
        try:
            formatted_prompt = prompt_template.format(
                user_skills=user_skills_text,
                course_skills=course_skills_text
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            skill_matches = self._parse_matches_response(response.content)
            logger.info(f"Found {len(skill_matches)} semantic skill matches")
            return skill_matches
            
        except Exception as e:
            logger.error(f"Error in semantic skill matching: {e}")
            return []
    
    def generate_course_justification(self, course: Course, matched_skills: List[str], 
                                    relevance_score: float, department: str) -> str:
        prompt_template = PromptTemplate(
            input_variables=["course_name", "matched_skills", "relevance_score", "department"],
            template="""
            Course: {course_name}
            Matched Skills: {matched_skills}
            Relevance Score: {relevance_score}
            Department: {department}
            
            Write a concise justification (1-2 sentences) for why this course is recommended.
            
            Justification:
            """
        )
        
        try:
            matched_skills_text = ", ".join(matched_skills) if matched_skills else "general professional development"
            
            formatted_prompt = prompt_template.format(
                course_name=course.course_name,
                matched_skills=matched_skills_text,
                relevance_score=relevance_score,
                department=department
            )
            response = self.llm.invoke([HumanMessage(content=formatted_prompt)])
            
            justification = response.content.strip()
            justification = justification.replace("Justification:", "").strip()
            
            return justification
            
        except Exception as e:
            logger.error(f"Error generating justification: {e}")
            return f"Recommended for {department} professionals to develop relevant skills."
    
    def _parse_domain_response(self, response_text: str) -> Tuple[bool, float, str]:
        lines = response_text.strip().split('\n')
        
        is_match = False
        confidence = 0.0
        justification = "No justification provided"
        
        for line in lines:
            line = line.strip()
            if line.lower().startswith('is_match:'):
                match_text = line.split(':', 1)[1].strip().lower()
                is_match = 'yes' in match_text
            elif line.lower().startswith('confidence:'):
                try:
                    confidence_text = line.split(':', 1)[1].strip()
                    confidence = float(confidence_text)
                except ValueError:
                    confidence = 0.5
            elif line.lower().startswith('justification:'):
                justification = line.split(':', 1)[1].strip()
        
        return is_match, confidence, justification
    
    def _parse_skills_response(self, response_text: str) -> List[InferredSkillInternal]:
        skills = []
        lines = response_text.strip().split('\n')
        
        for line in lines:
            line = line.strip()
            line = re.sub(r'^(?:[-•*]+|\d+[.)]+)\s*', '', line)
            
            if '|' in line:
                parts = line.split('|')
                if len(parts) >= 2:
                    skill_name = parts[0].strip()
                    skill_level = parts[1].strip()
                    
                    if skill_name and skill_level:
                        skills.append(InferredSkillInternal(
                            skill_name=skill_name,
                            skill_level=skill_level
                        ))
        
        return skills
    
    def _parse_matches_response(self, response_text: str) -> List[SkillMatchInternal]:
        matches = []
        lines = response_text.strip().split('\n')
        
        for line in lines:
            line = line.strip()
            
            if not line or line.lower() in ['skill matches:', 'matches:', '']:
                continue
            
            try:
                if 'User_Skill:' in line and 'Course_Skill:' in line and 'Confidence:' in line:
                    parts = line.split('|')
                    if len(parts) >= 3:
                        user_skill = parts[0].split(':', 1)[1].strip()
                        course_skill = parts[1].split(':', 1)[1].strip()
                        confidence_text = parts[2].split(':', 1)[1].strip()
                        confidence_match = re.search(r'(\d+\.?\d*)', confidence_text)
                        if confidence_match:
                            confidence = float(confidence_match.group(1))
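                            # Values like "85" are treated as percentages and rescaled below.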
                            if confidence > 1.0:
                                confidence = confidence / 100.0
                            if confidence >= 0.5 and user_skill and course_skill:
                                matches.append(SkillMatchInternal(
                                    user_skill=user_skill,
                                    course_skill=course_skill,
                                    confidence=confidence
                                ))
                
            except (ValueError, IndexError) as e:
                logger.warning(f"Failed to parse line: '{line}' - Error: {e}")
                continue
        
        return matches

# =====================================
# ENHANCED INTEGRATED TNA SYSTEM
# =====================================

class EnhancedIntegratedTNASystem:
    def __init__(self, model_name: str = "gemma3:12b"):
        self.dept_engine = DepartmentInferenceEngine(model_name)
        self.similarity_matcher = SimilarityMatcher()
        self.skill_engine = EnhancedSkillInferenceEngine(model_name)
        self.course_engine = CourseInferenceEngine(model_name)
        self.similarity_threshold = 0.96
        self.domain_threshold = 0.6
        self.min_skills_target = 15
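        # With similarity_threshold at 0.96, the 0.9 substring signal alone can
        # never clear it: a department match effectively needs (near-)exact
        # normalized names or identical word sets.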
        
        logger.info("Enhanced Integrated TNA System initialized with specificity detection")
    
    def process_department_inference(self, user_prompt: str, organization_departments: List[Department]) -> TNAResponse:
        """Process department inference"""
        try:
            tna_request = TNARequest(user_prompt, organization_departments)
            
            inferred_department = self.dept_engine.infer_department(tna_request.user_prompt)
            best_match = self._find_best_match(inferred_department, tna_request.organization_departments)
            
            if best_match['similarity_score'] >= self.similarity_threshold:
                return TNAResponse(
                    department_id=best_match['department'].id,
                    department_name=best_match['department'].name,
                    inferred_department=inferred_department,
                    similarity_score=best_match['similarity_score'],
                    matched=True
                )
            else:
                return TNAResponse(
                    department_id=0,
                    department_name="Null",
                    inferred_department=inferred_department,
                    similarity_score=best_match['similarity_score'],
                    matched=False
                )
                
        except Exception as e:
            logger.error(f"Error in department inference: {e}")
            return TNAResponse(
                department_id=0,
                department_name="Error",
                inferred_department="Error",
                similarity_score=0.0,
                matched=False
            )
    
    def process_skill_and_course_analysis(self, user_query: str, admin_name: str, department_name: str,
                                    filtered_skills: List[Skill], all_skills_data: List[Dict],
                                    offline_courses: List[Course]) -> Tuple[str, List[CourseRecommendationInternal]]:
        """Enhanced skill analysis and course recommendation with specificity awareness"""
        try:
            skill_analysis_response = self._analyze_skills_enhanced(
                user_query,
                admin_name,
                department_name,
                filtered_skills,
                all_skills_data
            )
            
            skill_analysis_strategic = self._generate_strategic_skill_presentation_enhanced(
                skill_analysis_response,
                user_query,
                admin_name,
                department_name
            )
            
            course_recommendations = self._recommend_courses(
                user_query,
                skill_analysis_response.identified_field,
                skill_analysis_response.recommended_skills,
                offline_courses
            )
            
            return skill_analysis_strategic, course_recommendations
            
        except Exception as e:
            logger.error(f"Error in enhanced skill and course analysis: {e}")
            return f"Error in analysis: {str(e)}", []
    
    def _analyze_skills_enhanced(self, user_query: str, admin_name: str, department_name: str,
                               filtered_skills: List[Skill], all_skills_data: List[Dict]) -> SkillAnalysisResponse:
        """Enhanced skill analysis using specificity-aware approach"""
        
        field_analysis = self.skill_engine.identify_field_domain(
            user_query, 
            department_name or "General"
        )
        
        relevant_org_skills = self.skill_engine.filter_relevant_organizational_skills(
            field_analysis, 
            all_skills_data
        )
        
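        # Case 1 needs a concrete (non-"Null") department reporting more than
        # three skills (a proxy for sufficient staffing); otherwise fall back
        # to the field-level Case 2.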
        if (department_name and 
            department_name.lower() != "null" and 
            len(filtered_skills) > 3):
            
            return self._handle_case_1_enhanced(user_query, admin_name, department_name,
                                              filtered_skills, field_analysis, relevant_org_skills)
        else:
            return self._handle_case_2_enhanced(user_query, admin_name, department_name,
                                              filtered_skills, field_analysis, relevant_org_skills)
    
    def _handle_case_1_enhanced(self, user_query: str, admin_name: str, department_name: str,
                               filtered_skills: List[Skill], field_analysis: dict, 
                               relevant_org_skills: List[Dict]) -> SkillAnalysisResponse:
        """Handle Case 1 with enhanced specificity-aware approach"""
        logger.info("Applying Case 1 (Enhanced): Department found with sufficient employees")
        
        identified_field = field_analysis["primary_field"]
        specificity_level = field_analysis["specificity_level"]
        
        recommendations = []
        used_skill_ids = set()
        
        # PRIORITY 1 (HIGH): Under-covered department skills
        under_covered_dept_skills = self._filter_department_skills(filtered_skills, 0.5)
        
        max_headcount = max((s.skill_count or 0) for s in filtered_skills) if filtered_skills else 0
        for dept_skill in under_covered_dept_skills:
            if dept_skill.skill_id not in used_skill_ids:
                recommendations.append(SkillRecommendation(
                    skill_id=dept_skill.skill_id,
                    skill_name=dept_skill.skill_name,
                    skill_level=dept_skill.skill_level or "Intermediate",
                    priority="High",
                    source="department_existing",
                    justification=f"Under-covered in department: {dept_skill.skill_count or 0} employees out of {max_headcount}"
                ))
                used_skill_ids.add(dept_skill.skill_id)
        
        # PRIORITY 2 (MEDIUM): Field-relevant organizational skills
        for org_skill in relevant_org_skills:
            if org_skill['skill_id'] not in used_skill_ids:
                recommendations.append(SkillRecommendation(
                    skill_id=org_skill['skill_id'],
                    skill_name=org_skill['skill_name'],
                    skill_level="Intermediate",
                    priority="Medium",
                    source="organizational_relevant",
                    justification=f"Relevant to {identified_field} field ({specificity_level.lower()} level) and available in organization"
                ))
                used_skill_ids.add(org_skill['skill_id'])
        
        # PRIORITY 3 (LOW): External skills if needed
        if len(recommendations) < self.min_skills_target:
            needed_count = self.min_skills_target - len(recommendations)
            existing_skill_names = [rec.skill_name for rec in recommendations]
            
            external_skills = self.skill_engine.suggest_external_skills(
                user_query,
                field_analysis,
                existing_skill_names,
                needed_count
            )
            
            for i, ext_skill in enumerate(external_skills):
                recommendations.append(SkillRecommendation(
                    skill_id=9000 + i,
                    skill_name=ext_skill,
                    skill_level="Beginner",
                    priority="Low",
                    source="external",
                    justification=f"External {specificity_level.lower()} skill for {identified_field} to enhance capabilities"
                ))
        
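        # Note: "department_coverage" here is the fraction of department skills
        # flagged as under-covered, i.e. the size of the gap being addressed.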
        dept_coverage = len(under_covered_dept_skills) / len(filtered_skills) if filtered_skills else 0
        
        return SkillAnalysisResponse(
            recommended_skills=recommendations[:self.min_skills_target],
            analysis_summary=f"Enhanced analysis for {department_name} department in {identified_field} field ({specificity_level}). Found {len(relevant_org_skills)} relevant organizational skills.",
            department_coverage=dept_coverage,
            total_skills_recommended=len(recommendations[:self.min_skills_target]),
            case_applied="Case 1 (Enhanced): Department found with sufficient employees",
            identified_field=identified_field,
            field_analysis=field_analysis
        )
    
    def _handle_case_2_enhanced(self, user_query: str, admin_name: str, department_name: str,
                               filtered_skills: List[Skill], field_analysis: dict, 
                               relevant_org_skills: List[Dict]) -> SkillAnalysisResponse:
        """Handle Case 2 with enhanced specificity-aware approach"""
        logger.info("Applying Case 2 (Enhanced): Department not found or insufficient employees")
        
        identified_field = field_analysis["primary_field"]
        specificity_level = field_analysis["specificity_level"]
        
        recommendations = []
        used_skill_ids = set()
        
        # PRIORITY 1 (MEDIUM): Field-relevant organizational skills
        for org_skill in relevant_org_skills:
            if org_skill['skill_id'] not in used_skill_ids:
                recommendations.append(SkillRecommendation(
                    skill_id=org_skill['skill_id'],
                    skill_name=org_skill['skill_name'],
                    skill_level="Intermediate",
                    priority="Medium",
                    source="organizational_relevant",
                    justification=f"Relevant to {identified_field} field ({specificity_level.lower()} level) and available in organization"
                ))
                used_skill_ids.add(org_skill['skill_id'])
        
        # PRIORITY 2 (LOW): External skills if needed
        if len(recommendations) < self.min_skills_target:
            needed_count = self.min_skills_target - len(recommendations)
            existing_skill_names = [rec.skill_name for rec in recommendations]
            
            external_skills = self.skill_engine.suggest_external_skills(
                user_query,
                field_analysis,
                existing_skill_names,
                needed_count
            )
            
            for i, ext_skill in enumerate(external_skills):
                recommendations.append(SkillRecommendation(
                    skill_id=9000 + i,
                    skill_name=ext_skill,
                    skill_level="Beginner",
                    priority="Low",
                    source="external",
                    justification=f"External {specificity_level.lower()} skill for {identified_field} to fill capability gap"
                ))
        
        return SkillAnalysisResponse(
            recommended_skills=recommendations[:self.min_skills_target],
            analysis_summary=f"Enhanced general analysis for {identified_field} field ({specificity_level}). Found {len(relevant_org_skills)} relevant organizational skills.",
            department_coverage=0.0,
            total_skills_recommended=len(recommendations[:self.min_skills_target]),
            case_applied="Case 2 (Enhanced): Department not found or insufficient employees",
            identified_field=identified_field,
            field_analysis=field_analysis
        )
    
    def _generate_strategic_skill_presentation_enhanced(self, analysis_response: SkillAnalysisResponse,
                                                       user_query: str, admin_name: str, 
                                                       department_name: str) -> str:
        """Generate enhanced strategic presentation with specificity information"""
        org_skills = [s for s in analysis_response.recommended_skills if s.source == 'organizational_relevant']
        ext_skills = [s for s in analysis_response.recommended_skills if s.source == 'external']
        dept_skills = [s for s in analysis_response.recommended_skills if s.source == 'department_existing']
        
        org_percentage = (len(org_skills) / len(analysis_response.recommended_skills)) * 100 if analysis_response.recommended_skills else 0
        current_date = datetime.now().strftime("%B %d, %Y")
        
        field_analysis = analysis_response.field_analysis or {}
        specificity_level = field_analysis.get("specificity_level", "GENERAL")
        key_technologies = field_analysis.get("key_technologies", [])
        learning_approach = field_analysis.get("learning_approach", "exploration")
        
        tech_focus_text = f" (Focus: {', '.join(key_technologies)})" if key_technologies else ""
        
        strategic_report = f"""
📊 ENHANCED TRAINING NEEDS ANALYSIS REPORT
═══════════════════════════════════════════════════════════════

Organization: {admin_name}
Department: {department_name}
Analysis Date: {current_date}
Field Identified: {analysis_response.identified_field}{tech_focus_text}

🎯 EXECUTIVE SUMMARY
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

User Goal: "{user_query}"
AI-Identified Field: {analysis_response.identified_field}
Specificity Level: {specificity_level}
Learning Approach: {learning_approach.title()}
Case Applied: {analysis_response.case_applied}

Key Findings:
• {len(org_skills)} organizational skills aligned with business goal
• {len(ext_skills)} external skills identified for capability enhancement  
• {org_percentage:.1f}% utilization of existing organizational assets
• {specificity_level.lower().replace('_', '-')} learning pathway identified

🧠 SPECIFICITY ANALYSIS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

Request Type: {specificity_level}
├─ Field Focus: {analysis_response.identified_field}
├─ Learning Approach: {learning_approach.title()}
├─ Skill Selection: {"Targeted technology-specific" if specificity_level == "HIGHLY_SPECIFIC" else "Domain-focused" if specificity_level == "SPECIFIC" else "Broad foundational"}
└─ Key Technologies: {", ".join(key_technologies) if key_technologies else "General skills across the field"}

🟢 IMMEDIATE PRIORITIES (Organizational Skills)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
{self._format_skill_section(org_skills + dept_skills)}

🟡 GROWTH OPPORTUNITIES (External Skills)
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
{self._format_skill_section(ext_skills)}

═══════════════════════════════════════════════════════════════
Report Generated: {current_date}
Analysis Engine: Enhanced TNA v3.0 (Specificity-Aware Approach)
═══════════════════════════════════════════════════════════════
"""
        
        return strategic_report
    
    def _find_best_match(self, inferred_dept: str, org_departments: List[Department]) -> Dict:
        """Find the best matching department from organization's departments"""
        best_match = {
            'department': None,
            'similarity_score': 0.0
        }
        
        for dept in org_departments:
            similarity_score = self.similarity_matcher.calculate_similarity(
                inferred_dept, dept.name
            )
            
            if similarity_score > best_match['similarity_score']:
                best_match['department'] = dept
                best_match['similarity_score'] = similarity_score
        
        return best_match
    
    def _filter_department_skills(self, filtered_skills: List[Skill], coverage_threshold: float = 0.5) -> List[Skill]:
        """Filter out skills that are already well-covered in the department"""
        if not filtered_skills:
            return []
        
        total_employees = max((skill.skill_count or 0 for skill in filtered_skills), default=1) or 1
        
        under_covered_skills = []
        for skill in filtered_skills:
            coverage_ratio = (skill.skill_count or 0) / total_employees
            if coverage_ratio < coverage_threshold:
                under_covered_skills.append(skill)
        
        return under_covered_skills
    
    def _recommend_courses(self, user_prompt: str, identified_field: str, 
                          recommended_skills: List[SkillRecommendation],
                          offline_courses: List[Course]) -> List[CourseRecommendationInternal]:
        """Recommend courses using enhanced logic"""
        
        user_skill_names = [skill.skill_name for skill in recommended_skills]
        course_recommendations = []
        
        for course in offline_courses:
            logger.info(f"Processing course: {course.course_name}")
            
            is_relevant, domain_confidence, domain_justification = self.course_engine.evaluate_domain_relevance(
                course, identified_field
            )
            
            if is_relevant and domain_confidence >= self.domain_threshold:
                inferred_skills = self.course_engine.extract_course_skills(course)
                
                skill_matches = self.course_engine.match_skills_semantically(
                    user_skill_names, inferred_skills
                )
                
                # Skip courses with no overlapping skills before the LLM
                # justification call, instead of discarding them afterwards
                if not skill_matches:
                    continue
                
                relevance_score = self._calculate_relevance_score(
                    skill_matches, domain_confidence, inferred_skills
                )
                
                # Deduplicate while preserving first-seen order so the output
                # is deterministic (set() ordering is arbitrary)
                matched_skill_names = list(dict.fromkeys(
                    match.user_skill for match in skill_matches
                ))
                justification = self.course_engine.generate_course_justification(
                    course, matched_skill_names, relevance_score, identified_field
                )
                
                course_recommendations.append(CourseRecommendationInternal(
                    course_id=course.course_id,
                    course_name=course.course_name,
                    short_description=course.short_description,
                    description=course.description,
                    matched_skills=matched_skill_names,
                    relevance_score=relevance_score,
                    llm_justification=justification,
                    matched_using="llm",
                    domain_relevance=is_relevant,
                    domain_confidence=domain_confidence,
                    inferred_skills=inferred_skills,
                    skill_matches=skill_matches
                ))
        
        course_recommendations.sort(key=lambda x: x.relevance_score, reverse=True)
        return course_recommendations
    
    def _calculate_relevance_score(self, skill_matches: List[SkillMatchInternal], 
                                 domain_confidence: float, 
                                 inferred_skills: List[InferredSkillInternal]) -> float:
        """Calculate overall relevance score for a course"""
        if not skill_matches:
            return 0.0
        
        avg_skill_confidence = sum(match.confidence for match in skill_matches) / len(skill_matches)
        match_count_bonus = min(len(skill_matches) * 0.1, 0.3)
        
        level_bonus = 0.0
        for skill in inferred_skills:
            # Defensive: tolerate a missing/None level from LLM extraction
            if (skill.skill_level or "").lower() in ('advanced', 'expert'):
                level_bonus += 0.05
        level_bonus = min(level_bonus, 0.2)
        
        relevance_score = (
            avg_skill_confidence * 0.6 +
            domain_confidence * 0.3 +
            match_count_bonus +
            level_bonus
        )
        
        return min(relevance_score, 1.0)
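
    # Worked example (hypothetical values): two matches with confidences 0.8
    # and 0.6 give avg_skill_confidence = 0.7 and match_count_bonus =
    # min(2 * 0.1, 0.3) = 0.2; one 'Advanced' inferred skill adds a 0.05
    # level_bonus. With domain_confidence = 0.9, the score is
    # 0.7 * 0.6 + 0.9 * 0.3 + 0.2 + 0.05 = 0.94, under the 1.0 cap.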
    
    def _format_skill_section(self, skills: List[SkillRecommendation]) -> str:
        """Format skills section for strategic presentation"""
        if not skills:
            return "No skills identified in this category."
        
        formatted_skills = []
        for i, skill in enumerate(skills, 1):
            formatted_skills.append(f"{i:2d}. {skill.skill_name} ({skill.skill_level})")
        
        return "\n".join(formatted_skills)

# =====================================
# CONVENIENCE FUNCTIONS
# =====================================

def create_enhanced_tna_system(model_name: str = "gemma3:12b") -> EnhancedIntegratedTNASystem:
    """Factory function to create enhanced TNA system"""
    return EnhancedIntegratedTNASystem(model_name)

def process_department_analysis(tna_system: EnhancedIntegratedTNASystem, user_prompt: str, 
                               departments: List[Dict]) -> Dict:
    """Convenience function for department analysis"""
    dept_objects = [Department(id=d['id'], name=d['name']) for d in departments]
    result = tna_system.process_department_inference(user_prompt, dept_objects)
    
    return {
        'identified_department': result.department_name,
        'department_id': result.department_id,
        'similarity_score': result.similarity_score,
        'matched': result.matched
    }
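
# Example call (hypothetical data; requires a reachable Ollama model):
#   process_department_analysis(tna, "Improve our frontend delivery", [
#       {'id': 1, 'name': 'Engineering'}, {'id': 2, 'name': 'Marketing'}])
# might return {'identified_department': 'Engineering', 'department_id': 1,
#               'similarity_score': 0.82, 'matched': True}.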

def process_enhanced_skills_and_courses_analysis(tna_system: EnhancedIntegratedTNASystem, user_query: str,
                                               admin_name: str, department_name: str,
                                               filtered_skills: List[Dict], all_skills: List[Dict],
                                               courses: List[Dict]) -> Dict:
    """Enhanced convenience function for skills and courses analysis"""
    
    skill_objects = [
        Skill(
            skill_id=s['skill_id'],
            skill_name=s['skill_name'],
            skill_level=s.get('skill_level'),
            skill_count=s.get('skill_count')
        ) for s in filtered_skills
    ]
    
    course_objects = [
        Course(
            course_id=c['course_id'],
            course_name=c['course_name'],
            short_description=c['short_description'],
            description=c['description'],
            skills=c.get('skills', [])
        ) for c in courses
    ]
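
    # NOTE: unlike filtered_skills and courses, all_skills is forwarded below
    # as the raw dicts received from the caller; map it through Skill(...) as
    # above if process_skill_and_course_analysis expects dataclasses for it too.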
    
    strategic_analysis, course_recommendations = tna_system.process_skill_and_course_analysis(
        user_query, admin_name, department_name, skill_objects, all_skills, course_objects
    )
    
    courses_offline = []
    for course_rec in course_recommendations:
        courses_offline.append({
            'course_id': course_rec.course_id,
            'name': course_rec.course_name,
            'matched_skills': course_rec.matched_skills,
            'justification': course_rec.llm_justification,
            'relevance_score': course_rec.relevance_score,
            'inferred_skills': [
                {'skill_name': skill.skill_name, 'skill_level': skill.skill_level}
                for skill in course_rec.inferred_skills
            ],
            'skill_matches': [
                {
                    'user_skill': match.user_skill,
                    'course_skill': match.course_skill,
                    'confidence': match.confidence
                } for match in course_rec.skill_matches
            ]
        })
    
    return {
        'skill_analysis': strategic_analysis,
        'courses_offline': courses_offline
    }
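
# Each 'courses_offline' entry is a plain, JSON-serializable dict, e.g.
# (hypothetical values):
#   {'course_id': '201', 'name': 'Advanced Angular',
#    'matched_skills': ['Angular'], 'justification': '...',
#    'relevance_score': 0.94, 'inferred_skills': [...], 'skill_matches': [...]}
# so the result can be handed straight to an API layer without extra encoding.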

# =====================================
# BACKWARD COMPATIBILITY
# =====================================

IntegratedTNASystem = EnhancedIntegratedTNASystem
create_tna_system = create_enhanced_tna_system
process_skills_and_courses_analysis = process_enhanced_skills_and_courses_analysis
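
# Existing call sites keep working unchanged, e.g.:
#   legacy = create_tna_system()               # returns the enhanced system
#   isinstance(legacy, IntegratedTNASystem)    # True: same class object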

# =====================================
# EXAMPLE USAGE
# =====================================

if __name__ == "__main__":
    tna = create_enhanced_tna_system()
    
    print("="*80)
    print("ENHANCED TNA SYSTEM DEMONSTRATION")
    print("="*80)
    
    # Test general query
    general_result = process_enhanced_skills_and_courses_analysis(
        tna,
        "Want to improve my tech ability",
        "Ascent",
        "Developer",
        [
            {"skill_id": 120, "skill_name": "Python", "skill_level": "Intermediate", "skill_count": 6},
            {"skill_id": 1442, "skill_name": "Emotional Intelligence", "skill_level": "Advanced", "skill_count": 3}
        ],
        [
            {"skill_id": 121, "skill_name": "JavaScript"},
            {"skill_id": 122, "skill_name": "React"},
            {"skill_id": 123, "skill_name": "Node.js"}
        ],
        [
            {
                "course_id": "101",
                "course_name": "Full Stack Development",
                "short_description": "Complete web development course",
                "description": "Learn HTML, CSS, JavaScript, and backend",
                "skills": ["html", "css", "javascript"]
            }
        ]
    )
    
    print("GENERAL QUERY RESULT:")
    print(general_result['skill_analysis'][:500] + "...")
    
    # Test specific query  
    specific_result = process_enhanced_skills_and_courses_analysis(
        tna,
        "Want to improve my Angular skills",
        "Ascent", 
        "Developer",
        [
            {"skill_id": 120, "skill_name": "Python", "skill_level": "Intermediate", "skill_count": 6}
        ],
        [
            {"skill_id": 121, "skill_name": "JavaScript"},
            {"skill_id": 122, "skill_name": "TypeScript"},
            {"skill_id": 123, "skill_name": "Angular"}
        ],
        [
            {
                "course_id": "201",
                "course_name": "Advanced Angular",
                "short_description": "Master Angular framework",
                "description": "Deep dive into Angular and TypeScript",
                "skills": ["angular", "typescript"]
            }
        ]
    )
    
    print("\nSPECIFIC QUERY RESULT:")
    print(specific_result['skill_analysis'][:500] + "...")
    
    print("\n" + "="*80)
    print("ENHANCEMENT SUCCESS!")
    print("="*80)