import ollama
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
import random
import asyncio
import re
import hashlib
from dataclasses import dataclass
from enum import Enum

class ConversationStage(Enum):
    INITIALIZATION = "initialization"
    RAPPORT_BUILDING = "rapport_building"
    CHALLENGE_INTRODUCTION = "challenge_introduction"
    SKILL_DEVELOPMENT = "skill_development"
    ADVANCED_SCENARIOS = "advanced_scenarios"
    CLOSURE = "closure"

@dataclass
class SessionContext:
    session_id: int
    client_id: str
    user_type: str
    roleplay_category: str
    conversation_stage: ConversationStage
    turn_count: int
    created_at: datetime
    last_activity: datetime
    user_performance_score: float
    key_topics_covered: List[str]
    learning_objectives_met: List[str]

class RoleplayProcessor:
    def __init__(self):
        self.model_name = "gemma3:12b"
        self.client = ollama.AsyncClient()
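        # In-process stores keyed by session ID; state is lost when the process restarts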
        self.session_storage: Dict[int, SessionContext] = {}
        self.conversation_memory: Dict[int, List[Dict[str, Any]]] = {}

    async def process_roleplay(
        self,
        client_id: str,
        user_type: str,
        session_id: Optional[int],
        org_knowledge_base: Dict[str, Any],
        roleplay_data: Dict[str, Any],
        skills_for_roleplay: List[Dict[str, Any]],
        is_suggested_responses: int,
        previous_memory: List[str],
        query: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Enhanced roleplay processing with conditional suggested responses
        """
        try:
            # Handle session management
            final_session_id, session_context = self._manage_session(
                client_id, user_type, session_id, previous_memory, roleplay_data
            )

            # Process organization and roleplay data
            org_info = self._extract_organization_info(org_knowledge_base)
            roleplay_info = self._extract_roleplay_info(roleplay_data)

            # Determine conversation stage and adjust behavior
            conversation_stage = self._determine_conversation_stage(
                session_context, previous_memory, query
            )

            # Build contextual memory representation
            memory_context = self._build_memory_context(
                final_session_id, previous_memory, query
            )

            # Generate advanced prompt with memory integration and skills
            context_prompt = self._build_enhanced_context_prompt(
                org_info, roleplay_info, user_type, memory_context,
                conversation_stage, session_context, skills_for_roleplay
            )

            # Generate intelligent response
            response = await self._generate_contextual_response(
                context_prompt, roleplay_info, conversation_stage, query
            )

            # Generate suggested responses based on is_suggested_responses flag
            suggested_responses = []
            if is_suggested_responses == 1:
                suggested_responses = await self._generate_suggested_responses(
                    response, user_type, roleplay_info, org_info, conversation_stage,
                    memory_context, session_context, skills_for_roleplay
                )
            suggested_responses = suggested_responses[:4]

            # Update memory and session tracking
            updated_memory = self._update_conversation_memory(
                final_session_id, previous_memory, response, query
            )

            # Update session context
            self._update_session_context(
                final_session_id, conversation_stage, response, query
            )

            return {
                "response": response,
                "updated_memory": updated_memory,
                "session_id": final_session_id,
                "conversation_stage": conversation_stage.value,
                "suggested_responses": suggested_responses,
                "timestamp": datetime.now().isoformat()
            }

        except Exception as e:
            raise RuntimeError(f"Error processing roleplay: {e}") from e

    def _manage_session(
        self,
        client_id: str,
        user_type: str,
        session_id: Optional[int],
        previous_memory: List[str],
        roleplay_data: Dict[str, Any]
    ) -> tuple[int, SessionContext]:
        """
        Enhanced session management with proper context tracking
        """
        # First-time interaction (no previous memory)
        if not previous_memory:
            if session_id is None:
                # Brand new session
                new_session_id = self._generate_session_id()
                session_context = SessionContext(
                    session_id=new_session_id,
                    client_id=client_id,
                    user_type=user_type,
                    roleplay_category=roleplay_data.get('category', 'general'),
                    conversation_stage=ConversationStage.INITIALIZATION,
                    turn_count=0,
                    created_at=datetime.now(),
                    last_activity=datetime.now(),
                    user_performance_score=0.0,
                    key_topics_covered=[],
                    learning_objectives_met=[]
                )
                self.session_storage[new_session_id] = session_context
                self.conversation_memory[new_session_id] = []
                return new_session_id, session_context
            else:
                # Session ID provided but no memory - validate and create context
                if session_id not in self.session_storage:
                    session_context = SessionContext(
                        session_id=session_id,
                        client_id=client_id,
                        user_type=user_type,
                        roleplay_category=roleplay_data.get('category', 'general'),
                        conversation_stage=ConversationStage.INITIALIZATION,
                        turn_count=0,
                        created_at=datetime.now(),
                        last_activity=datetime.now(),
                        user_performance_score=0.0,
                        key_topics_covered=[],
                        learning_objectives_met=[]
                    )
                    self.session_storage[session_id] = session_context
                    self.conversation_memory[session_id] = []
                return session_id, self.session_storage[session_id]

        # Continuing conversation (has previous memory)
        else:
            if session_id is not None and session_id in self.session_storage:
                # Valid existing session
                session_context = self.session_storage[session_id]
                if session_context.client_id == client_id:
                    return session_id, session_context

            # Invalid session or session mismatch - create new one
            new_session_id = self._generate_session_id()
            session_context = SessionContext(
                session_id=new_session_id,
                client_id=client_id,
                user_type=user_type,
                roleplay_category=roleplay_data.get('category', 'general'),
                conversation_stage=ConversationStage.RAPPORT_BUILDING,
                turn_count=len(previous_memory),
                created_at=datetime.now(),
                last_activity=datetime.now(),
                user_performance_score=0.0,
                key_topics_covered=[],
                learning_objectives_met=[]
            )
            self.session_storage[new_session_id] = session_context
            self.conversation_memory[new_session_id] = []
            return new_session_id, session_context

    def _generate_session_id(self) -> int:
        """Generate a pseudo-random numeric session ID.

        Not cryptographically secure: the ID space is capped at one million
        values, so collisions are avoided by checking against active sessions.
        """
        while True:
            timestamp = str(datetime.now().timestamp())
            random_component = str(random.randint(100000, 999999))
            hash_object = hashlib.md5(f"{timestamp}{random_component}".encode())
            session_id = int(hash_object.hexdigest()[:8], 16) % 1000000
            if session_id not in self.session_storage:
                return session_id

    def _determine_conversation_stage(
        self,
        session_context: SessionContext,
        previous_memory: List[str],
        query: Optional[str]
    ) -> ConversationStage:
        """
        Intelligent conversation stage determination
        """
        turn_count = len(previous_memory)

        if turn_count == 0:
            return ConversationStage.INITIALIZATION
        elif turn_count <= 2:
            return ConversationStage.RAPPORT_BUILDING
        elif turn_count <= 5:
            return ConversationStage.CHALLENGE_INTRODUCTION
        elif turn_count <= 8:
            return ConversationStage.SKILL_DEVELOPMENT
        elif turn_count <= 12:
            return ConversationStage.ADVANCED_SCENARIOS
        else:
            return ConversationStage.CLOSURE

    def _build_memory_context(
        self,
        session_id: int,
        previous_memory: List[str],
        query: Optional[str]
    ) -> Dict[str, Any]:
        """
        Build comprehensive memory context for conversation continuity
        """
        memory_context = {
            "conversation_history": [],
            "key_topics": [],
            "user_responses": [],
            "ai_responses": [],
            "current_query": query,
            "conversation_flow": "natural"
        }

        # Fold in any stored memory, normalized to the same schema used for
        # entries built from previous_memory below
        if session_id in self.conversation_memory:
            for entry in self.conversation_memory[session_id]:
                memory_context["conversation_history"].append({
                    "turn": entry.get("turn_number"),
                    "content": entry.get("ai_response", ""),
                    "type": "ai_response",
                    "timestamp": entry.get("timestamp")
                })

        # _update_conversation_memory returns memory newest-first, so reverse it
        # here to rebuild the history in chronological order
        if previous_memory:
            for i, response in enumerate(reversed(previous_memory)):
                response_context = {
                    "turn": i + 1,
                    "content": response,
                    "type": "ai_response" if i % 2 == 0 else "user_response",
                    "timestamp": datetime.now().isoformat()
                }
                memory_context["conversation_history"].append(response_context)

        return memory_context

    def _extract_organization_info(self, org_knowledge_base: Any) -> Dict[str, str]:
        """Extract organization information with error handling"""
        try:
            return {
                'organisation_name': org_knowledge_base.organisation_name,
                'organisation_objective': org_knowledge_base.organisation_objective,
                'organisation_information': org_knowledge_base.organisation_information
            }
        except AttributeError:
            # Fallback for dictionary access
            return {
                'organisation_name': org_knowledge_base.get('organisation_name', 'Unknown Organization'),
                'organisation_objective': org_knowledge_base.get('organisation_objective', 'Not specified'),
                'organisation_information': org_knowledge_base.get('organisation_information', 'No information available')
            }

    def _extract_roleplay_info(self, roleplay_data: Dict[str, Any]) -> Dict[str, str]:
        """Extract roleplay information with validation"""
        return {
            'category': roleplay_data.get('category', 'general').lower(),
            'objective': roleplay_data.get('objective', 'General roleplay practice'),
            'additional_info': roleplay_data.get('additional_info', ''),
            'difficulty_level': roleplay_data.get('difficulty_level', 'medium'),
            'scenario_type': roleplay_data.get('scenario_type', 'standard')
        }

    def _build_enhanced_context_prompt(
        self,
        org_info: Dict[str, str],
        roleplay_info: Dict[str, str],
        user_type: str,
        memory_context: Dict[str, Any],
        conversation_stage: ConversationStage,
        session_context: SessionContext,
        skills_for_roleplay: Optional[List[Dict[str, Any]]] = None
    ) -> str:
        """
        Build sophisticated context prompt with memory integration and skills focus
        """
        # Analyze conversation history for continuity
        conversation_analysis = self._analyze_conversation_history(memory_context)

        # Build character profile
        character_profile = self._build_advanced_character_profile(
            roleplay_info, org_info, conversation_stage
        )

        # Generate stage-specific instructions
        stage_instructions = self._get_stage_specific_instructions(
            conversation_stage, roleplay_info['category']
        )

        # Build memory-aware context
        memory_summary = self._build_memory_summary(memory_context)

        # Build skills context
        skills_context = self._build_skills_context(skills_for_roleplay or [])

        prompt = f"""
    You are an advanced AI roleplay partner in a sophisticated business training environment.
    === SESSION CONTEXT ===
    Session ID: {session_context.session_id}
    Conversation Stage: {conversation_stage.value.title()}
    Turn Count: {session_context.turn_count}
    Organization: {org_info['organisation_name']}
    Participant: {user_type.title()}
    === ORGANIZATION PROFILE ===
    Name: {org_info['organisation_name']}
    Objective: {org_info['organisation_objective']}
    Details: {org_info['organisation_information']}
    === ROLEPLAY SCENARIO ===
    Category: {roleplay_info['category'].title()}
    Objective: {roleplay_info['objective']}
    Context: {roleplay_info['additional_info']}
    Difficulty: {roleplay_info['difficulty_level'].title()}
    Scenario Type: {roleplay_info['scenario_type']}
    === SKILLS FOCUS ===
    {skills_context}
    === CHARACTER PROFILE ===
    {character_profile}
    === CONVERSATION MEMORY ===
    {memory_summary}
    === CONVERSATION ANALYSIS ===
    {conversation_analysis}
    === STAGE-SPECIFIC INSTRUCTIONS ===
    {stage_instructions}
    === ADVANCED RESPONSE GUIDELINES ===
    1. MEMORY INTEGRATION: Reference previous conversations naturally and maintain consistency
    2. PROGRESSIVE LEARNING: Adapt difficulty and complexity based on conversation stage
    3. AUTHENTIC INTERACTION: Act as a realistic business professional with genuine motivations
    4. EDUCATIONAL VALUE: Provide meaningful challenges that develop specific skills
    5. EMOTIONAL INTELLIGENCE: Display appropriate emotions and interpersonal dynamics
    6. CONTEXTUAL AWARENESS: Maintain awareness of business context and organizational objectives
    7. CONVERSATIONAL FLOW: Ensure natural progression and avoid repetitive patterns
    8. SKILLS DEVELOPMENT: Focus on developing and testing the specified skills during interaction
    Current Query Context: {memory_context.get('current_query') or 'No specific query provided'}
    Respond as the {self._get_roleplay_character(roleplay_info)} keeping all context and memory in mind:
    """

        return prompt

    def _analyze_conversation_history(self, memory_context: Dict[str, Any]) -> str:
        """Analyze conversation history for patterns and insights"""
        history = memory_context.get("conversation_history", [])

        if not history:
            return "New conversation - establish rapport and scenario context."

        analysis = f"Conversation analysis ({len(history)} exchanges):\n"

        # Analyze recent patterns
        if len(history) >= 3:
            recent_topics = [item.get("content", "")[:50] for item in history[-3:]]
            analysis += f"Recent topics: {', '.join(recent_topics)}\n"

        # Determine conversation flow
        if len(history) <= 2:
            analysis += "Early stage - focus on scenario establishment and engagement."
        elif len(history) <= 5:
            analysis += "Development stage - introduce challenges and assess skills."
        else:
            analysis += "Advanced stage - provide complex scenarios and feedback."

        return analysis

    def _build_advanced_character_profile(
        self,
        roleplay_info: Dict[str, str],
        org_info: Dict[str, str],
        conversation_stage: ConversationStage
    ) -> str:
        """Build detailed character profile with stage awareness"""
        category = roleplay_info['category']

        base_profiles = {
            'sales': f"Experienced procurement manager at a growing company evaluating {org_info['organisation_name']}'s solutions. You have budget authority but need to justify ROI. You're professional, detail-oriented, and have experience with similar implementations.",

            'customer_service': f"Valued customer of {org_info['organisation_name']} with a 2-year relationship. You have high expectations for service quality and personalized attention. You're generally professional but can become frustrated with poor service.",

            'management': f"Senior team member with 5+ years experience working with {org_info['organisation_name']}. You have strong opinions about processes and value clear communication. You're results-oriented and expect professional leadership.",

            'negotiation': f"Business development manager representing a potential partner organization. You're skilled in negotiations, understand market dynamics, and have alternatives. You're professional but firm about your objectives."
        }

        profile = base_profiles.get(category, f"Professional counterpart working with {org_info['organisation_name']} in a {category} context.")

        # Add stage-specific characteristics
        stage_additions = {
            ConversationStage.INITIALIZATION: "You're cautious and formal, gathering initial information.",
            ConversationStage.RAPPORT_BUILDING: "You're becoming more open and sharing relevant details.",
            ConversationStage.CHALLENGE_INTRODUCTION: "You're presenting real concerns and testing competence.",
            ConversationStage.SKILL_DEVELOPMENT: "You're engaging in detailed discussions and problem-solving.",
            ConversationStage.ADVANCED_SCENARIOS: "You're presenting complex challenges and evaluating solutions."
        }

        return f"{profile}\n\nCurrent demeanor: {stage_additions.get(conversation_stage, 'Professional and engaged.')}"

    def _get_stage_specific_instructions(
        self,
        conversation_stage: ConversationStage,
        category: str
    ) -> str:
        """Get specific instructions based on conversation stage"""
        stage_instructions = {
            ConversationStage.INITIALIZATION: "Establish scenario context, introduce yourself professionally, and set the stage for interaction.",
            ConversationStage.RAPPORT_BUILDING: "Build relationship, share relevant information, and establish trust and credibility.",
            ConversationStage.CHALLENGE_INTRODUCTION: "Present realistic challenges, ask probing questions, and test initial competence.",
            ConversationStage.SKILL_DEVELOPMENT: "Engage in complex problem-solving, provide detailed scenarios, and assess skill application.",
            ConversationStage.ADVANCED_SCENARIOS: "Present sophisticated challenges, test advanced skills, and provide nuanced feedback.",
            ConversationStage.CLOSURE: "Summarize outcomes, provide final assessment, and guide toward natural conclusion."
        }

        return stage_instructions.get(conversation_stage, "Engage professionally and educationally.")

    def _build_memory_summary(self, memory_context: Dict[str, Any]) -> str:
        """Build comprehensive memory summary"""
        if not memory_context.get("conversation_history"):
            return "No previous conversation history."

        history = memory_context["conversation_history"]
        summary = f"Conversation Memory Summary:\n"

        # Recent exchanges
        if len(history) >= 2:
            summary += f"Last exchange: {history[-1].get('content', '')[:100]}...\n"

        # Key topics covered
        if len(history) >= 3:
            topics = [item.get('content', '')[:30] for item in history[-3:]]
            summary += f"Recent topics: {', '.join(topics)}\n"

        # Current query context
        if memory_context.get('current_query'):
            summary += f"Current query: {memory_context['current_query']}\n"

        return summary

    def _get_roleplay_character(self, roleplay_info: Dict[str, str]) -> str:
        """Get character description for roleplay"""
        character_map = {
            'sales': 'potential enterprise client',
            'customer_service': 'valued customer',
            'management': 'team member',
            'negotiation': 'business partner',
            'crm': 'existing customer'
        }

        return character_map.get(roleplay_info['category'], 'business professional')

    async def _generate_contextual_response(
        self,
        prompt: str,
        roleplay_info: Dict[str, str],
        conversation_stage: ConversationStage,
        query: Optional[str]
    ) -> str:
        """Generate contextually aware response"""
        try:
            # Adjust parameters based on stage and category
            temperature = self._get_optimal_temperature(
                roleplay_info['category'], conversation_stage
            )

            response = await self.client.chat(
                model=self.model_name,
                messages=[
                    {
                        'role': 'system',
                        'content': 'You are an expert business roleplay partner providing realistic, educational interactions that help professionals develop skills.'
                    },
                    {
                        'role': 'user',
                        'content': prompt
                    }
                ],
                options={
                    'temperature': temperature,
                    'top_p': 0.9,
                    'num_predict': 500,  # Ollama's response-length option (it does not accept 'max_tokens')
                    'frequency_penalty': 0.2,
                    'presence_penalty': 0.1
                }
            )

            generated_response = response['message']['content'].strip()
            return self._post_process_response(generated_response, roleplay_info)

        except Exception as e:
            raise RuntimeError(f"Error generating response: {e}") from e

    def _get_optimal_temperature(
        self,
        category: str,
        conversation_stage: ConversationStage
    ) -> float:
        """Get optimal temperature based on category and stage"""
        base_temps = {
            'sales': 0.7,
            'customer_service': 0.6,
            'management': 0.65,
            'negotiation': 0.75
        }

        # Adjust based on conversation stage
        stage_adjustments = {
            ConversationStage.INITIALIZATION: -0.1,
            ConversationStage.RAPPORT_BUILDING: 0.0,
            ConversationStage.CHALLENGE_INTRODUCTION: 0.1,
            ConversationStage.SKILL_DEVELOPMENT: 0.05,
            ConversationStage.ADVANCED_SCENARIOS: 0.15
        }

        base_temp = base_temps.get(category, 0.7)
        adjustment = stage_adjustments.get(conversation_stage, 0.0)

        return max(0.3, min(1.0, base_temp + adjustment))

    def _post_process_response(self, response: str, roleplay_info: Dict[str, str]) -> str:
        """Enhanced response post-processing"""
        # Strip HTML-like tags and markdown emphasis markers while keeping the inner text
        response = re.sub(r'<[^>]*>', '', response)
        response = re.sub(r'\*{1,2}([^*]+)\*{1,2}', r'\1', response)
        response = re.sub(r'_{1,2}([^_]+)_{1,2}', r'\1', response)

        # Cap the reply at four sentences to keep turns conversational
        sentences = [s.strip() for s in response.split('.') if s.strip()]
        if len(sentences) > 4:
            sentences = sentences[:4]

        processed = '. '.join(sentences)
        if not processed.endswith('.'):
            processed += '.'

        return processed

    def _update_conversation_memory(
        self,
        session_id: int,
        previous_memory: List[str],
        new_response: str,
        query: Optional[str]
    ) -> List[str]:
        """Enhanced memory management with intelligent retention"""
        # Build comprehensive memory entry
        memory_entry = {
            "ai_response": new_response,
            "user_query": query,
            "timestamp": datetime.now().isoformat(),
            "turn_number": len(previous_memory) + 1
        }

        # Update stored memory
        if session_id not in self.conversation_memory:
            self.conversation_memory[session_id] = []

        self.conversation_memory[session_id].append(memory_entry)

        # Create updated memory list for return
        updated_memory = [new_response] + previous_memory

        # Truncate to the most recent entries to keep the prompt context bounded
        # (the list is newest-first, so slicing from the front keeps recent turns)
        max_memory_items = 15
        if len(updated_memory) > max_memory_items:
            updated_memory = updated_memory[:max_memory_items]

        return updated_memory

    def _update_session_context(
        self,
        session_id: int,
        conversation_stage: ConversationStage,
        response: str,
        query: Optional[str]
    ):
        """Update session context with latest interaction"""
        if session_id in self.session_storage:
            context = self.session_storage[session_id]
            context.last_activity = datetime.now()
            context.turn_count += 1
            context.conversation_stage = conversation_stage

    def get_session_analytics(self, session_id: int) -> Dict[str, Any]:
        """Get comprehensive session analytics"""
        if session_id not in self.session_storage:
            return {"error": "Session not found"}

        context = self.session_storage[session_id]
        memory = self.conversation_memory.get(session_id, [])

        return {
            "session_id": session_id,
            "client_id": context.client_id,
            "roleplay_category": context.roleplay_category,
            "conversation_stage": context.conversation_stage.value,
            "turn_count": context.turn_count,
            "duration_minutes": (datetime.now() - context.created_at).total_seconds() / 60,
            "memory_entries": len(memory),
            "performance_score": context.user_performance_score,
            "topics_covered": context.key_topics_covered
        }

    def cleanup_old_sessions(self, hours_threshold: int = 24):
        """Clean up sessions older than threshold"""
        cutoff_time = datetime.now() - timedelta(hours=hours_threshold)

        sessions_to_remove = []
        for session_id, context in self.session_storage.items():
            if context.last_activity < cutoff_time:
                sessions_to_remove.append(session_id)

        for session_id in sessions_to_remove:
            del self.session_storage[session_id]
            if session_id in self.conversation_memory:
                del self.conversation_memory[session_id]

        return len(sessions_to_remove)

    def _build_skills_context(self, skills_for_roleplay: List[Dict[str, Any]]) -> str:
        """Build skills context for roleplay focus"""
        if not skills_for_roleplay:
            return "No specific skills targeted for this roleplay session."

        skills_text = "Target Skills for Development:\n"
        for skill in skills_for_roleplay:
            skill_id = skill.get('skill_id', 'Unknown')
            skill_name = skill.get('skill_name', 'Unknown Skill')
            skills_text += f"- {skill_name} (ID: {skill_id})\n"

        skills_text += "\nFocus on creating scenarios and challenges that help develop these specific skills."
        return skills_text

    async def _generate_suggested_responses(
        self,
        ai_response: str,
        user_type: str,
        roleplay_info: Dict[str, str],
        org_info: Dict[str, str],
        conversation_stage: ConversationStage,
        memory_context: Dict[str, Any],
        session_context: SessionContext,
        skills_for_roleplay: List[Dict[str, Any]]
    ) -> List[str]:
        """
        Generate intelligent suggested responses for the user based on AI response and context
        """
        try:
            # Build context for response generation
            response_context = self._build_response_generation_context(
                ai_response, user_type, roleplay_info, org_info,
                conversation_stage, memory_context, session_context, skills_for_roleplay
            )

            # Generate response suggestions using AI
            suggestions_prompt = f"""
    You are an expert business training consultant generating intelligent response suggestions for roleplay participants.
    === CONTEXT ===
    User Type: {user_type.title()}
    Roleplay Category: {roleplay_info['category'].title()}
    Organization: {org_info['organisation_name']}
    Conversation Stage: {conversation_stage.value.title()}
    Turn Count: {session_context.turn_count}
    === AI ROLEPLAY PARTNER'S RESPONSE ===
    {ai_response}
    === SCENARIO DETAILS ===
    Objective: {roleplay_info['objective']}
    Difficulty: {roleplay_info['difficulty_level'].title()}
    Additional Context: {roleplay_info['additional_info']}
    === SKILLS BEING DEVELOPED ===
    {self._format_skills_for_prompt(skills_for_roleplay)}
    === RESPONSE GENERATION CONTEXT ===
    {response_context}
    === RESPONSE GENERATION INSTRUCTIONS ===
    Generate exactly 4 professional response options that the {user_type} could use to reply to the AI partner.
    Each response should:
    1. Be contextually appropriate to the AI partner's message
    2. Advance the conversation constructively
    3. Demonstrate the target skills being developed
    4. Match the user type ({user_type}) and conversation stage
    5. Be 15-40 words long
    6. Sound natural and professional
    7. Show different approaches (e.g., direct, consultative, relationship-building)
    User Type Guidelines:
    - ADMIN: Focus on strategic, systematic, and analytical responses
    - MANAGER: Focus on leadership, team-oriented, and results-driven responses
    - LEARNER: Focus on learning-oriented, question-asking, and skill-building responses
    Response Types to Include:
    1. Direct/Assertive Response
    2. Consultative/Question-based Response
    3. Relationship-building/Collaborative Response
    4. Analytical/Strategic Response
    Format your response as exactly 4 responses, one per line, without numbering or bullets:
    """

            # Generate suggestions
            response = await self.client.chat(
                model=self.model_name,
                messages=[
                    {
                        'role': 'system',
                        'content': 'You are an expert business training consultant specializing in roleplay response development and professional communication coaching.'
                    },
                    {
                        'role': 'user',
                        'content': suggestions_prompt
                    }
                ],
                options={
                    'temperature': 0.7,
                    'top_p': 0.9,
                    'num_predict': 200,  # Ollama's response-length option (it does not accept 'max_tokens')
                    'frequency_penalty': 0.3,
                    'presence_penalty': 0.2
                }
            )

            suggested_responses = response['message']['content'].strip()

            # Parse and validate responses
            responses = self._parse_and_validate_responses(
                suggested_responses, user_type, roleplay_info, skills_for_roleplay
            )

            return responses

        except Exception:
            # Fallback to predefined responses if generation fails
            return self._get_fallback_responses(user_type, roleplay_info['category'], conversation_stage)

    def _build_response_generation_context(
        self,
        ai_response: str,
        user_type: str,
        roleplay_info: Dict[str, str],
        org_info: Dict[str, str],
        conversation_stage: ConversationStage,
        memory_context: Dict[str, Any],
        session_context: SessionContext,
        skills_for_roleplay: List[Dict[str, Any]]
    ) -> str:
        """Build comprehensive context for response generation"""

        # Analyze AI response for response opportunities
        response_analysis = self._analyze_ai_response_for_user_responses(ai_response, roleplay_info['category'])

        # Get skills-specific guidance
        skills_guidance = self._get_skills_response_guidance(skills_for_roleplay, conversation_stage)

        # Get stage-specific response focus
        stage_focus = self._get_stage_response_focus(conversation_stage, roleplay_info['category'])

        context = f"""
    AI RESPONSE ANALYSIS:
    {response_analysis}
    SKILLS DEVELOPMENT GUIDANCE:
    {skills_guidance}
    STAGE-SPECIFIC FOCUS:
    {stage_focus}
    USER TYPE CONSIDERATIONS:
    {self._get_user_type_response_preferences(user_type, roleplay_info['category'])}
    """

        return context

    def _format_skills_for_prompt(self, skills_for_roleplay: List[Dict[str, Any]]) -> str:
        """Format skills for prompt inclusion"""
        if not skills_for_roleplay:
            return "No specific skills targeted."

        return ', '.join([skill.get('skill_name', 'Unknown') for skill in skills_for_roleplay])

    def _analyze_ai_response_for_user_responses(self, ai_response: str, category: str) -> str:
        """Analyze AI response to identify user response opportunities"""

        # Check for different types of AI communications
        has_question = '?' in ai_response
        has_concern = any(word in ai_response.lower() for word in ['concern', 'worried', 'issue', 'problem', 'challenge'])
        has_request = any(word in ai_response.lower() for word in ['need', 'require', 'want', 'looking for'])
        has_objection = any(word in ai_response.lower() for word in ['but', 'however', 'expensive', 'cost', 'budget'])

        analysis = f"AI response type analysis:\n"
        analysis += f"- Contains questions: {has_question}\n"
        analysis += f"- Expresses concerns: {has_concern}\n"
        analysis += f"- Makes requests: {has_request}\n"
        analysis += f"- Shows objections: {has_objection}\n"

        # Determine recommended response approach
        if has_objection:
            analysis += "Recommended approach: Address objections with value proposition and evidence"
        elif has_concern:
            analysis += "Recommended approach: Acknowledge concerns and provide reassurance with specifics"
        elif has_question:
            analysis += "Recommended approach: Provide comprehensive answers and ask follow-up questions"
        elif has_request:
            analysis += "Recommended approach: Fulfill request and expand with additional value"
        else:
            analysis += "Recommended approach: Build on the conversation and advance the relationship"

        return analysis

    def _get_skills_response_guidance(self, skills_for_roleplay: List[Dict[str, Any]], conversation_stage: ConversationStage) -> str:
        """Get skills-specific guidance for response generation"""

        if not skills_for_roleplay:
            return "Focus on general professional communication skills."

        skills_guidance = "Skills-specific response guidance:\n"

        for skill in skills_for_roleplay:
            skill_name = skill.get('skill_name', '').lower()

            if 'negotiation' in skill_name:
                skills_guidance += "- Negotiation: Use collaborative language, explore win-win solutions, ask probing questions\n"
            elif 'consultative' in skill_name or 'selling' in skill_name:
                skills_guidance += "- Consultative Selling: Focus on understanding needs, ask discovery questions, provide tailored solutions\n"
            elif 'communication' in skill_name:
                skills_guidance += "- Communication: Use clear, empathetic language, active listening, and appropriate tone\n"
            elif 'leadership' in skill_name:
                skills_guidance += "- Leadership: Show confidence, provide direction, inspire and motivate\n"
            else:
                skills_guidance += f"- {skill.get('skill_name', 'Unknown')}: Apply skill-specific techniques and best practices\n"

        return skills_guidance

    def _get_stage_response_focus(self, conversation_stage: ConversationStage, category: str) -> str:
        """Get stage-specific response focus"""

        stage_focuses = {
            ConversationStage.INITIALIZATION: "Focus on professional introduction, establishing credibility, and setting positive tone",
            ConversationStage.RAPPORT_BUILDING: "Focus on building trust, finding common ground, and showing genuine interest",
            ConversationStage.CHALLENGE_INTRODUCTION: "Focus on understanding challenges, asking clarifying questions, and showing expertise",
            ConversationStage.SKILL_DEVELOPMENT: "Focus on demonstrating skills, providing solutions, and handling objections",
            ConversationStage.ADVANCED_SCENARIOS: "Focus on complex problem-solving, strategic thinking, and advanced techniques",
            ConversationStage.CLOSURE: "Focus on summarizing value, confirming next steps, and maintaining relationships"
        }

        return stage_focuses.get(conversation_stage, "Focus on professional and contextually appropriate responses")

    def _get_user_type_response_preferences(self, user_type: str, category: str) -> str:
        """Get user type specific response preferences"""

        preferences = {
            'admin': "Focus on strategic thinking, data-driven responses, system-level considerations, and organizational impact",
            'manager': "Focus on team implications, process optimization, results orientation, and stakeholder management",
            'learner': "Focus on skill application, learning opportunities, asking good questions, and seeking feedback"
        }

        return preferences.get(user_type, "Focus on professional development and effective communication")

    def _parse_and_validate_responses(
        self,
        suggested_responses: str,
        user_type: str,
        roleplay_info: Dict[str, str],
        skills_for_roleplay: List[Dict[str, Any]]
    ) -> List[str]:
        """Parse and validate generated responses"""

        # Split responses by newlines and clean up
        responses = [r.strip() for r in suggested_responses.split('\n') if r.strip()]

        # Remove any numbering or bullets
        cleaned_responses = []
        for response in responses:
            # Remove common prefixes
            response = re.sub(r'^[\d\.\-\*\+\s]*', '', response)
            # Remove quotes if present
            response = response.strip('"\'')
            # Ensure proper capitalization
            if response and not response[0].isupper():
                response = response[0].upper() + response[1:]

            if response and len(response.split()) >= 5:  # Minimum word count
                cleaned_responses.append(response)

        # Ensure we have exactly 4 responses
        if len(cleaned_responses) >= 4:
            return cleaned_responses[:4]
        else:
            # Add fallback responses if needed
            fallback_responses = self._get_fallback_responses(user_type, roleplay_info['category'], ConversationStage.SKILL_DEVELOPMENT)
            return (cleaned_responses + fallback_responses)[:4]

    def _get_fallback_responses(self, user_type: str, category: str, conversation_stage: ConversationStage) -> List[str]:
        """Get fallback responses when generation fails"""

        fallback_sets = {
            'admin': {
                'sales': [
                    "I'd like to understand the strategic implications of this solution for our organization.",
                    "Can you provide data on how this has performed for similar companies in our industry?",
                    "What are the long-term scalability considerations we should be aware of?",
                    "How does this align with our current business objectives?"
                ]
            },
            'manager': {
                'sales': [
                    "How will this solution impact my team's current workflow and productivity?",
                    "I need to understand the training requirements and timeline for implementation.",
                    "What ongoing support can we expect from your team during the transition?",
                    "What metrics should we track to measure the success of this implementation?"
                ]
            },
            'learner': {
                'sales': [
                    "That's interesting. Can you help me understand the key benefits in more detail?",
                    "What questions should I be asking to better evaluate this type of solution?",
                    "How do other customers typically approach this decision-making process?",
                    "What are some common challenges with this type of solution and how can they be addressed?"
                ]
            }
        }

        # Get category-specific fallbacks
        user_fallbacks = fallback_sets.get(user_type, {})
        responses = user_fallbacks.get(category, [
            "That's a valid point. Let me think about how this applies to our situation.",
            "I appreciate the information. Can you elaborate on the implementation process?",
            "This sounds promising. What would be the best next step for us to take?",
            "How does this compare to other solutions we might be considering?"
        ])

        return responses
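
# --- Usage sketch (illustrative only) ---
# A minimal end-to-end example, assuming a local Ollama server with the
# gemma3:12b model already pulled. The knowledge-base, roleplay, and skills
# dictionaries below are hypothetical shapes chosen to match the accessors
# used above (e.g. .get('organisation_name'), .get('skill_name')).
if __name__ == "__main__":
    async def _demo():
        processor = RoleplayProcessor()
        result = await processor.process_roleplay(
            client_id="client-001",
            user_type="manager",
            session_id=None,  # start a fresh session
            org_knowledge_base={
                "organisation_name": "Acme Corp",
                "organisation_objective": "Grow enterprise sales",
                "organisation_information": "Mid-size SaaS vendor",
            },
            roleplay_data={"category": "sales", "objective": "Handle pricing objections"},
            skills_for_roleplay=[{"skill_id": 1, "skill_name": "Negotiation"}],
            is_suggested_responses=1,  # 1 enables suggested replies
            previous_memory=[],  # empty list => first turn
            query="Hi, I'd like to discuss your pricing.",
        )
        print(result["response"])
        print(result["suggested_responses"])

    asyncio.run(_demo())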
