import os
import json
import re
from typing import Dict, List, Any, Optional
from groq import Groq
from config.prompts import (
    SCENARIO_GENERATION_PROMPT,
    CHARACTER_ROLEPLAY_PROMPT,
    CHARACTER_CONCLUSION_PROMPT,
    SKILL_ANALYSIS_PROMPT,
    get_skill_analysis_template
)

class GroqService:
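    """Service wrapper around the Groq chat-completions API.

    Handles scenario generation, in-character roleplay responses, skill analysis,
    and simple completions, while tracking token usage per operation type
    ('preview', 'conversation', 'assessment').
    """
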
    def __init__(self):
        api_key = os.getenv("GROQ_API_KEY")
        if not api_key:
            raise ValueError("GROQ_API_KEY not found in environment variables")

        self.client = Groq(api_key=api_key)
        self.model = "openai/gpt-oss-20b"  # You can change this to mixtral-8x7b-32768 or llama3-8b-8192

        # Token counters for different operations
        self.token_counts = {
            'preview': {'input': 0, 'output': 0, 'total': 0},
            'conversation': {'input': 0, 'output': 0, 'total': 0},
            'assessment': {'input': 0, 'output': 0, 'total': 0}
        }

    def _update_token_count(self, operation_type: str, usage_stats: Any) -> None:
        """Update token counts for a specific operation type."""
        if usage_stats and hasattr(usage_stats, 'prompt_tokens') and hasattr(usage_stats, 'completion_tokens'):
            input_tokens = usage_stats.prompt_tokens
            output_tokens = usage_stats.completion_tokens
            total_tokens = usage_stats.total_tokens

            self.token_counts[operation_type]['input'] += input_tokens
            self.token_counts[operation_type]['output'] += output_tokens
            self.token_counts[operation_type]['total'] += total_tokens

            print(f"GROQ Token count for {operation_type}: Input={input_tokens}, Output={output_tokens}, Total={total_tokens}")

    def get_token_counts(self) -> Dict[str, Any]:
        """Get a snapshot of the current token counts for all operations."""
        return {op: counts.copy() for op, counts in self.token_counts.items()}

    def reset_token_counts(self) -> None:
        """Reset all token counters"""
        for operation in self.token_counts:
            self.token_counts[operation] = {'input': 0, 'output': 0, 'total': 0}

    def generate_scenario(self, admin_input: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """Generate structured scenario from admin input"""
        try:
            prompt = SCENARIO_GENERATION_PROMPT.format(
                category=admin_input['category'],
                objective=admin_input['objective'],
                details=admin_input['details'],
                skills=', '.join(admin_input['skills_to_assess'])
            )

            response = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are a training scenario expert. Always respond with valid JSON only."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                model=self.model,
                temperature=0.7,
                max_tokens=2048
            )

            # Track tokens for preview operation
            self._update_token_count('preview', response.usage)

            response_text = response.choices[0].message.content.strip()

            # Try to extract JSON from the response
            try:
                # Strip any markdown code fences (```json ... ``` or ``` ... ```)
                if response_text.startswith('```json'):
                    response_text = response_text[7:]
                elif response_text.startswith('```'):
                    response_text = response_text[3:]
                if response_text.endswith('```'):
                    response_text = response_text[:-3]

                return json.loads(response_text.strip())
            except json.JSONDecodeError as e:
                print(f"JSON parsing error: {e}")
                print(f"Response text: {response_text}")
                return None

        except Exception as e:
            print(f"Error generating scenario: {e}")
            return None

    def play_character(self, scenario: Dict[str, Any], conversation_history: List[Dict[str, str]], user_message: str, is_conclusion: bool = False) -> Optional[str]:
        """AI character responses during roleplay"""
        try:
            # Format conversation history
            history_text = ""
            for turn in conversation_history:
                speaker = "Learner" if turn['speaker'] == 'learner' else scenario['ai_character']['name']
                history_text += f"{speaker}: {turn['message']}\n"

            if is_conclusion:
                prompt_template = CHARACTER_CONCLUSION_PROMPT
            else:
                prompt_template = CHARACTER_ROLEPLAY_PROMPT

            prompt = prompt_template.format(
                character_name=scenario['ai_character']['name'],
                personality=scenario['ai_character']['personality'],
                goals=scenario['ai_character']['goals'],
                background=scenario['ai_character']['background'],
                emotional_state=scenario['ai_character'].get('emotional_state', 'neutral'),
                context=scenario['scenario_setup']['context'],
                environment=scenario['scenario_setup']['environment'],
                objective=scenario.get('objective', ''),
                constraints=scenario['scenario_setup']['constraints'],
                conversation_history=history_text,
                user_message=user_message
            )

            response = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are a roleplay character. Stay in character at all times. Respond naturally, realistically, and politely. Maintain a courteous and professional tone throughout the conversation. Guide conversations toward professional conclusions when appropriate."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                model=self.model,
                temperature=0.8,
                max_tokens=512
            )

            # Track tokens for conversation operation
            self._update_token_count('conversation', response.usage)

            return response.choices[0].message.content.strip()

        except Exception as e:
            print(f"Error in character roleplay: {e}")
            return "I'm having trouble responding right now. Please try again."

    def analyze_skills(self, scenario: Dict[str, Any], conversation_turns: List[Dict[str, str]]) -> Optional[Dict[str, Any]]:
        """Comprehensive skill analysis"""
        try:
            # Format conversation for analysis
            conversation_text = ""
            for turn in conversation_turns:
                speaker = "Learner" if turn['speaker'] == 'learner' else "AI Character"
                conversation_text += f"{speaker}: {turn['message']}\n"

            # Build scenario context
            scenario_context = f"""
Category: {scenario['category']}
Objective: {scenario['objective']}
Context: {scenario['scenario_setup']['context']}
Success Criteria: {scenario['success_criteria']}
AI Character: {scenario['ai_character']['name']} - {scenario['ai_character']['background']}
"""

            skills = scenario['skills_to_assess']
            skill_template = get_skill_analysis_template(skills)

            prompt = SKILL_ANALYSIS_PROMPT.format(
                skills=', '.join(skills),
                scenario_context=scenario_context,
                conversation=conversation_text,
                skill_analysis_template=skill_template
            )

            response = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are an expert skill assessor. Provide detailed, accurate analysis in valid JSON format only."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                model=self.model,
                temperature=0.3,  # Lower temperature for consistent analysis
                max_tokens=3000
            )

            # Track tokens for assessment operation
            self._update_token_count('assessment', response.usage)

            response_text = response.choices[0].message.content.strip()

            # Try to extract JSON from response
            try:
                # Remove any markdown formatting
                if response_text.startswith('```json'):
                    response_text = response_text[7:]
                elif response_text.startswith('```'):
                    response_text = response_text[3:]
                if response_text.endswith('```'):
                    response_text = response_text[:-3]

                response_text = response_text.strip()
                analysis = json.loads(response_text)

                # Validate the analysis has required structure
                if not all(key in analysis for key in ['skill_analysis', 'overall_performance', 'conversation_analysis', 'recommendations']):
                    print("Analysis missing required keys")
                    return None

                return analysis

            except json.JSONDecodeError as e:
                print(f"JSON parsing error in analysis: {e}")
                print(f"Response text: {response_text}")

                # Fall back to extracting the first JSON object embedded in the text
                json_match = re.search(r'\{.*\}', response_text, re.DOTALL)
                if json_match:
                    try:
                        analysis = json.loads(json_match.group())
                        print("Successfully extracted JSON using regex fallback")
                        return analysis
                    except json.JSONDecodeError:
                        print("Regex fallback also failed")

                return None

        except Exception as e:
            print(f"Error analyzing skills: {e}")
            return None

    def get_completion(self, prompt: str) -> Optional[str]:
        """Get a simple text completion from the LLM"""
        try:
            response = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are a helpful assistant. Provide clear, natural responses without any formatting symbols."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                model=self.model,
                temperature=0.7,
                max_tokens=1500
            )

            # Track tokens for preview operation (get_completion is used for scenario formatting)
            self._update_token_count('preview', response.usage)

            return response.choices[0].message.content.strip()

        except Exception as e:
            print(f"Error getting completion: {e}")
            return None

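
# Minimal usage sketch (an assumption, not part of the service itself): requires
# GROQ_API_KEY in the environment and an importable config.prompts module; the
# admin_input values below are illustrative only.
if __name__ == "__main__":
    service = GroqService()
    sample_scenario = service.generate_scenario({
        "category": "Customer Service",
        "objective": "De-escalate an upset customer",
        "details": "The customer received a damaged product and wants a refund.",
        "skills_to_assess": ["empathy", "problem_solving"],
    })
    print(json.dumps(sample_scenario, indent=2) if sample_scenario else "Scenario generation failed")
    print(service.get_token_counts())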