# Standard library
import asyncio
import json
import logging
import os
import subprocess
import tempfile
import time

# Third-party
from fastapi import FastAPI, HTTPException, BackgroundTasks, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import Dict, List, Any, Optional

# Import the new manager models
from manager_career_guidance_models import (
    ManagerCareerGuidanceRequest, 
    ManagerCareerGuidanceResponse
)

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Define models for request validation
class Skill(BaseModel):
    """A single skill record shared by users, courses, and job profiles."""
    skill_id: str    # unique identifier for the skill
    skill_type: str  # skill category/type — taxonomy not visible in this file
    skill_name: str  # human-readable skill name

class JobProfile(BaseModel):
    """A user's job profile: its name and the skills it requires."""
    job_profile_name: Optional[str] = None
    job_profile_skills: Optional[List[Skill]] = None

class UserInfo(BaseModel):
    """Profile data for one user in a career-guidance request.

    NOTE(review): field naming mixes snake_case (user_name) and camelCase
    (managerId) — kept as-is because callers send these exact keys.
    """
    user_name: str
    managerId: str
    designation: str
    jobProfile: Optional[JobProfile] = None
    assignedCourses: List[str] = []   # course ids assigned to the user (Pydantic copies mutable defaults per instance)
    completedCourses: List[str] = []  # course ids the user has finished
    skills: List[Skill] = []          # skills the user currently holds

class Course(BaseModel):
    """A course from the client's internal catalogue."""
    courseId: str
    name: str
    short_description: str
    description: str
    skills: List[Skill] = []  # skills this course teaches

class CareerGuidanceRequest(BaseModel):
    """Request payload for /career-guidance."""
    client_id: int
    user_query: str
    # presumably keyed by user id/name — verify against callers
    user_data: Dict[str, UserInfo]
    # full course catalogue, keyed by client id
    client_all_courses_data: Dict[int, List[Course]]

class RecommendedCourse(BaseModel):
    """An internal-catalogue course recommended for the user."""
    courseId: str
    courseName: str
    matchScore: str  # NOTE(review): str here but int in OnlineCourse.matchScore — confirm intended
    reason: str      # why this course was recommended
    scenario_relevance: str

class OnlineCourse(BaseModel):
    """An externally-hosted course recommendation (e.g. from an online platform)."""
    courseId: str
    courseName: str
    platform: str    # hosting platform name
    instructor: str
    duration: str
    matchScore: int  # NOTE(review): int here but str in RecommendedCourse.matchScore — confirm intended
    reason: str      # why this course was recommended
    score: float     # relevance score — scale not visible in this file
    isOnlineCourse: bool = True  # discriminator vs internal-catalogue courses
    url: str
    type: str
    lastUpdated: int  # presumably an epoch timestamp — TODO confirm with producer
    thumbnail: Optional[str] = None

# Response payload for /career-guidance. token_count reports LLM token usage
# as produced by the worker script (defaulted to 0 when the script omits it).
class CareerGuidanceResponse(BaseModel):
    """Career-guidance answer plus optional analysis artefacts."""
    answer: str
    recommended_courses: List[RecommendedCourse]
    online_course_recommendations: Optional[List[OnlineCourse]] = []
    skill_gap_chart: Optional[Dict[str, Any]] = None
    job_profile_analysis: Optional[Dict[str, Any]] = None
    career_context: Optional[Dict[str, Any]] = None
    token_count: int

class PlatformCourseRequest(BaseModel):
    """Request payload for /fetch-platform-courses."""
    client_id: int
    user_query: str
    user_data: Dict[str, UserInfo]
    platform_name: str  # which online platform to fetch courses from

class PlatformCourseResponse(BaseModel):
    """Response payload for /fetch-platform-courses."""
    online_course_recommendations: List[OnlineCourse]

# Advanced Chatbot specific models
class AdvancedChatbotRequest(BaseModel):
    """Request payload shared by both chatbot endpoints (streaming and non-streaming)."""
    query: str
    context: Optional[Dict[str, Any]] = None  # prior conversation context, echoed back in responses
    user_id: Optional[str] = None
    session_id: Optional[str] = None

class AdvancedChatbotResponse(BaseModel):
    """Basic chatbot response shape.

    NOTE(review): not referenced by any endpoint in this file — the
    non-streaming endpoint uses EnhancedAdvancedChatbotResponse instead.
    """
    response: str
    search_results: Optional[List[str]] = []
    context: Optional[Dict[str, Any]] = None
    metadata: Optional[Dict[str, Any]] = None

# FastAPI application instance; served by an ASGI server (e.g. uvicorn) elsewhere.
app = FastAPI(
    title="Career Guidance API",
    description="API for career guidance recommendations using Ollama with Gemma 12B",
    version="1.0.0"
)

# CORS: wide open (any origin/method/header).
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (browsers reject credentialed wildcard
# responses) — tighten the origin list for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/")
async def root():
    """Liveness probe: confirms the API is up and reachable."""
    return {
        "status": "API is running",
        "message": "Welcome to Career Guidance testing API",
    }

@app.post("/career-guidance", response_model=CareerGuidanceResponse)
async def get_career_guidance(request: CareerGuidanceRequest, background_tasks: BackgroundTasks):
    """
    Generate career-guidance recommendations for a user.

    Serializes the validated request to a temporary JSON file, runs
    ``my_career_testing.py`` on it, and parses the JSON object embedded in the
    script's stdout into a CareerGuidanceResponse.

    Raises:
        HTTPException 500: script failure, missing/unparseable JSON in the
            script output, or any unexpected error.
    """
    temp_file_path = None
    try:
        payload = request.dict()
        # Persist the payload to disk so the worker script can read it by path.
        with tempfile.NamedTemporaryFile(mode='w+', suffix='.json', delete=False) as tmp_file:
            temp_file_path = tmp_file.name
            json.dump(payload, tmp_file)
            tmp_file.flush()
            os.fsync(tmp_file.fileno())  # ensure bytes hit disk before the child process reads

        try:
            result = subprocess.run(
                ["python3", "my_career_testing.py", temp_file_path],
                capture_output=True,
                text=True,
                check=True
            )
        except subprocess.CalledProcessError as e:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error executing Python script: {e.stderr}"
            )

        output = result.stdout.strip()
        logger.info(f"Script output: {output}")

        # The script may emit log noise around the JSON; extract the outermost
        # '{' ... '}' span and parse only that.
        json_start = output.find('{')
        json_end = output.rfind('}') + 1
        if json_start < 0 or json_end <= json_start:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="No valid JSON found in script output"
            )
        try:
            response_data = json.loads(output[json_start:json_end])
        except json.JSONDecodeError:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Failed to parse script output"
            )

        # Success: hand temp-file deletion to the background task and
        # relinquish ownership so the finally-block doesn't delete it early.
        background_tasks.add_task(cleanup_temp_file, temp_file_path)
        temp_file_path = None

        # Build response with defaults for any field the script omitted
        # (token_count defaults to 0, matching the response model).
        return {
            "answer": response_data.get("answer", ""),
            "recommended_courses": response_data.get("recommended_courses", []),
            "online_course_recommendations": response_data.get("online_course_recommendations", []),
            "skill_gap_chart": response_data.get("skill_gap_chart", {}),
            "job_profile_analysis": response_data.get("job_profile_analysis", {}),
            "career_context": response_data.get("career_context", {}),
            "token_count": response_data.get("token_count", 0)
        }

    except HTTPException:
        # BUG FIX: previously the blanket handler below caught deliberate
        # HTTPExceptions and re-wrapped them as "Unexpected error" 500s,
        # mangling the status code and detail. Propagate them untouched.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Unexpected error: {str(e)}"
        )
    finally:
        # BUG FIX: the temp file previously leaked on every failure path
        # (cleanup was only scheduled after a fully successful run).
        if temp_file_path is not None:
            cleanup_temp_file(temp_file_path)
    

@app.post("/career-guidance-manager", response_model=ManagerCareerGuidanceResponse)
async def get_manager_career_guidance(request: ManagerCareerGuidanceRequest, background_tasks: BackgroundTasks):
    """
    Advanced career guidance endpoint specifically designed for managers.
    Provides comprehensive team analysis, management insights, and strategic
    recommendations. Includes the total token count in the response.

    Raises:
        HTTPException 500: script failure or unparseable script output.
        HTTPException 408: worker script exceeded the 120s timeout.
    """
    temp_file_path = None
    try:
        payload = request.dict()
        # Persist the payload to disk so the worker script can read it by path.
        with tempfile.NamedTemporaryFile(mode='w+', suffix='.json', delete=False) as tmp_file:
            temp_file_path = tmp_file.name
            json.dump(payload, tmp_file)
            tmp_file.flush()
            os.fsync(tmp_file.fileno())  # ensure bytes hit disk before the child process reads

        try:
            result = subprocess.run(
                ["python3", "manager_career_guidance_processor.py", temp_file_path],
                capture_output=True,
                text=True,
                check=True,
                timeout=120  # hard cap on worker runtime
            )
        except subprocess.CalledProcessError as e:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=f"Error executing manager guidance script: {e.stderr}"
            )
        except subprocess.TimeoutExpired:
            raise HTTPException(
                status_code=status.HTTP_408_REQUEST_TIMEOUT,
                detail="Request processing timed out. Please try again."
            )

        output = result.stdout.strip()

        # Extract the outermost '{' ... '}' span from stdout and parse it.
        json_start = output.find('{')
        json_end = output.rfind('}') + 1
        if json_start < 0 or json_end <= json_start:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="No valid JSON found in script output"
            )
        try:
            response_data = json.loads(output[json_start:json_end])
        except json.JSONDecodeError:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Failed to parse script output"
            )

        # Success: hand temp-file deletion to the background task and
        # relinquish ownership so the finally-block doesn't delete it early.
        background_tasks.add_task(cleanup_temp_file, temp_file_path)
        temp_file_path = None

        # Ensure the token count is always present in the response.
        response_data.setdefault('total_tokens', 0)
        return response_data

    except HTTPException:
        # BUG FIX: previously the blanket handler below re-wrapped deliberate
        # HTTPExceptions (including the 408 timeout) as generic 500s.
        raise
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Unexpected error: {str(e)}"
        )
    finally:
        # BUG FIX: the temp file previously leaked on every failure path.
        if temp_file_path is not None:
            cleanup_temp_file(temp_file_path)
import asyncio
    
@app.post("/fetch-platform-courses", response_model=PlatformCourseResponse)
async def fetch_platform_courses(request: PlatformCourseRequest, background_tasks: BackgroundTasks):
    """
    Fetch online-course recommendations for a single platform.

    Writes the request to a temporary JSON file, runs the platform-fetch
    worker script in a thread-pool executor (so the event loop is not
    blocked), and returns the 'online_course_recommendations' list parsed
    from the script's stdout.

    Raises:
        HTTPException 500: script failure, unparseable output, or any
            unexpected error.
    """
    temp_file_path = None
    try:
        payload = request.dict()
        # Exiting the 'with' block closes (and therefore flushes) the file
        # before the worker script reads it.
        with tempfile.NamedTemporaryFile(mode='w+', suffix='.json', delete=False) as tmp_file:
            temp_file_path = tmp_file.name
            json.dump(payload, tmp_file)

        # BUG FIX: asyncio.get_event_loop() is deprecated inside coroutines;
        # get_running_loop() is the supported way to reach the current loop.
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(None, execute_subprocess, "my_career_fetch_particular_platform_online_courses.py", temp_file_path)

        output = result.stdout.strip()
        # Extract the outermost '{' ... '}' span from stdout and parse it.
        json_start = output.find('{')
        json_end = output.rfind('}') + 1
        if json_start < 0 or json_end <= json_start:
            raise HTTPException(status_code=500, detail="Error parsing JSON output")
        try:
            parsed = json.loads(output[json_start:json_end])
        except json.JSONDecodeError:
            raise HTTPException(status_code=500, detail="Error parsing JSON output")

        # Success: hand temp-file deletion to the background task and
        # relinquish ownership so the finally-block doesn't delete it early.
        background_tasks.add_task(cleanup_temp_file, temp_file_path)
        temp_file_path = None

        return {
            "online_course_recommendations": parsed.get("online_course_recommendations", [])
        }

    except HTTPException:
        # BUG FIX: previously re-wrapped as "Unexpected error" 500 by the
        # blanket handler below.
        raise
    except subprocess.CalledProcessError as e:
        raise HTTPException(status_code=500, detail=f"Error executing Python script: {e.stderr if hasattr(e, 'stderr') else str(e)}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
    finally:
        # BUG FIX: the temp file previously leaked on every failure path.
        if temp_file_path is not None:
            cleanup_temp_file(temp_file_path)

@app.post("/advanced-chatbot-query")
async def advanced_chatbot_query(request: AdvancedChatbotRequest):
    """
    Advanced chatbot endpoint with streaming response support.
    
    Features:
    - Streaming AI responses
    - Real-time search results with structured data (content and URL separate)
    - Context-aware responses
    - Session management support

    Emits SSE-style lines ("data: <json>\\n\\n") with event types 'start',
    'search_results', 'response_chunk', 'complete', and 'error'.
    """
    async def generate_stream():
        try:
            logger.info(f"Processing streaming chatbot request for query: {request.query}")
            
            # Import chatbot functions directly (deferred so the module loads
            # even when the chatbot package is unavailable at startup).
            from advanced_chatbot.chatbot import generate_response_with_search_results_stream
            from advanced_chatbot.search_tool import get_search_summary, get_structured_search_results
            
            # Initialize response structure
            response_data = {
                "response": "",
                "search_results": [],
                "search_results_data": [],  # Add structured search results
                "context": {},
                "metadata": {
                    "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                    "query_length": len(request.query),
                    "user_id": request.user_id,
                    "session_id": request.session_id,
                    "search_performed": False,
                    "search_results_count": 0,
                    "search_summary": ""
                }
            }
            
            # Send initial response
            yield f"data: {json.dumps({'type': 'start', 'data': response_data})}\n\n"
            
            # Generate streaming response
            full_response = ""
            search_results = []
            structured_search_results = []
            # BUG FIX: initialize here so the final-context build below cannot
            # raise NameError when the stream never yields a 'search_results'
            # chunk (previously only assigned inside that branch).
            formatted_search_results = []
            
            async for chunk in generate_response_with_search_results_stream(request.query):
                if chunk['type'] == 'search_results':
                    # Get both formatted and structured search results
                    search_results = chunk['data']
                    
                    # Get structured search results if not already provided
                    if 'structured_data' in chunk:
                        structured_search_results = chunk['structured_data']
                    else:
                        # Fallback: get structured results separately
                        structured_search_results = get_structured_search_results(request.query, num_results=5)
                    
                    # Update metadata
                    response_data['metadata']['search_performed'] = len(search_results) > 0
                    response_data['metadata']['search_results_count'] = len(search_results)
                    
                    # Format search results for backward compatibility (same as non-streaming method)
                    formatted_search_results = []
                    for i, result_data in enumerate(structured_search_results, 1):
                        formatted_result = f"{i}. {result_data['content']}\n🔗 URL: {result_data['url']}"
                        formatted_search_results.append(formatted_result)
                    
                    # Update response data with both formats
                    response_data['search_results'] = formatted_search_results
                    response_data['search_results_data'] = [
                        {
                            "content": result['content'],
                            "url": result['url'],
                            "title": result.get('title', ''),
                            "snippet": result.get('snippet', ''),
                            "source": result.get('source', ''),
                            "domain": result.get('domain', ''),
                            "relevance_score": result.get('relevance_score', 0.0)
                        } for result in structured_search_results
                    ]
                    
                    # Send search results update with both formats (matching non-streaming structure)
                    search_update = {
                        'type': 'search_results',
                        'data': {
                            'formatted_results': formatted_search_results,
                            'structured_results': response_data['search_results_data']
                        }
                    }
                    yield f"data: {json.dumps(search_update)}\n\n"
                    
                elif chunk['type'] == 'response_chunk':
                    full_response += chunk['data']
                    response_data['response'] = full_response
                    
                    # Send response chunk
                    yield f"data: {json.dumps({'type': 'response_chunk', 'data': chunk['data']})}\n\n"
            
            # Get search summary for final metadata
            if search_results:
                search_summary = get_search_summary(request.query, search_results)
                response_data['metadata']['search_summary'] = search_summary
            
            # Prepare final context (matching non-streaming method structure)
            response_data['context'] = {
                "last_query": request.query,
                "last_response": full_response[:100] + "..." if len(full_response) > 100 else full_response,
                "search_results": formatted_search_results[:3],  # Keep top 3 for context
                "search_results_data": structured_search_results[:3],  # Add structured results to context
                "previous_context": request.context or {}
            }
            
            # Send final complete response
            yield f"data: {json.dumps({'type': 'complete', 'data': response_data})}\n\n"
            
            logger.info(f"Streaming chatbot request completed. Search results: {len(search_results)}")
            
        except Exception as e:
            # Deliberate catch-all: a mid-stream failure must be surfaced as
            # an 'error' event — an exception here would just drop the stream.
            logger.error(f"Error in streaming chatbot processing: {str(e)}")
            error_response = {
                "type": "error",
                "data": {
                    "error": str(e),
                    "status_code": 500
                }
            }
            yield f"data: {json.dumps(error_response)}\n\n"
    
    # NOTE(review): media_type is "text/plain" but the Content-Type header
    # below overrides it to "text/event-stream" — kept as-is.
    return StreamingResponse(
        generate_stream(),
        media_type="text/plain",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Content-Type": "text/event-stream"
        }
    )

from advanced_chatbot.schemas import EnhancedAdvancedChatbotResponse
import json
import time

@app.post("/advanced-chatbot-query-non-stream", response_model=EnhancedAdvancedChatbotResponse)
async def advanced_chatbot_query(request: AdvancedChatbotRequest):
    """
    Enhanced chatbot endpoint with real-time web search and structured URLs.

    Always returns an EnhancedAdvancedChatbotResponse with type="complete";
    errors are reported inside data.metadata rather than as HTTP errors.

    NOTE(review): this function shares its name with the streaming handler
    above, so the later definition shadows the earlier one at module level
    (both routes still register). Recommend renaming one of them.
    """
    # BUG FIX: set start_time before the try so the error path below can
    # always compute error_time without risking a NameError.
    start_time = time.time()
    try:
        logger.info(f"Processing chatbot request for query: {request.query}")
        logger.info(f"User ID: {request.user_id}, Session ID: {request.session_id}")
        
        # Validate request
        if not request.query or not request.query.strip():
            return EnhancedAdvancedChatbotResponse(
                type="complete",
                data={
                    "response": "Please provide a valid query.",
                    "search_results": [],
                    "context": request.context,
                    "metadata": {
                        "error": "Empty query provided", 
                        "processing_time": 0,
                        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                        "user_id": request.user_id,
                        "session_id": request.session_id,
                        "search_performed": False,
                        "search_results_count": 0
                    }
                }
            )
        
        # Import chatbot functions (deferred so the module loads even when the
        # chatbot package is unavailable at startup)
        from advanced_chatbot.chatbot import generate_response_with_llm_search
        from advanced_chatbot.search_tool import get_search_summary, get_structured_search_results
        
        # Generate response with search
        ai_response, search_results = generate_response_with_llm_search(request.query)
        
        # Get structured search results with separate content and URL
        structured_results = get_structured_search_results(request.query, num_results=5)
        
        # Format search results for response (backward compatibility)
        formatted_search_results = []
        for i, result in enumerate(structured_results, 1):
            formatted_result = f"{i}. {result['content']}\n🔗 URL: {result['url']}"
            formatted_search_results.append(formatted_result)
        
        # Generate search summary
        search_summary = get_search_summary(request.query, search_results)
        
        # Calculate processing time
        processing_time = time.time() - start_time
        
        # Prepare context (merge with existing context if provided)
        response_context = {
            "last_query": request.query,
            "last_response": ai_response[:100] + "..." if len(ai_response) > 100 else ai_response,
            "search_results": formatted_search_results[:3],  # Keep top 3 for context
            "previous_context": request.context or {}
        }
        
        # Prepare metadata
        metadata = {
            "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "query_length": len(request.query),
            "user_id": request.user_id,
            "session_id": request.session_id,
            "search_performed": len(search_results) > 0,
            "search_results_count": len(formatted_search_results),
            "search_summary": search_summary
        }
        
        # Prepare the response data
        response_data = {
            "response": ai_response,
            "search_results": formatted_search_results,
            "context": response_context,
            "metadata": metadata
        }
        
        logger.info(f"Chatbot request completed in {processing_time:.3f}s")
        
        return EnhancedAdvancedChatbotResponse(
            type="complete",
            data=response_data
        )
        
    except Exception as e:
        # Deliberate catch-all: this endpoint reports failures inside the
        # response body instead of raising HTTP errors.
        logger.error(f"Error in advanced_chatbot_query: {str(e)}")
        error_time = time.time() - start_time
        
        return EnhancedAdvancedChatbotResponse(
            type="complete",
            data={
                "response": f"I apologize, but I encountered an error while processing your request: {str(e)}",
                "search_results": [],
                "context": request.context,
                "metadata": {
                    "error": str(e),
                    "processing_time": round(error_time, 3),
                    "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                    "user_id": request.user_id,
                    "session_id": request.session_id,
                    "search_performed": False,
                    "search_results_count": 0,
                    "status": "error"
                }
            }
        )

# Alternative endpoint for testing search functionality only
@app.post("/test-search")
async def test_search(request: dict):
    """
    Test endpoint for search functionality.

    Expects a JSON body with a "query" key; returns the raw search results
    and their summary.

    Raises:
        HTTPException 400: missing/empty query.
        HTTPException 500: any failure during the search itself.
    """
    try:
        query = request.get("query", "")
        if not query:
            raise HTTPException(status_code=400, detail="Query is required")
        
        # Deferred import so the module loads even when the chatbot package
        # is unavailable at startup.
        from advanced_chatbot.search_tool import perform_web_search, get_search_summary
        
        search_results = perform_web_search(query, num_results=5)
        search_summary = get_search_summary(query, search_results)
        
        return {
            "query": query,
            "search_results": search_results,
            "search_summary": search_summary,
            "results_count": len(search_results)
        }
        
    except HTTPException:
        # BUG FIX: the 400 validation error above was being caught by the
        # blanket handler below and re-raised as a 500.
        raise
    except Exception as e:
        logger.error(f"Error in test search: {str(e)}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Search test error: {str(e)}"
        )
    
""" ---------------------------------------------------------------------------------------------- ROLEPLAY START ------------------------------------------------------------------------------------------------------     """

from fastapi import HTTPException
from pydantic import BaseModel, Field
from typing import List, Optional, Dict, Any
from roleplay_processing import RoleplayProcessor

class OrganizationKnowledgeBase(BaseModel):
    """Organisation facts the roleplay processor grounds its responses in."""
    organisation_name: str
    organisation_objective: str
    organisation_information: str

class SkillForRoleplay(BaseModel):
    """A skill the roleplay session should focus on."""
    skill_id: str = Field(..., description="Unique identifier for the skill")
    skill_name: str = Field(..., description="Name of the skill")

class RoleplayRequest(BaseModel):
    """Request payload for /fetch-roleplay-data."""
    client_id: str
    user_type: str  # admin, manager, learner
    session_id: Optional[int] = None
    org_knowledge_base: OrganizationKnowledgeBase
    # free-form scenario definition — schema owned by RoleplayProcessor
    roleplay_data: Dict[str, Any]
    skills_for_roleplay: List[SkillForRoleplay] = Field(default_factory=list, description="Skills to focus on during roleplay")
    is_suggested_responses: int = Field(..., ge=0, le=1, description="1 to include suggested responses, 0 to exclude")
    # prior conversation memory (Pydantic copies mutable defaults per instance)
    previous_roleplay_memory: Optional[List[str]] = []
    query: str  # the user's current utterance to respond to

class RoleplayResponse(BaseModel):
    """Response payload for /fetch-roleplay-data."""
    success: bool
    response: str  # the roleplay character's reply
    updated_memory: List[str]  # conversation memory to pass back on the next turn
    session_id: int
    suggested_responses: List[str] = Field(default_factory=list, description="Suggested responses when is_suggested_responses=1")
    timestamp: str

@app.post("/fetch-roleplay-data", response_model=RoleplayResponse)
async def fetch_roleplay_data(request: RoleplayRequest):
    """
    Process roleplay data and generate a conversational response with
    optional suggested responses.

    Raises:
        HTTPException 400: invalid user_type, missing required fields, or
            malformed skill entries.
        HTTPException 500: any processing failure inside RoleplayProcessor.
    """
    try:
        # Validate user_type
        if request.user_type not in ["admin", "manager", "learner"]:
            raise HTTPException(status_code=400, detail="Invalid user_type. Must be admin, manager, or learner")

        # Validate required fields (largely duplicates Pydantic's own checks;
        # kept for explicit 400 responses with these exact messages)
        if not request.client_id:
            raise HTTPException(status_code=400, detail="client_id is required")

        if not request.org_knowledge_base:
            raise HTTPException(status_code=400, detail="org_knowledge_base is required")

        if not request.roleplay_data:
            raise HTTPException(status_code=400, detail="roleplay_data is required")

        # Validate is_suggested_responses value
        if request.is_suggested_responses not in [0, 1]:
            raise HTTPException(status_code=400, detail="is_suggested_responses must be either 0 or 1")

        # Validate skills data if provided
        if request.skills_for_roleplay:
            for skill in request.skills_for_roleplay:
                if not skill.skill_id or not skill.skill_name:
                    raise HTTPException(status_code=400, detail="Each skill must have both skill_id and skill_name")

        # Convert Pydantic skill models to plain dicts for the processor
        skills_data = [
            {"skill_id": skill.skill_id, "skill_name": skill.skill_name}
            for skill in request.skills_for_roleplay
        ]

        # Initialize roleplay processor
        processor = RoleplayProcessor()

        # Process the roleplay request
        result = await processor.process_roleplay(
            client_id=request.client_id,
            user_type=request.user_type,
            session_id=request.session_id,
            org_knowledge_base=request.org_knowledge_base.dict(),
            roleplay_data=request.roleplay_data,
            skills_for_roleplay=skills_data,
            is_suggested_responses=request.is_suggested_responses,
            previous_memory=request.previous_roleplay_memory,
            query=request.query
        )

        return RoleplayResponse(
            success=True,
            response=result["response"],
            updated_memory=result["updated_memory"],
            session_id=result["session_id"],
            suggested_responses=result.get("suggested_responses", []),
            timestamp=result["timestamp"]
        )

    except HTTPException:
        # BUG FIX: the 400 validation errors above were being caught by the
        # blanket handler below and re-raised as 500s with mangled details.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


""" ---------------------------------------------------------------------------------------------- ROLEPLAY END ------------------------------------------------------------------------------------------------------     """    
    
    
def execute_subprocess(script_path, temp_file_path):
    """Run ``python3 script_path temp_file_path`` and return the completed process.

    stdout/stderr are captured as text. Raises subprocess.CalledProcessError
    on a non-zero exit status (check=True).
    """
    command = ["python3", script_path, temp_file_path]
    return subprocess.run(command, capture_output=True, text=True, check=True)

def cleanup_temp_file(file_path: str) -> None:
    """Best-effort deletion of a temporary file.

    Runs as a background task after a request completes; all errors are
    intentionally swallowed so cleanup can never affect a response.
    """
    try:
        if not os.path.exists(file_path):
            return
        os.unlink(file_path)
    except Exception:
        pass  # deliberate: cleanup failures are non-fatal