import requests
import logging
import re
import time
import random
from typing import List, Dict, Optional
from urllib.parse import urlparse
from concurrent.futures import ThreadPoolExecutor, as_completed
from dataclasses import dataclass
from collections import defaultdict

from bs4 import BeautifulSoup
import urllib3
import cloudscraper

# Suppress SSL warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

logger = logging.getLogger(__name__)

@dataclass
class SearchResult:
    """Enhanced search result with URL and metadata"""
    title: str
    url: str
    snippet: str
    source: str
    relevance_score: float
    domain: str = ""
    timestamp: Optional[str] = None
    content_quality: float = 0.0

class ReliableWebSearchEngine:
    """Highly reliable web scraping search engine with multiple fallback strategies"""
    
    def __init__(self):
        # Initialize multiple session types for reliability
        self.sessions = {
            'cloudscraper': cloudscraper.create_scraper(),
            'requests': requests.Session(),
            'requests_alt': requests.Session()
        }
        
        # Configure all sessions; timeouts are passed per request in
        # _make_request_with_fallback, since requests.Session has no
        # global timeout attribute
        for session in self.sessions.values():
            session.verify = False
            session.headers.update({
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                'Accept-Language': 'en-US,en;q=0.9',
                'Accept-Encoding': 'gzip, deflate',
                'Connection': 'keep-alive',
                'Upgrade-Insecure-Requests': '1',
                'Cache-Control': 'no-cache',
                'DNT': '1'
            })
        
        # Initialize user agent rotator
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
            'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/121.0',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0'
        ]
        
        # Enhanced search engines configuration with multiple selectors
        self.search_engines = {
            'google': {
                'urls': [
                    'https://www.google.com/search',
                    'https://google.com/search',
                    'https://www.google.co.uk/search'
                ],
                'params': {'q': '', 'num': 20, 'hl': 'en', 'gl': 'us', 'start': 0},
                'result_selectors': [
                    'div.g',
                    'div[data-sokoban-container]',
                    '.rc',
                    'div.yuRUbf'
                ],
                'title_selectors': [
                    'h3',
                    'h3.LC20lb',
                    'h3.r',
                    'div.yuRUbf h3'
                ],
                'link_selectors': [
                    'a',
                    'div.yuRUbf a',
                    'h3 a'
                ],
                'snippet_selectors': [
                    '.VwiC3b',
                    '.s3v9rd',
                    '.st',
                    'span.aCOpRe',
                    'div.IsZvec'
                ]
            },
            'bing': {
                'urls': [
                    'https://www.bing.com/search',
                    'https://bing.com/search'
                ],
                'params': {'q': '', 'count': 20, 'setlang': 'en', 'first': 1},
                'result_selectors': [
                    'li.b_algo',
                    'li.b_ad',
                    '.b_algo'
                ],
                'title_selectors': [
                    'h2',
                    'h2 a',
                    '.b_title'
                ],
                'link_selectors': [
                    'a',
                    'h2 a'
                ],
                'snippet_selectors': [
                    '.b_caption p',
                    '.b_caption',
                    '.b_snippet'
                ]
            },
            'duckduckgo': {
                'urls': [
                    'https://duckduckgo.com/html',
                    'https://html.duckduckgo.com/html'
                ],
                'params': {'q': '', 'kl': 'us-en', 's': 0},
                'result_selectors': [
                    '.result',
                    'div.result'
                ],
                'title_selectors': [
                    '.result__title',
                    '.result__title a'
                ],
                'link_selectors': [
                    '.result__title a',
                    'a.result__a'
                ],
                'snippet_selectors': [
                    '.result__snippet',
                    '.result__body'
                ]
            },
            'yandex': {
                'urls': [
                    'https://yandex.com/search',
                    'https://www.yandex.com/search'
                ],
                'params': {'text': '', 'lr': 84, 'numdoc': 20},
                'result_selectors': [
                    '.serp-item',
                    'li.serp-item'
                ],
                'title_selectors': [
                    'h2.serp-item__title',
                    '.serp-item__title'
                ],
                'link_selectors': [
                    'h2.serp-item__title a',
                    '.serp-item__title a'
                ],
                'snippet_selectors': [
                    '.serp-item__text',
                    '.serp-item__snippet'
                ]
            }
        }
        
        # Success tracking
        self.success_stats = defaultdict(int)
        
    def _get_random_user_agent(self) -> str:
        """Get a random user agent"""
        return random.choice(self.user_agents)
    
    def _make_request_with_fallback(self, url: str, params: dict, headers: dict, max_retries: int = 3) -> Optional[requests.Response]:
        """Make request with multiple session fallbacks"""
        session_order = ['cloudscraper', 'requests', 'requests_alt']
        
        for attempt in range(max_retries):
            for session_name in session_order:
                try:
                    session = self.sessions[session_name]
                    
                    # Update user agent for each attempt
                    current_headers = headers.copy()
                    current_headers['User-Agent'] = self._get_random_user_agent()
                    
                    # Add random delay
                    time.sleep(random.uniform(0.3, 1.0))
                    
                    response = session.get(
                        url,
                        params=params,
                        headers=current_headers,
                        timeout=20,
                        allow_redirects=True
                    )
                    
                    if response.status_code == 200:
                        self.success_stats[session_name] += 1
                        return response
                    elif response.status_code == 429:
                        # Rate limited, wait longer
                        time.sleep(random.uniform(2, 5))
                        continue
                    else:
                        logger.warning(f"HTTP {response.status_code} from {session_name}")
                        continue
                        
                except Exception as e:
                    logger.warning(f"Request failed with {session_name}: {e}")
                    continue
            
            # Wait before retry
            if attempt < max_retries - 1:
                time.sleep(random.uniform(1, 3))
        
        return None
    
    def _clean_url(self, url: str, base_url: Optional[str] = None) -> str:
        """Clean and normalize URLs"""
        if not url:
            return ""
        
        # Handle Google redirect URLs first: they also start with '/', so they
        # must be unwrapped before the generic relative-URL handling below
        if url.startswith('/url?q='):
            url = url.split('/url?q=')[1].split('&')[0]
            try:
                url = requests.utils.unquote(url)
            except Exception:
                pass
        
        # Handle other relative URLs
        elif url.startswith('/'):
            if base_url:
                parsed_base = urlparse(base_url)
                return f"{parsed_base.scheme}://{parsed_base.netloc}{url}"
            return url
        
        # Handle Bing redirect URLs
        if 'bing.com/ck/a' in url:
            return ""
        
        # Skip internal search engine URLs
        skip_domains = ['google.com', 'bing.com', 'duckduckgo.com', 'yandex.com']
        try:
            parsed_url = urlparse(url)
            if any(domain in parsed_url.netloc for domain in skip_domains):
                return ""
        except Exception:
            pass
        
        # Ensure URL has scheme
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url
        
        return url
    
    def _extract_text_content(self, element) -> str:
        """Extract clean text content from BeautifulSoup element"""
        if not element:
            return ""
        
        # Collapse all whitespace (including tabs and newlines) into single spaces
        text = element.get_text(strip=True)
        text = re.sub(r'\s+', ' ', text)
        return text.strip()
    
    def _calculate_relevance(self, query: str, title: str, snippet: str) -> float:
        """Calculate relevance score for search result"""
        if not query or not title:
            return 0.0
        
        query_words = set(query.lower().split())
        title_words = set(title.lower().split())
        snippet_words = set(snippet.lower().split()) if snippet else set()
        
        # Calculate word overlap
        title_overlap = len(query_words & title_words) / len(query_words) if query_words else 0
        snippet_overlap = len(query_words & snippet_words) / len(query_words) if query_words else 0
        
        # Weighted relevance score
        relevance = (title_overlap * 0.7) + (snippet_overlap * 0.3)
        
        # Boost for exact phrase match
        if query.lower() in title.lower():
            relevance += 0.3
        elif query.lower() in snippet.lower():
            relevance += 0.2
        
        return min(relevance, 1.0)
    
    def _scrape_search_engine(self, engine_name: str, query: str, num_results: int = 10) -> List[SearchResult]:
        """Scrape a specific search engine with enhanced reliability"""
        if engine_name not in self.search_engines:
            return []
        
        engine_config = self.search_engines[engine_name]
        results = []
        
        # Try multiple URLs for the same engine
        for base_url in engine_config['urls']:
            try:
                # Prepare headers
                headers = {
                    'User-Agent': self._get_random_user_agent(),
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.9',
                    'Accept-Encoding': 'gzip, deflate',
                    'Connection': 'keep-alive',
                    'Upgrade-Insecure-Requests': '1',
                    'Cache-Control': 'no-cache',
                    'DNT': '1'
                }
                
                # Engine-specific headers
                if engine_name == 'google':
                    headers.update({
                        'Sec-Fetch-Dest': 'document',
                        'Sec-Fetch-Mode': 'navigate',
                        'Sec-Fetch-Site': 'none'
                    })
                elif engine_name == 'bing':
                    headers['Referer'] = 'https://www.bing.com/'
                
                # Build parameters
                params = engine_config['params'].copy()
                if 'q' in params:
                    params['q'] = query
                elif 'text' in params:
                    params['text'] = query
                
                # Make request with fallback
                response = self._make_request_with_fallback(base_url, params, headers)
                
                if not response:
                    continue
                
                # Parse HTML
                soup = BeautifulSoup(response.content, 'html.parser')
                
                # Try multiple selectors for results
                result_elements = []
                for selector in engine_config['result_selectors']:
                    elements = soup.select(selector)
                    if elements:
                        result_elements = elements
                        break
                
                logger.info(f"{engine_name.capitalize()}: Found {len(result_elements)} result elements")
                
                # Extract results
                for element in result_elements[:num_results]:
                    try:
                        # Extract title with multiple selectors
                        title = ""
                        for title_selector in engine_config['title_selectors']:
                            title_elem = element.select_one(title_selector)
                            if title_elem:
                                title = self._extract_text_content(title_elem)
                                if title and len(title) > 3:
                                    break
                        
                        if not title:
                            continue
                        
                        # Extract URL with multiple selectors
                        url = ""
                        for link_selector in engine_config['link_selectors']:
                            link_elem = element.select_one(link_selector)
                            if link_elem:
                                url = link_elem.get('href', '')
                                url = self._clean_url(url, base_url)
                                if url and url.startswith('http'):
                                    break
                        
                        if not url:
                            continue
                        
                        # Extract snippet with multiple selectors
                        snippet = ""
                        for snippet_selector in engine_config['snippet_selectors']:
                            snippet_elem = element.select_one(snippet_selector)
                            if snippet_elem:
                                snippet = self._extract_text_content(snippet_elem)
                                if snippet:
                                    break
                        
                        # Calculate relevance
                        relevance = self._calculate_relevance(query, title, snippet)
                        
                        # Create result
                        result = SearchResult(
                            title=title[:200],
                            url=url,
                            snippet=snippet[:300] if snippet else "",
                            source=engine_name,
                            relevance_score=relevance,
                            domain=urlparse(url).netloc.lower(),
                            content_quality=relevance
                        )
                        
                        results.append(result)
                        
                    except Exception as e:
                        logger.warning(f"Error parsing {engine_name} result: {e}")
                        continue
                
                logger.info(f"{engine_name.capitalize()}: Successfully extracted {len(results)} results")
                
                # If we got results, break out of URL loop
                if results:
                    break
                    
            except Exception as e:
                logger.error(f"{engine_name.capitalize()} scraping error with {base_url}: {e}")
                continue
        
        return results
    
    def search_multiple_engines(self, query: str, num_results: int = 5) -> List[SearchResult]:
        """Search multiple engines with enhanced reliability"""
        if not query or not query.strip():
            return []
        
        query = query.strip()
        all_results = []
        
        # Define engines in order of preference
        engines_to_use = ['google', 'bing', 'duckduckgo', 'yandex']
        
        # Sequential search with immediate fallback
        for engine in engines_to_use:
            try:
                results = self._scrape_search_engine(engine, query, num_results * 2)
                if results:
                    all_results.extend(results)
                    logger.info(f"{engine.capitalize()}: Got {len(results)} results")
                    
                    # If we have enough results, we can stop
                    if len(all_results) >= num_results:
                        break
                        
            except Exception as e:
                logger.error(f"{engine.capitalize()} search failed: {e}")
                continue
        
        # If sequential didn't work well, try parallel
        if len(all_results) < num_results:
            logger.info("Attempting parallel search for better results...")
            
            with ThreadPoolExecutor(max_workers=2) as executor:
                remaining_engines = [e for e in engines_to_use if e not in [r.source for r in all_results]]
                
                future_to_engine = {
                    executor.submit(self._scrape_search_engine, engine, query, num_results): engine
                    for engine in remaining_engines[:2]  # Limit to 2 concurrent
                }
                
                for future in as_completed(future_to_engine):
                    engine_name = future_to_engine[future]
                    try:
                        results = future.result(timeout=30)
                        if results:
                            all_results.extend(results)
                            logger.info(f"{engine_name.capitalize()}: Got {len(results)} additional results")
                    except Exception as e:
                        logger.error(f"{engine_name.capitalize()} parallel search failed: {e}")
        
        if not all_results:
            logger.warning("No results from any search engine")
            return []
        
        # Deduplicate and rank results
        unique_results = self._deduplicate_results(all_results)
        ranked_results = sorted(unique_results, key=lambda x: x.relevance_score, reverse=True)
        
        logger.info(f"Total unique results: {len(ranked_results)}")
        return ranked_results[:num_results]
    
    def _deduplicate_results(self, results: List[SearchResult]) -> List[SearchResult]:
        """Remove duplicate results based on URL and title similarity"""
        if not results:
            return []
        
        unique_results = []
        seen_urls = set()
        seen_titles = set()
        
        # Sort by relevance first
        sorted_results = sorted(results, key=lambda x: x.relevance_score, reverse=True)
        
        for result in sorted_results:
            # Skip if URL already seen
            if result.url in seen_urls:
                continue
            
            # Check title similarity
            title_normalized = re.sub(r'[^\w\s]', '', result.title.lower()).strip()
            title_words = set(title_normalized.split())
            
            # Check if similar title exists
            is_duplicate = False
            for seen_title in seen_titles:
                seen_words = set(seen_title.split())
                if title_words and seen_words:
                    overlap = len(title_words & seen_words) / len(title_words | seen_words)
                    if overlap > 0.7:
                        is_duplicate = True
                        break
            
            if not is_duplicate:
                seen_urls.add(result.url)
                seen_titles.add(title_normalized)
                unique_results.append(result)
        
        return unique_results

# Initialize the enhanced search engine
search_engine = ReliableWebSearchEngine()

def perform_web_search(query: str, num_results: int = 5) -> List[str]:
    """
    Perform highly reliable web search with multiple fallback strategies
    """
    if not query or not query.strip():
        return ["Please provide a valid search query."]
    
    try:
        # Limit results to reasonable range
        num_results = min(max(num_results, 1), 20)
        
        # Perform search with enhanced reliability
        results = search_engine.search_multiple_engines(query, num_results)
        
        if not results:
            return ["No relevant results found. Please try rephrasing your query."]
        
        # Format results with enhanced presentation
        formatted_results = []
        for i, result in enumerate(results, 1):
            # Source badges
            source_badges = {
                'google': '[GOOGLE] 🔍',
                'bing': '[BING] 🔍',
                'duckduckgo': '[DUCKDUCKGO] 🦆',
                'yandex': '[YANDEX] 🔍'
            }
            source_badge = source_badges.get(result.source, f'[{result.source.upper()}]')
            
            # Domain indicator
            domain_name = result.domain
            if len(domain_name) > 30:
                domain_name = domain_name[:27] + "..."
            
            # Relevance stars
            relevance_stars = '⭐' * min(5, max(1, int(result.relevance_score * 5)))
            
            # Format snippet
            snippet = result.snippet
            if len(snippet) > 200:
                snippet = snippet[:197] + "..."
            
            # Build formatted result
            formatted_result = (
                f"{source_badge} {result.title}\n"
                f"{snippet}\n"
                f"🔗 URL: {result.url}\n"
                f"🌐 Domain: {domain_name}\n"
                f"📊 Relevance: {relevance_stars} ({result.relevance_score:.2f})"
            )
            
            formatted_results.append(formatted_result)
        
        return formatted_results
        
    except Exception as e:
        logger.error(f"Web search error: {e}")
        return [f"Search encountered an error: {str(e)}. Please try again."]

def get_search_summary(query: str, search_results: List[str]) -> str:
    """
    Generate intelligent search summary with enhanced analytics
    """
    if not search_results:
        return "No search results generated."
    
    if any("error" in result.lower() for result in search_results):
        return f"Search encountered issues for query: '{query}'"
    
    # Analyze result sources
    source_counts = {'GOOGLE': 0, 'BING': 0, 'DUCKDUCKGO': 0, 'YANDEX': 0}
    avg_relevance = 0.0
    total_relevance_scores = 0
    
    for result in search_results:
        lines = result.split('\n')
        if lines:
            first_line = lines[0]
            # Count sources
            for source in source_counts.keys():
                if f'[{source}]' in first_line:
                    source_counts[source] += 1
                    break
            
            # Extract relevance score
            for line in lines:
                if 'Relevance:' in line and '(' in line:
                    try:
                        score_text = line.split('(')[1].split(')')[0]
                        score = float(score_text)
                        avg_relevance += score
                        total_relevance_scores += 1
                    except (ValueError, IndexError):
                        pass
    
    if total_relevance_scores > 0:
        avg_relevance /= total_relevance_scores
    
    # Build source summary
    source_summary = ', '.join([f"{count} from {source}" for source, count in source_counts.items() if count > 0])
    
    # Quality rating
    if avg_relevance > 0.8:
        quality_rating = "Excellent"
    elif avg_relevance > 0.6:
        quality_rating = "Good"
    elif avg_relevance > 0.4:
        quality_rating = "Fair"
    else:
        quality_rating = "Basic"
    
    return (
        f"🔍 Enhanced Web Search Results for '{query}':\n"
        f"📊 Found {len(search_results)} high-quality results\n"
        f"🌐 Sources: {source_summary}\n"
        f"⭐ Average Relevance: {quality_rating} ({avg_relevance:.2f}/1.00)\n"
        f"🚀 Multi-engine scraping with reliability fallbacks\n"
        f"🔗 All results verified with direct URLs"
    )

def get_structured_search_results(query: str, num_results: int = 5) -> List[Dict[str, str]]:
    """
    Get structured search results with enhanced reliability
    """
    if not query or not query.strip():
        return []
    
    try:
        # Get search results using enhanced search engine
        results = search_engine.search_multiple_engines(query, num_results)
        
        if not results:
            return []
        
        # Format results as structured data
        structured_results = []
        for result in results:
            # Create formatted content
            content = (
                f"[{result.source.upper()}] 🔍 {result.title}\n"
                f"{result.snippet}\n"
                f"🌐 Domain: {result.domain}\n"
                f"📊 Relevance: {'⭐' * min(5, max(1, int(result.relevance_score * 5)))} ({result.relevance_score:.2f})"
            )
            
            structured_result = {
                "content": content,
                "url": result.url,
                "title": result.title,
                "snippet": result.snippet,
                "source": result.source,
                "domain": result.domain,
                "relevance_score": result.relevance_score
            }
            structured_results.append(structured_result)
        
        return structured_results
        
    except Exception as e:
        logger.error(f"Structured search error: {e}")
        return []
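

# A minimal usage sketch: exercises the helpers above end to end. The query
# string is a hypothetical example, and live network access is assumed; the
# engines' markup changes often, so an empty result list is a possible outcome.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    demo_query = "python web scraping best practices"
    hits = perform_web_search(demo_query, num_results=3)
    for hit in hits:
        print(hit)
        print("-" * 60)

    print(get_search_summary(demo_query, hits))

    # Structured form, e.g. for downstream JSON serialization
    for item in get_structured_search_results(demo_query, num_results=3):
        print(f"{item['title']} -> {item['url']}")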