import os
import re
import json

from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq


def auto_scenario_chat(groq_api_key, scenario, output_filename="Conver.json"):
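    """Generate a simulated front-desk conversation for `scenario` with a Groq
    chat model, save it to `output_filename`, and return it as a dict.

    Falls back to wrapping the raw model output in an error dict when the
    model does not return valid JSON.
    """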
    # Initialize the LLaMA 3.1 model served by Groq
    llm = ChatGroq(
        model="llama-3.1-8b-instant",
        temperature=0.7,
        max_tokens=500,
        timeout=None,
        max_retries=2,
        groq_api_key=groq_api_key,
    )

    # Define the chat history with the initial scenario; instruct the model to
    # return JSON so the parsing step below can succeed
    chat_history = [
        ("system", "You are an AI assistant trained to handle real-world scenarios professionally. "
                   "You will simulate both the client's complaint and the AI's response, producing a full conversation. "
                   "The setting is a front-desk office scenario. "
                   "Respond only with a valid JSON object."),
        ("user", f"Scenario: {scenario}\n\nGenerate a full back-and-forth conversation.")
    ]

    # Generate the AI response; format_messages() preserves the role structure,
    # whereas format() would flatten the conversation into a single string
    prompt_template = ChatPromptTemplate.from_messages(chat_history)
    formatted_prompt = prompt_template.format_messages()
    ai_response = llm.invoke(formatted_prompt)

    # Extract content (text response) from the AIMessage object
    ai_response_text = ai_response.content if hasattr(ai_response, "content") else str(ai_response)

    # Ensure proper JSON formatting; models often wrap JSON in ```json fences,
    # so strip any before parsing
    stripped = re.sub(r"^```(?:json)?\s*|\s*```$", "", ai_response_text.strip())
    try:
        conversation_json = json.loads(stripped)
    except json.JSONDecodeError:
        conversation_json = {
            "error": "Invalid JSON response from LLM",
            "raw_response": ai_response_text
        }

    # Save the conversation JSON to a file with proper formatting
    with open(output_filename, "w", encoding="utf-8") as json_file:
        json.dump(conversation_json, json_file, indent=4, ensure_ascii=False)

    return conversation_json

if __name__ == "__main__":
    # Read the API key from the environment rather than hardcoding a secret
    GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
    #scenario = "A customer complains that their food delivery is late. How should customer support respond?"
    #scenario = "A customer at a restaurant complains about spoiled food and feeling sick." 
    scenario = " Asks the guest to retry, suggests alternative payment methods, and contacts the bank if needed."
    response = auto_scenario_chat(GROQ_API_KEY, scenario)
    print(json.dumps(response, indent=2))  # Pretty-print the JSON response


