# DocPres-inspired LLM Pipeline for Slide Generation

import os
import re

from langchain.text_splitter import MarkdownHeaderTextSplitter, RecursiveCharacterTextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.llms import OpenAI
from langchain_community.vectorstores import FAISS
from langchain_core.prompts import PromptTemplate
from langchain_experimental.text_splitter import SemanticChunker
from langchain_groq import ChatGroq
from langchain_ollama import OllamaEmbeddings
# Step 0: Configuration
# SECURITY FIX: never commit API keys to source control. Read the Groq key
# from the environment; an empty default makes the missing-key failure
# explicit at the first API call instead of silently using a leaked secret.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY", "")
OLLAMA_MODEL = "nomic-embed-text"  # local Ollama embedding model
NUM_SLIDES = 10                    # number of slides to generate
DOCUMENT_PATH = "cyber.pdf"        # input PDF to summarize

# Step 1: Load Document
pdf = PyPDFLoader(file_path=DOCUMENT_PATH)
loader = pdf.load()  # list of per-page Documents (name kept; reused downstream)

# Step 2: Chunk the FULL document for hierarchical summarization.
# BUG FIX: the original split only loader[0].page_content (page 0 of the PDF),
# so every other page was silently excluded from the summary.
full_text = "\n".join(doc.page_content for doc in loader)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000, chunk_overlap=200)
text_chunks = text_splitter.split_text(full_text)

# Per-chunk summarization prompt: compress each chunk into 2-3 bullet points.
chunk_summary_prompt = PromptTemplate.from_template(
    """
    Summarize the following text into 2-3 bullet points capturing the key ideas:

    {chunk}
    """
)

# FIX: original had a duplicated assignment (`llm = llm = ChatGroq(...)`).
llm = ChatGroq(model_name='llama-3.3-70b-versatile', groq_api_key=GROQ_API_KEY)
chunk_summary_chain = chunk_summary_prompt | llm

# Summarize each chunk; .content extracts the plain string from the AIMessage.
chunk_summaries = [
    chunk_summary_chain.invoke({"chunk": chunk}).content
    for chunk in text_chunks
]

# Aggregate per-chunk summaries into a single input for the bird's-eye pass.
final_summary_input = "\n".join(chunk_summaries)

# Step 2b: Aggregate the chunk summaries into a bird's-eye view of the document.
bird_eye_prompt = PromptTemplate.from_template(
    """
    Based on the following summaries, generate a high-level bird's eye view of the full document.
    Summarize in 3-5 bullet points:

    {document}
    """
)
bird_eye_chain = bird_eye_prompt | llm
# FIX: keep the plain string (.content), not the AIMessage wrapper, so the
# summary interpolates cleanly into downstream prompts instead of its repr().
bird_eye_summary = bird_eye_chain.invoke({"document": final_summary_input}).content

# print("\n🔭 Bird's Eye View (Hierarchical):")
# print(bird_eye_summary)

# Step 3: Outline Generation — one bullet topic per slide.
outline_prompt = PromptTemplate.from_template(
    """
    Based on the summary below, generate an outline with {num} slide topics.

    Summary:
    {summary}

    Provide {num} bullet points, one for each slide.
    """
)

outline_chain = outline_prompt | llm
# FIX: interpolate the summary TEXT into the prompt. The original passed the
# whole AIMessage object, so its repr() (content= plus metadata) leaked into
# the prompt. getattr() handles both AIMessage and plain-string inputs.
summary_text = getattr(bird_eye_summary, "content", bird_eye_summary)
outline = outline_chain.invoke({"summary": summary_text, "num": NUM_SLIDES})

# print("\n📋 Outline:")
# print(outline)

# Step 4: Embed semantic chunks and map each slide topic to relevant sections.
embedding_model = OllamaEmbeddings(model=OLLAMA_MODEL)
semantic_splitter = SemanticChunker(  # renamed: don't shadow text_splitter above
    embedding_model,
    breakpoint_threshold_type="percentile",  # alt: "standard_deviation", "interquartile"
)
documents = semantic_splitter.create_documents([doc.page_content for doc in loader])
vectorstore = FAISS.from_documents(documents, embedding_model)

# Parse the outline into one topic per line. ROBUSTNESS FIX: the original
# strip("- ") only handled "- " bullets; LLM outlines frequently use "*",
# "•", or numbered "1." / "1)" markers — strip one leading marker of any kind.
slide_topics = [
    re.sub(r"^\s*(?:[-*•]|\d+[.)])\s*", "", line).strip()
    for line in outline.content.strip().split("\n")
    if line.strip()
]

# Retrieve the top-2 most similar semantic chunks for each slide topic.
slide_to_section_map = {
    topic: vectorstore.similarity_search(topic, k=2) for topic in slide_topics
}

# Prompt that turns a topic plus its retrieved context into one formatted slide.
slide_prompt = PromptTemplate.from_template(
    """
    Create a slide with a clear title and 2-4 informative bullet points based on the following topic and content:

    Slide Topic: {topic}
    Content:
    {content}

    Format:
    1) Heading: <Slide Title>
    2) Points:
    - Bullet 1
    - Bullet 2
    """
)

slide_chain = slide_prompt | llm

print("\n📝 Generated Slides:")

# Render each mapped topic as a slide, numbering from 1.
slide_number = 1
for topic, matched_docs in slide_to_section_map.items():
    combined_content = "\n".join(doc.page_content for doc in matched_docs)
    slide = slide_chain.invoke({"topic": topic, "content": combined_content})
    print(f"\nSlide {slide_number}:")
    print(slide.content)
    slide_number += 1
