# --- Earlier approach (kept for reference): load the PDF directly with PyPDFLoader
# --- and chunk it with SemanticChunker, skipping the Markdown conversion below.
# from langchain_community.document_loaders import PyPDFLoader
# from langchain_experimental.text_splitter import SemanticChunker
# from langchain_ollama import OllamaEmbeddings
# # (OpenAIEmbeddings or HuggingFaceEmbeddings could be swapped in for Ollama.)

# # Load the PDF with LangChain; extract_images=True also extracts text from embedded images
# loader = PyPDFLoader("1719331743Code-of-Conduct.pdf", extract_images=True)
# docs = loader.load()
# all_page_contents = [doc.page_content for doc in docs]
# print(all_page_contents)

# embeddings = OllamaEmbeddings(model='nomic-embed-text')

# def split_text_with_semantic_chunker(docs, embeddings):
#     """Split the loaded pages into semantic chunks using the given embeddings."""
#     text_splitter = SemanticChunker(
#         embeddings,
#         breakpoint_threshold_type="percentile",  # or "standard_deviation", "interquartile"
#     )
#     documents = text_splitter.create_documents([doc.page_content for doc in docs])
#     print(f"✅ Documents split into {len(documents)} semantic chunks.")
#     return documents

# # Run chunking
# semantic_chunks = split_text_with_semantic_chunker(docs, embeddings)

# # Optional: print sample chunks
# for i, chunk in enumerate(semantic_chunks[:3]):
#     print(f"\n--- Semantic Chunk {i+1} ---\n{chunk.page_content}")
from marker.converters.pdf import PdfConverter
from marker.models import create_model_dict
from marker.output import text_from_rendered
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_text_splitters import MarkdownHeaderTextSplitter
from langchain_experimental.text_splitter import SemanticChunker
from langchain_ollama import OllamaEmbeddings
import os
import re

# Step 1: Convert the PDF to Markdown-like text using Marker
converter = PdfConverter(artifact_dict=create_model_dict())
rendered = converter("pdf/Conduct.pdf")
text, _, images = text_from_rendered(rendered)
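
# Marker also returns any images it extracted from the PDF. A minimal sketch for
# saving them, assuming `images` maps filenames to PIL image objects (as
# marker's text_from_rendered returns); the "pdf_images" directory name is just
# an example:
# os.makedirs("pdf_images", exist_ok=True)
# for name, image in images.items():
#     image.save(os.path.join("pdf_images", name))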
md_path = "temp_content1.md"
with open(md_path, "w", encoding="utf-8") as f:
    f.write(text)
# Step 2: Load the Markdown content back with LangChain
loader = UnstructuredMarkdownLoader(md_path)
data = loader.load()
print(f"Loaded {len(data)} document(s) from {md_path}")
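
# UnstructuredMarkdownLoader defaults to mode="single" (one Document for the
# whole file). If per-element Documents (titles, paragraphs, lists) were wanted
# instead, the loader also accepts mode="elements", an optional variant:
# loader = UnstructuredMarkdownLoader(md_path, mode="elements")
# data = loader.load()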
# Step 3: Combine all pages into a single Markdown string
markdown_text = "\n".join(doc.page_content for doc in data)

# Step 4: Normalize headers like '####Header' → '#### Header'
markdown_text = re.sub(r"^(#+)([^\s#])", r"\1 \2", markdown_text, flags=re.MULTILINE)
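
# Quick sanity check of the normalization on literal examples: '####Header'
# gains a space, while an already well-formed '# Title' is left untouched.
assert re.sub(r"^(#+)([^\s#])", r"\1 \2", "####Header", flags=re.MULTILINE) == "#### Header"
assert re.sub(r"^(#+)([^\s#])", r"\1 \2", "# Title", flags=re.MULTILINE) == "# Title"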


# Step 5: Define the header levels actually seen in the document
# (used by the alternative MarkdownHeaderTextSplitter flow further below)
headers_to_split_on = [
    ("###", "Header 1"),
    ("####", "Header 2"),
]

# Step 6: Chunk the combined Markdown semantically with Ollama embeddings
embeddings = OllamaEmbeddings(model='nomic-embed-text')
text_splitter = SemanticChunker(
    embeddings, breakpoint_threshold_type="percentile"
)
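
# The percentile cut-off can be tuned with breakpoint_threshold_amount (95 by
# default for the "percentile" strategy); lower values yield more, smaller
# chunks. An alternative configuration, as a sketch:
# text_splitter = SemanticChunker(
#     embeddings,
#     breakpoint_threshold_type="percentile",
#     breakpoint_threshold_amount=90.0,
# )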

# create_documents expects a list of strings, not Documents, so pass the
# combined and normalized Markdown text rather than the raw loader output.
docs = text_splitter.create_documents([markdown_text])
print(f"✅ Documents split into {len(docs)} semantic chunks.")
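# Optional: preview the first few chunks, following the sample-print pattern above
for i, chunk in enumerate(docs[:3]):
    print(f"\n--- Semantic Chunk {i+1} ---\n{chunk.page_content}")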
# Alternative: header-based splitting instead of (or before) semantic chunking
# markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
# md_header_splits = markdown_splitter.split_text(markdown_text)

# # Save each header split into a separate Markdown file
# output_dir = "markdown_chunks"
# os.makedirs(output_dir, exist_ok=True)

# for i, split in enumerate(md_header_splits):
#     # Optional: use header name for the file
#     title = split.metadata.get("Header 2") or split.metadata.get("Header 1") or f"chunk_{i+1}"
#     safe_title = re.sub(r'\W+', '_', title).strip("_")
#     file_path = os.path.join(output_dir, f"{i+1:02d}_{safe_title}.md")
    
#     with open(file_path, "w", encoding="utf-8") as f:
#         f.write(split.page_content)

# print(f"✅ Saved {len(md_header_splits)} chunks to '{output_dir}' directory.")

# Debug helpers: inspect the loaded/combined objects and which headers survive
# the normalization above.
# print(type(data), type(markdown_text))
# print("\n📌 Headers found in markdown_text:\n")
# for line in markdown_text.split("\n"):
#     if line.strip().startswith("#"):
#         print(line)