# Alternative loader (disabled): LLMSherpaFileLoader offers section-aware parsing,
# but it relies on an llmsherpa parsing service (the commented llmsherpa_api_url below
# would point it at a local nlm-ingestor instance). PyPDFLoader is used instead.
#
# from langchain_community.document_loaders.llmsherpa import LLMSherpaFileLoader
#
# loader = LLMSherpaFileLoader(
#     file_path="cyber.pdf",
#     new_indent_parser=True,
#     apply_ocr=True,
#     strategy="sections",
#     # llmsherpa_api_url="http://localhost:5010/api/parseDocument?renderFormat=all",
# )
# docs = loader.load()
# print(docs)
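
# Active pipeline: extract the PDF text page by page with PyPDFLoader, merge the pages
# into a single Document, then split it with SemanticChunker, which uses OpenAI
# embeddings to break the text where adjacent sentences become semantically dissimilar.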

import os

from langchain_experimental.text_splitter import SemanticChunker
from langchain_openai.embeddings import OpenAIEmbeddings
from langchain_community.document_loaders import PyPDFLoader
from langchain_core.documents import Document

file_path = "cyber.pdf"
loader = PyPDFLoader(file_path)
docs = loader.load()  # one Document per PDF page

# Merge the per-page text into a single Document so the splitter sees the whole file.
page_texts = [doc.page_content for doc in docs]
full_text = "\n".join(page_texts)
document = Document(page_content=full_text)
# full_text could also be printed here or written to a file (see the sketch at the end).
# Read the OpenAI key from the environment instead of hardcoding a secret in source.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]

# Start a new chunk wherever the embedding distance between adjacent sentences
# crosses the interquartile-range breakpoint.
text_splitter = SemanticChunker(OpenAIEmbeddings(api_key=OPENAI_API_KEY),
                                breakpoint_threshold_type="interquartile")
chunks = text_splitter.split_documents([document])
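
# Optional (illustrative): print chunk sizes to judge whether the interquartile
# breakpoint produces usable chunk lengths for this document.
# for i, chunk in enumerate(chunks):
#     print(i, len(chunk.page_content))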
# chunks = text_splitter.split_text(full_text)  # alternative: split the raw string instead

print(f"{len(chunks)} semantic chunks")
# print(chunks)  # full dump of every chunk (noisy)
print("--- first chunk ---")
print(chunks[0].page_content)
# print("--- second chunk ---")
# print(chunks[1].page_content)
# print("--- third chunk ---")
# print(chunks[2].page_content)
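
# A minimal sketch (file names are illustrative, not from the original script) of
# persisting the output, as suggested by the "written to a file" note above:
#
# with open("cyber_full_text.txt", "w", encoding="utf-8") as f:
#     f.write(full_text)
#
# for i, chunk in enumerate(chunks):
#     with open(f"chunk_{i:03d}.txt", "w", encoding="utf-8") as f:
#         f.write(chunk.page_content)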

