# This file was auto-generated by Fern from our API Definition.

import typing
import urllib.parse
from json.decoder import JSONDecodeError

from ...core.api_error import ApiError
from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ...core.jsonable_encoder import jsonable_encoder
from ...core.remove_none_from_dict import remove_none_from_dict
from ...errors.unprocessable_entity_error import UnprocessableEntityError
from ...types.http_validation_error import HttpValidationError
from ...types.llama_parse_supported_file_extensions import LlamaParseSupportedFileExtensions
from ...types.parser_languages import ParserLanguages
from ...types.parsing_history_item import ParsingHistoryItem
from ...types.parsing_job import ParsingJob
from ...types.parsing_job_json_result import ParsingJobJsonResult
from ...types.parsing_job_markdown_result import ParsingJobMarkdownResult
from ...types.parsing_job_structured_result import ParsingJobStructuredResult
from ...types.parsing_job_text_result import ParsingJobTextResult
from ...types.parsing_mode import ParsingMode
from ...types.presigned_url import PresignedUrl

try:
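    # Prefer the Pydantic v1 API regardless of the installed major version:
    # on Pydantic v2 installs this resolves to the bundled pydantic.v1
    # compatibility namespace, while on v1 installs the ImportError raised
    # below falls through to the plain import in the except branch.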
    import pydantic
    if pydantic.__version__.startswith("1."):
        raise ImportError
    import pydantic.v1 as pydantic  # type: ignore
except ImportError:
    import pydantic  # type: ignore

# Sentinel default for optional parameters: it lets the client distinguish
# "argument not provided" from an explicit None.
OMIT = typing.cast(typing.Any, ...)
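# e.g. leaving `file` unset keeps it at OMIT and drops the field from the
# request body entirely, whereas passing file=None sends an explicit null.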


class ParsingClient:
    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def get_job_image_result(self, job_id: str, name: str) -> None:
        """
        Get an image result for a job by job id and image name

        Parameters:
            - job_id: str.

            - name: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_image_result(
            job_id="string",
            name="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/image/{name}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
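        # A 2xx response is treated as success; this generated method does not
        # decode the image payload and simply returns None.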
        if 200 <= _response.status_code < 300:
            return
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
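        # For any other status, decode the error body as JSON if possible and
        # attach it to a generic ApiError, falling back to the raw text.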
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_supported_file_extensions(self) -> typing.List[LlamaParseSupportedFileExtensions]:
        """
        Get a list of supported file extensions

        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_supported_file_extensions()
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/supported_file_extensions"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[LlamaParseSupportedFileExtensions], _response.json())  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def screenshot(
        self,
        *,
        organization_id: typing.Optional[str] = None,
        project_id: typing.Optional[str] = None,
        file: typing.Optional[str] = OMIT,
        do_not_cache: bool,
        http_proxy: str,
        input_s_3_path: str,
        input_s_3_region: str,
        input_url: str,
        invalidate_cache: bool,
        max_pages: typing.Optional[int] = OMIT,
        output_s_3_path_prefix: str,
        output_s_3_region: str,
        target_pages: str,
        webhook_url: str,
        job_timeout_in_seconds: float,
        job_timeout_extra_time_per_page_in_seconds: float,
    ) -> ParsingJob:
        """
        Create a screenshot job for the given input. Returns the created job

        Parameters:
            - organization_id: typing.Optional[str].

            - project_id: typing.Optional[str].

            - file: typing.Optional[str].

            - do_not_cache: bool.

            - http_proxy: str.

            - input_s_3_path: str.

            - input_s_3_region: str.

            - input_url: str.

            - invalidate_cache: bool.

            - max_pages: typing.Optional[int].

            - output_s_3_path_prefix: str.

            - output_s_3_region: str.

            - target_pages: str.

            - webhook_url: str.

            - job_timeout_in_seconds: float.

            - job_timeout_extra_time_per_page_in_seconds: float.
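        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        # A minimal sketch with placeholder values; the booleans, floats and
        # strings below are illustrative only, so substitute real inputs.
        client.parsing.screenshot(
            do_not_cache=False,
            http_proxy="string",
            input_s_3_path="string",
            input_s_3_region="string",
            input_url="string",
            invalidate_cache=False,
            output_s_3_path_prefix="string",
            output_s_3_region="string",
            target_pages="string",
            webhook_url="string",
            job_timeout_in_seconds=1.1,
            job_timeout_extra_time_per_page_in_seconds=1.1,
        )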
        """
        _request: typing.Dict[str, typing.Any] = {
            "do_not_cache": do_not_cache,
            "http_proxy": http_proxy,
            "input_s3_path": input_s_3_path,
            "input_s3_region": input_s_3_region,
            "input_url": input_url,
            "invalidate_cache": invalidate_cache,
            "output_s3_path_prefix": output_s_3_path_prefix,
            "output_s3_region": output_s_3_region,
            "target_pages": target_pages,
            "webhook_url": webhook_url,
            "job_timeout_in_seconds": job_timeout_in_seconds,
            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
        }
        if file is not OMIT:
            _request["file"] = file
        if max_pages is not OMIT:
            _request["max_pages"] = max_pages
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def upload_file(
        self,
        *,
        organization_id: typing.Optional[str] = None,
        project_id: typing.Optional[str] = None,
        file: typing.Optional[str] = OMIT,
        adaptive_long_table: bool,
        annotate_links: bool,
        auto_mode: bool,
        auto_mode_trigger_on_image_in_page: bool,
        auto_mode_trigger_on_table_in_page: bool,
        auto_mode_trigger_on_text_in_page: str,
        auto_mode_trigger_on_regexp_in_page: str,
        azure_openai_api_version: str,
        azure_openai_deployment_name: str,
        azure_openai_endpoint: str,
        azure_openai_key: str,
        bbox_bottom: float,
        bbox_left: float,
        bbox_right: float,
        bbox_top: float,
        compact_markdown_table: bool,
        disable_ocr: bool,
        disable_reconstruction: bool,
        disable_image_extraction: bool,
        do_not_cache: bool,
        do_not_unroll_columns: bool,
        extract_charts: bool,
        guess_xlsx_sheet_name: bool,
        html_make_all_elements_visible: bool,
        html_remove_fixed_elements: bool,
        html_remove_navigation_elements: bool,
        http_proxy: str,
        input_s_3_path: str,
        input_s_3_region: str,
        input_url: str,
        invalidate_cache: bool,
        language: typing.List[ParserLanguages],
        extract_layout: bool,
        max_pages: typing.Optional[int] = OMIT,
        output_pdf_of_document: bool,
        output_s_3_path_prefix: str,
        output_s_3_region: str,
        page_prefix: str,
        page_separator: str,
        page_suffix: str,
        preserve_layout_alignment_across_pages: bool,
        skip_diagonal_text: bool,
        spreadsheet_extract_sub_tables: bool,
        structured_output: bool,
        structured_output_json_schema: str,
        structured_output_json_schema_name: str,
        take_screenshot: bool,
        target_pages: str,
        vendor_multimodal_api_key: str,
        vendor_multimodal_model_name: str,
        model: str,
        webhook_url: str,
        preset: str,
        parse_mode: typing.Optional[ParsingMode] = OMIT,
        system_prompt: str,
        system_prompt_append: str,
        user_prompt: str,
        job_timeout_in_seconds: float,
        job_timeout_extra_time_per_page_in_seconds: float,
        strict_mode_image_extraction: bool,
        strict_mode_image_ocr: bool,
        strict_mode_reconstruction: bool,
        strict_mode_buggy_font: bool,
        ignore_document_elements_for_layout_detection: bool,
        output_tables_as_html: bool,
        use_vendor_multimodal_model: bool,
        bounding_box: str,
        gpt_4_o_mode: bool,
        gpt_4_o_api_key: str,
        complemental_formatting_instruction: str,
        content_guideline_instruction: str,
        premium_mode: bool,
        is_formatting_instruction: bool,
        continuous_mode: bool,
        parsing_instruction: str,
        fast_mode: bool,
        formatting_instruction: str,
    ) -> ParsingJob:
        """
        Upload a file to S3 and create a parsing job. Returns the created job, including its id

        Parameters:
            - organization_id: typing.Optional[str].

            - project_id: typing.Optional[str].

            - file: typing.Optional[str].

            - adaptive_long_table: bool.

            - annotate_links: bool.

            - auto_mode: bool.

            - auto_mode_trigger_on_image_in_page: bool.

            - auto_mode_trigger_on_table_in_page: bool.

            - auto_mode_trigger_on_text_in_page: str.

            - auto_mode_trigger_on_regexp_in_page: str.

            - azure_openai_api_version: str.

            - azure_openai_deployment_name: str.

            - azure_openai_endpoint: str.

            - azure_openai_key: str.

            - bbox_bottom: float.

            - bbox_left: float.

            - bbox_right: float.

            - bbox_top: float.

            - compact_markdown_table: bool.

            - disable_ocr: bool.

            - disable_reconstruction: bool.

            - disable_image_extraction: bool.

            - do_not_cache: bool.

            - do_not_unroll_columns: bool.

            - extract_charts: bool.

            - guess_xlsx_sheet_name: bool.

            - html_make_all_elements_visible: bool.

            - html_remove_fixed_elements: bool.

            - html_remove_navigation_elements: bool.

            - http_proxy: str.

            - input_s_3_path: str.

            - input_s_3_region: str.

            - input_url: str.

            - invalidate_cache: bool.

            - language: typing.List[ParserLanguages].

            - extract_layout: bool.

            - max_pages: typing.Optional[int].

            - output_pdf_of_document: bool.

            - output_s_3_path_prefix: str.

            - output_s_3_region: str.

            - page_prefix: str.

            - page_separator: str.

            - page_suffix: str.

            - preserve_layout_alignment_across_pages: bool.

            - skip_diagonal_text: bool.

            - spreadsheet_extract_sub_tables: bool.

            - structured_output: bool.

            - structured_output_json_schema: str.

            - structured_output_json_schema_name: str.

            - take_screenshot: bool.

            - target_pages: str.

            - vendor_multimodal_api_key: str.

            - vendor_multimodal_model_name: str.

            - model: str.

            - webhook_url: str.

            - preset: str.

            - parse_mode: typing.Optional[ParsingMode].

            - system_prompt: str.

            - system_prompt_append: str.

            - user_prompt: str.

            - job_timeout_in_seconds: float.

            - job_timeout_extra_time_per_page_in_seconds: float.

            - strict_mode_image_extraction: bool.

            - strict_mode_image_ocr: bool.

            - strict_mode_reconstruction: bool.

            - strict_mode_buggy_font: bool.

            - ignore_document_elements_for_layout_detection: bool.

            - output_tables_as_html: bool.

            - use_vendor_multimodal_model: bool.

            - bounding_box: str.

            - gpt_4_o_mode: bool.

            - gpt_4_o_api_key: str.

            - complemental_formatting_instruction: str.

            - content_guideline_instruction: str.

            - premium_mode: bool.

            - is_formatting_instruction: bool.

            - continuous_mode: bool.

            - parsing_instruction: str.

            - fast_mode: bool.

            - formatting_instruction: str.
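        ---
        from llama_cloud import ParserLanguages
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        # A minimal sketch with placeholder values. ParserLanguages.EN and the
        # root-level re-export of ParserLanguages are assumptions; check your
        # installed package. Optional fields (file, max_pages, parse_mode) are
        # left at OMIT here and therefore not sent at all.
        client.parsing.upload_file(
            adaptive_long_table=False,
            annotate_links=False,
            auto_mode=False,
            auto_mode_trigger_on_image_in_page=False,
            auto_mode_trigger_on_table_in_page=False,
            auto_mode_trigger_on_text_in_page="string",
            auto_mode_trigger_on_regexp_in_page="string",
            azure_openai_api_version="string",
            azure_openai_deployment_name="string",
            azure_openai_endpoint="string",
            azure_openai_key="string",
            bbox_bottom=1.1,
            bbox_left=1.1,
            bbox_right=1.1,
            bbox_top=1.1,
            compact_markdown_table=False,
            disable_ocr=False,
            disable_reconstruction=False,
            disable_image_extraction=False,
            do_not_cache=False,
            do_not_unroll_columns=False,
            extract_charts=False,
            guess_xlsx_sheet_name=False,
            html_make_all_elements_visible=False,
            html_remove_fixed_elements=False,
            html_remove_navigation_elements=False,
            http_proxy="string",
            input_s_3_path="string",
            input_s_3_region="string",
            input_url="string",
            invalidate_cache=False,
            language=[ParserLanguages.EN],
            extract_layout=False,
            output_pdf_of_document=False,
            output_s_3_path_prefix="string",
            output_s_3_region="string",
            page_prefix="string",
            page_separator="string",
            page_suffix="string",
            preserve_layout_alignment_across_pages=False,
            skip_diagonal_text=False,
            spreadsheet_extract_sub_tables=False,
            structured_output=False,
            structured_output_json_schema="string",
            structured_output_json_schema_name="string",
            take_screenshot=False,
            target_pages="string",
            vendor_multimodal_api_key="string",
            vendor_multimodal_model_name="string",
            model="string",
            webhook_url="string",
            preset="string",
            system_prompt="string",
            system_prompt_append="string",
            user_prompt="string",
            job_timeout_in_seconds=1.1,
            job_timeout_extra_time_per_page_in_seconds=1.1,
            strict_mode_image_extraction=False,
            strict_mode_image_ocr=False,
            strict_mode_reconstruction=False,
            strict_mode_buggy_font=False,
            ignore_document_elements_for_layout_detection=False,
            output_tables_as_html=False,
            use_vendor_multimodal_model=False,
            bounding_box="string",
            gpt_4_o_mode=False,
            gpt_4_o_api_key="string",
            complemental_formatting_instruction="string",
            content_guideline_instruction="string",
            premium_mode=False,
            is_formatting_instruction=False,
            continuous_mode=False,
            parsing_instruction="string",
            fast_mode=False,
            formatting_instruction="string",
        )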
        """
        _request: typing.Dict[str, typing.Any] = {
            "adaptive_long_table": adaptive_long_table,
            "annotate_links": annotate_links,
            "auto_mode": auto_mode,
            "auto_mode_trigger_on_image_in_page": auto_mode_trigger_on_image_in_page,
            "auto_mode_trigger_on_table_in_page": auto_mode_trigger_on_table_in_page,
            "auto_mode_trigger_on_text_in_page": auto_mode_trigger_on_text_in_page,
            "auto_mode_trigger_on_regexp_in_page": auto_mode_trigger_on_regexp_in_page,
            "azure_openai_api_version": azure_openai_api_version,
            "azure_openai_deployment_name": azure_openai_deployment_name,
            "azure_openai_endpoint": azure_openai_endpoint,
            "azure_openai_key": azure_openai_key,
            "bbox_bottom": bbox_bottom,
            "bbox_left": bbox_left,
            "bbox_right": bbox_right,
            "bbox_top": bbox_top,
            "compact_markdown_table": compact_markdown_table,
            "disable_ocr": disable_ocr,
            "disable_reconstruction": disable_reconstruction,
            "disable_image_extraction": disable_image_extraction,
            "do_not_cache": do_not_cache,
            "do_not_unroll_columns": do_not_unroll_columns,
            "extract_charts": extract_charts,
            "guess_xlsx_sheet_name": guess_xlsx_sheet_name,
            "html_make_all_elements_visible": html_make_all_elements_visible,
            "html_remove_fixed_elements": html_remove_fixed_elements,
            "html_remove_navigation_elements": html_remove_navigation_elements,
            "http_proxy": http_proxy,
            "input_s3_path": input_s_3_path,
            "input_s3_region": input_s_3_region,
            "input_url": input_url,
            "invalidate_cache": invalidate_cache,
            "language": language,
            "extract_layout": extract_layout,
            "output_pdf_of_document": output_pdf_of_document,
            "output_s3_path_prefix": output_s_3_path_prefix,
            "output_s3_region": output_s_3_region,
            "page_prefix": page_prefix,
            "page_separator": page_separator,
            "page_suffix": page_suffix,
            "preserve_layout_alignment_across_pages": preserve_layout_alignment_across_pages,
            "skip_diagonal_text": skip_diagonal_text,
            "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
            "structured_output": structured_output,
            "structured_output_json_schema": structured_output_json_schema,
            "structured_output_json_schema_name": structured_output_json_schema_name,
            "take_screenshot": take_screenshot,
            "target_pages": target_pages,
            "vendor_multimodal_api_key": vendor_multimodal_api_key,
            "vendor_multimodal_model_name": vendor_multimodal_model_name,
            "model": model,
            "webhook_url": webhook_url,
            "preset": preset,
            "system_prompt": system_prompt,
            "system_prompt_append": system_prompt_append,
            "user_prompt": user_prompt,
            "job_timeout_in_seconds": job_timeout_in_seconds,
            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
            "strict_mode_image_extraction": strict_mode_image_extraction,
            "strict_mode_image_ocr": strict_mode_image_ocr,
            "strict_mode_reconstruction": strict_mode_reconstruction,
            "strict_mode_buggy_font": strict_mode_buggy_font,
            "ignore_document_elements_for_layout_detection": ignore_document_elements_for_layout_detection,
            "output_tables_as_HTML": output_tables_as_html,
            "use_vendor_multimodal_model": use_vendor_multimodal_model,
            "bounding_box": bounding_box,
            "gpt4o_mode": gpt_4_o_mode,
            "gpt4o_api_key": gpt_4_o_api_key,
            "complemental_formatting_instruction": complemental_formatting_instruction,
            "content_guideline_instruction": content_guideline_instruction,
            "premium_mode": premium_mode,
            "is_formatting_instruction": is_formatting_instruction,
            "continuous_mode": continuous_mode,
            "parsing_instruction": parsing_instruction,
            "fast_mode": fast_mode,
            "formatting_instruction": formatting_instruction,
        }
        if file is not OMIT:
            _request["file"] = file
        if max_pages is not OMIT:
            _request["max_pages"] = max_pages
        if parse_mode is not OMIT:
            _request["parse_mode"] = parse_mode
        _response = self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/upload"),
            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job(self, job_id: str) -> ParsingJob:
        """
        Get a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_parsing_job_details(self, job_id: str) -> typing.Any:
        """
        Get detailed information for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_parsing_job_details(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/details"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_text_result(self, job_id: str, *, organization_id: typing.Optional[str] = None) -> ParsingJobTextResult:
        """
        Get the text result for a job by id

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_text_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/text"),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobTextResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_raw_text_result(self, job_id: str) -> typing.Any:
        """
        Get the raw text result for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_raw_text_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/pdf"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_structured_result(
        self, job_id: str, *, organization_id: typing.Optional[str] = None
    ) -> ParsingJobStructuredResult:
        """
        Get the structured result for a job by id

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_structured_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/structured"
            ),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobStructuredResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_raw_structured_result(self, job_id: str) -> typing.Any:
        """
        Get the raw structured result for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_raw_structured_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/structured"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_raw_xlsx_result(self, job_id: str) -> typing.Any:
        """
        Get the raw XLSX result for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_raw_xlsx_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/xlsx"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_result(self, job_id: str, *, organization_id: typing.Optional[str] = None) -> ParsingJobMarkdownResult:
        """
        Get the markdown result for a job by id

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/markdown"
            ),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobMarkdownResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_raw_md_result(self, job_id: str) -> typing.Any:
        """
        Get the raw markdown result for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_raw_md_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/markdown"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_json_result(self, job_id: str, *, organization_id: typing.Optional[str] = None) -> ParsingJobJsonResult:
        """
        Get the JSON result for a job by id

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_json_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/json"),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobJsonResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_job_json_raw_result(self, job_id: str) -> typing.Any:
        """
        Get the raw JSON result for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_job_json_raw_result(
            job_id="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/json"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def get_parsing_history_result(self) -> typing.List[ParsingHistoryItem]:
        """
        Get the parsing history for the authenticated user

        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.get_parsing_history_result()
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/history"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[ParsingHistoryItem], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    def generate_presigned_url(self, job_id: str, filename: str) -> PresignedUrl:
        """
        Generate a presigned URL for a job

        Parameters:
            - job_id: str.

            - filename: str.
        ---
        from llama_cloud.client import LlamaCloud

        client = LlamaCloud(
            token="YOUR_TOKEN",
        )
        client.parsing.generate_presigned_url(
            job_id="string",
            filename="string",
        )
        """
        _response = self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/read/{filename}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(PresignedUrl, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)


class AsyncParsingClient:
    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    async def get_job_image_result(self, job_id: str, name: str) -> None:
        """
        Get an image result for a job by job id and image name

        Parameters:
            - job_id: str.

            - name: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_image_result(
            job_id="string",
            name="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/image/{name}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_supported_file_extensions(self) -> typing.List[LlamaParseSupportedFileExtensions]:
        """
        Get a list of supported file extensions

        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_supported_file_extensions()
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/supported_file_extensions"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[LlamaParseSupportedFileExtensions], _response.json())  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def screenshot(
        self,
        *,
        organization_id: typing.Optional[str] = None,
        project_id: typing.Optional[str] = None,
        file: typing.Optional[str] = OMIT,
        do_not_cache: bool,
        http_proxy: str,
        input_s_3_path: str,
        input_s_3_region: str,
        input_url: str,
        invalidate_cache: bool,
        max_pages: typing.Optional[int] = OMIT,
        output_s_3_path_prefix: str,
        output_s_3_region: str,
        target_pages: str,
        webhook_url: str,
        job_timeout_in_seconds: float,
        job_timeout_extra_time_per_page_in_seconds: float,
    ) -> ParsingJob:
        """
        Create a screenshot job for the given input. Returns the created job

        Parameters:
            - organization_id: typing.Optional[str].

            - project_id: typing.Optional[str].

            - file: typing.Optional[str].

            - do_not_cache: bool.

            - http_proxy: str.

            - input_s_3_path: str.

            - input_s_3_region: str.

            - input_url: str.

            - invalidate_cache: bool.

            - max_pages: typing.Optional[int].

            - output_s_3_path_prefix: str.

            - output_s_3_region: str.

            - target_pages: str.

            - webhook_url: str.

            - job_timeout_in_seconds: float.

            - job_timeout_extra_time_per_page_in_seconds: float.
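        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        # A minimal sketch with placeholder values; the booleans, floats and
        # strings below are illustrative only, so substitute real inputs.
        await client.parsing.screenshot(
            do_not_cache=False,
            http_proxy="string",
            input_s_3_path="string",
            input_s_3_region="string",
            input_url="string",
            invalidate_cache=False,
            output_s_3_path_prefix="string",
            output_s_3_region="string",
            target_pages="string",
            webhook_url="string",
            job_timeout_in_seconds=1.1,
            job_timeout_extra_time_per_page_in_seconds=1.1,
        )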
        """
        _request: typing.Dict[str, typing.Any] = {
            "do_not_cache": do_not_cache,
            "http_proxy": http_proxy,
            "input_s3_path": input_s_3_path,
            "input_s3_region": input_s_3_region,
            "input_url": input_url,
            "invalidate_cache": invalidate_cache,
            "output_s3_path_prefix": output_s_3_path_prefix,
            "output_s3_region": output_s_3_region,
            "target_pages": target_pages,
            "webhook_url": webhook_url,
            "job_timeout_in_seconds": job_timeout_in_seconds,
            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
        }
        if file is not OMIT:
            _request["file"] = file
        if max_pages is not OMIT:
            _request["max_pages"] = max_pages
        _response = await self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/screenshot"),
            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def upload_file(
        self,
        *,
        organization_id: typing.Optional[str] = None,
        project_id: typing.Optional[str] = None,
        file: typing.Optional[str] = OMIT,
        adaptive_long_table: bool,
        annotate_links: bool,
        auto_mode: bool,
        auto_mode_trigger_on_image_in_page: bool,
        auto_mode_trigger_on_table_in_page: bool,
        auto_mode_trigger_on_text_in_page: str,
        auto_mode_trigger_on_regexp_in_page: str,
        azure_openai_api_version: str,
        azure_openai_deployment_name: str,
        azure_openai_endpoint: str,
        azure_openai_key: str,
        bbox_bottom: float,
        bbox_left: float,
        bbox_right: float,
        bbox_top: float,
        compact_markdown_table: bool,
        disable_ocr: bool,
        disable_reconstruction: bool,
        disable_image_extraction: bool,
        do_not_cache: bool,
        do_not_unroll_columns: bool,
        extract_charts: bool,
        guess_xlsx_sheet_name: bool,
        html_make_all_elements_visible: bool,
        html_remove_fixed_elements: bool,
        html_remove_navigation_elements: bool,
        http_proxy: str,
        input_s_3_path: str,
        input_s_3_region: str,
        input_url: str,
        invalidate_cache: bool,
        language: typing.List[ParserLanguages],
        extract_layout: bool,
        max_pages: typing.Optional[int] = OMIT,
        output_pdf_of_document: bool,
        output_s_3_path_prefix: str,
        output_s_3_region: str,
        page_prefix: str,
        page_separator: str,
        page_suffix: str,
        preserve_layout_alignment_across_pages: bool,
        skip_diagonal_text: bool,
        spreadsheet_extract_sub_tables: bool,
        structured_output: bool,
        structured_output_json_schema: str,
        structured_output_json_schema_name: str,
        take_screenshot: bool,
        target_pages: str,
        vendor_multimodal_api_key: str,
        vendor_multimodal_model_name: str,
        model: str,
        webhook_url: str,
        preset: str,
        parse_mode: typing.Optional[ParsingMode] = OMIT,
        system_prompt: str,
        system_prompt_append: str,
        user_prompt: str,
        job_timeout_in_seconds: float,
        job_timeout_extra_time_per_page_in_seconds: float,
        strict_mode_image_extraction: bool,
        strict_mode_image_ocr: bool,
        strict_mode_reconstruction: bool,
        strict_mode_buggy_font: bool,
        ignore_document_elements_for_layout_detection: bool,
        output_tables_as_html: bool,
        use_vendor_multimodal_model: bool,
        bounding_box: str,
        gpt_4_o_mode: bool,
        gpt_4_o_api_key: str,
        complemental_formatting_instruction: str,
        content_guideline_instruction: str,
        premium_mode: bool,
        is_formatting_instruction: bool,
        continuous_mode: bool,
        parsing_instruction: str,
        fast_mode: bool,
        formatting_instruction: str,
    ) -> ParsingJob:
        """
        Upload a file to S3 and create a parsing job. Returns the created job, including its id

        Parameters:
            - organization_id: typing.Optional[str].

            - project_id: typing.Optional[str].

            - file: typing.Optional[str].

            - adaptive_long_table: bool.

            - annotate_links: bool.

            - auto_mode: bool.

            - auto_mode_trigger_on_image_in_page: bool.

            - auto_mode_trigger_on_table_in_page: bool.

            - auto_mode_trigger_on_text_in_page: str.

            - auto_mode_trigger_on_regexp_in_page: str.

            - azure_openai_api_version: str.

            - azure_openai_deployment_name: str.

            - azure_openai_endpoint: str.

            - azure_openai_key: str.

            - bbox_bottom: float.

            - bbox_left: float.

            - bbox_right: float.

            - bbox_top: float.

            - compact_markdown_table: bool.

            - disable_ocr: bool.

            - disable_reconstruction: bool.

            - disable_image_extraction: bool.

            - do_not_cache: bool.

            - do_not_unroll_columns: bool.

            - extract_charts: bool.

            - guess_xlsx_sheet_name: bool.

            - html_make_all_elements_visible: bool.

            - html_remove_fixed_elements: bool.

            - html_remove_navigation_elements: bool.

            - http_proxy: str.

            - input_s_3_path: str.

            - input_s_3_region: str.

            - input_url: str.

            - invalidate_cache: bool.

            - language: typing.List[ParserLanguages].

            - extract_layout: bool.

            - max_pages: typing.Optional[int].

            - output_pdf_of_document: bool.

            - output_s_3_path_prefix: str.

            - output_s_3_region: str.

            - page_prefix: str.

            - page_separator: str.

            - page_suffix: str.

            - preserve_layout_alignment_across_pages: bool.

            - skip_diagonal_text: bool.

            - spreadsheet_extract_sub_tables: bool.

            - structured_output: bool.

            - structured_output_json_schema: str.

            - structured_output_json_schema_name: str.

            - take_screenshot: bool.

            - target_pages: str.

            - vendor_multimodal_api_key: str.

            - vendor_multimodal_model_name: str.

            - model: str.

            - webhook_url: str.

            - preset: str.

            - parse_mode: typing.Optional[ParsingMode].

            - system_prompt: str.

            - system_prompt_append: str.

            - user_prompt: str.

            - job_timeout_in_seconds: float.

            - job_timeout_extra_time_per_page_in_seconds: float.

            - strict_mode_image_extraction: bool.

            - strict_mode_image_ocr: bool.

            - strict_mode_reconstruction: bool.

            - strict_mode_buggy_font: bool.

            - ignore_document_elements_for_layout_detection: bool.

            - output_tables_as_html: bool.

            - use_vendor_multimodal_model: bool.

            - bounding_box: str.

            - gpt_4_o_mode: bool.

            - gpt_4_o_api_key: str.

            - complemental_formatting_instruction: str.

            - content_guideline_instruction: str.

            - premium_mode: bool.

            - is_formatting_instruction: bool.

            - continuous_mode: bool.

            - parsing_instruction: str.

            - fast_mode: bool.

            - formatting_instruction: str.
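        ---
        from llama_cloud import ParserLanguages
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        # A minimal sketch with placeholder values. ParserLanguages.EN and the
        # root-level re-export of ParserLanguages are assumptions; check your
        # installed package. Optional fields (file, max_pages, parse_mode) are
        # left at OMIT here and therefore not sent at all.
        await client.parsing.upload_file(
            adaptive_long_table=False,
            annotate_links=False,
            auto_mode=False,
            auto_mode_trigger_on_image_in_page=False,
            auto_mode_trigger_on_table_in_page=False,
            auto_mode_trigger_on_text_in_page="string",
            auto_mode_trigger_on_regexp_in_page="string",
            azure_openai_api_version="string",
            azure_openai_deployment_name="string",
            azure_openai_endpoint="string",
            azure_openai_key="string",
            bbox_bottom=1.1,
            bbox_left=1.1,
            bbox_right=1.1,
            bbox_top=1.1,
            compact_markdown_table=False,
            disable_ocr=False,
            disable_reconstruction=False,
            disable_image_extraction=False,
            do_not_cache=False,
            do_not_unroll_columns=False,
            extract_charts=False,
            guess_xlsx_sheet_name=False,
            html_make_all_elements_visible=False,
            html_remove_fixed_elements=False,
            html_remove_navigation_elements=False,
            http_proxy="string",
            input_s_3_path="string",
            input_s_3_region="string",
            input_url="string",
            invalidate_cache=False,
            language=[ParserLanguages.EN],
            extract_layout=False,
            output_pdf_of_document=False,
            output_s_3_path_prefix="string",
            output_s_3_region="string",
            page_prefix="string",
            page_separator="string",
            page_suffix="string",
            preserve_layout_alignment_across_pages=False,
            skip_diagonal_text=False,
            spreadsheet_extract_sub_tables=False,
            structured_output=False,
            structured_output_json_schema="string",
            structured_output_json_schema_name="string",
            take_screenshot=False,
            target_pages="string",
            vendor_multimodal_api_key="string",
            vendor_multimodal_model_name="string",
            model="string",
            webhook_url="string",
            preset="string",
            system_prompt="string",
            system_prompt_append="string",
            user_prompt="string",
            job_timeout_in_seconds=1.1,
            job_timeout_extra_time_per_page_in_seconds=1.1,
            strict_mode_image_extraction=False,
            strict_mode_image_ocr=False,
            strict_mode_reconstruction=False,
            strict_mode_buggy_font=False,
            ignore_document_elements_for_layout_detection=False,
            output_tables_as_html=False,
            use_vendor_multimodal_model=False,
            bounding_box="string",
            gpt_4_o_mode=False,
            gpt_4_o_api_key="string",
            complemental_formatting_instruction="string",
            content_guideline_instruction="string",
            premium_mode=False,
            is_formatting_instruction=False,
            continuous_mode=False,
            parsing_instruction="string",
            fast_mode=False,
            formatting_instruction="string",
        )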
        """
        _request: typing.Dict[str, typing.Any] = {
            "adaptive_long_table": adaptive_long_table,
            "annotate_links": annotate_links,
            "auto_mode": auto_mode,
            "auto_mode_trigger_on_image_in_page": auto_mode_trigger_on_image_in_page,
            "auto_mode_trigger_on_table_in_page": auto_mode_trigger_on_table_in_page,
            "auto_mode_trigger_on_text_in_page": auto_mode_trigger_on_text_in_page,
            "auto_mode_trigger_on_regexp_in_page": auto_mode_trigger_on_regexp_in_page,
            "azure_openai_api_version": azure_openai_api_version,
            "azure_openai_deployment_name": azure_openai_deployment_name,
            "azure_openai_endpoint": azure_openai_endpoint,
            "azure_openai_key": azure_openai_key,
            "bbox_bottom": bbox_bottom,
            "bbox_left": bbox_left,
            "bbox_right": bbox_right,
            "bbox_top": bbox_top,
            "compact_markdown_table": compact_markdown_table,
            "disable_ocr": disable_ocr,
            "disable_reconstruction": disable_reconstruction,
            "disable_image_extraction": disable_image_extraction,
            "do_not_cache": do_not_cache,
            "do_not_unroll_columns": do_not_unroll_columns,
            "extract_charts": extract_charts,
            "guess_xlsx_sheet_name": guess_xlsx_sheet_name,
            "html_make_all_elements_visible": html_make_all_elements_visible,
            "html_remove_fixed_elements": html_remove_fixed_elements,
            "html_remove_navigation_elements": html_remove_navigation_elements,
            "http_proxy": http_proxy,
            "input_s3_path": input_s_3_path,
            "input_s3_region": input_s_3_region,
            "input_url": input_url,
            "invalidate_cache": invalidate_cache,
            "language": language,
            "extract_layout": extract_layout,
            "output_pdf_of_document": output_pdf_of_document,
            "output_s3_path_prefix": output_s_3_path_prefix,
            "output_s3_region": output_s_3_region,
            "page_prefix": page_prefix,
            "page_separator": page_separator,
            "page_suffix": page_suffix,
            "preserve_layout_alignment_across_pages": preserve_layout_alignment_across_pages,
            "skip_diagonal_text": skip_diagonal_text,
            "spreadsheet_extract_sub_tables": spreadsheet_extract_sub_tables,
            "structured_output": structured_output,
            "structured_output_json_schema": structured_output_json_schema,
            "structured_output_json_schema_name": structured_output_json_schema_name,
            "take_screenshot": take_screenshot,
            "target_pages": target_pages,
            "vendor_multimodal_api_key": vendor_multimodal_api_key,
            "vendor_multimodal_model_name": vendor_multimodal_model_name,
            "model": model,
            "webhook_url": webhook_url,
            "preset": preset,
            "system_prompt": system_prompt,
            "system_prompt_append": system_prompt_append,
            "user_prompt": user_prompt,
            "job_timeout_in_seconds": job_timeout_in_seconds,
            "job_timeout_extra_time_per_page_in_seconds": job_timeout_extra_time_per_page_in_seconds,
            "strict_mode_image_extraction": strict_mode_image_extraction,
            "strict_mode_image_ocr": strict_mode_image_ocr,
            "strict_mode_reconstruction": strict_mode_reconstruction,
            "strict_mode_buggy_font": strict_mode_buggy_font,
            "ignore_document_elements_for_layout_detection": ignore_document_elements_for_layout_detection,
            "output_tables_as_HTML": output_tables_as_html,
            "use_vendor_multimodal_model": use_vendor_multimodal_model,
            "bounding_box": bounding_box,
            "gpt4o_mode": gpt_4_o_mode,
            "gpt4o_api_key": gpt_4_o_api_key,
            "complemental_formatting_instruction": complemental_formatting_instruction,
            "content_guideline_instruction": content_guideline_instruction,
            "premium_mode": premium_mode,
            "is_formatting_instruction": is_formatting_instruction,
            "continuous_mode": continuous_mode,
            "parsing_instruction": parsing_instruction,
            "fast_mode": fast_mode,
            "formatting_instruction": formatting_instruction,
        }
        if file is not OMIT:
            _request["file"] = file
        if max_pages is not OMIT:
            _request["max_pages"] = max_pages
        if parse_mode is not OMIT:
            _request["parse_mode"] = parse_mode
        _response = await self._client_wrapper.httpx_client.request(
            "POST",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/upload"),
            params=remove_none_from_dict({"organization_id": organization_id, "project_id": project_id}),
            json=jsonable_encoder(_request),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job(self, job_id: str) -> ParsingJob:
        """
        Get a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job(
            job_id="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJob, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_parsing_job_details(self, job_id: str) -> typing.Any:
        """
        Get detailed information for a job by id

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_parsing_job_details(
            job_id="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/details"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_text_result(
        self, job_id: str, *, organization_id: typing.Optional[str] = None
    ) -> ParsingJobTextResult:
        """
        Get the text result for a job by id

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_text_result(
            job_id="string",
        )
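
        # A short follow-up sketch. Assumption: ParsingJobTextResult exposes
        # the parsed text under a `text` field; fall back to `.dict()` to
        # inspect the model if the field is named differently.
        result = await client.parsing.get_job_text_result(job_id="string")
        print(result.text)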
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/text"),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobTextResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_raw_text_result(self, job_id: str) -> typing.Any:
        """
        Get the raw text result of a job

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_raw_text_result(
            job_id="string",
        )
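
        # The raw endpoints return the unwrapped response body as typing.Any
        # rather than a typed model. A minimal sketch of handling that:
        raw = await client.parsing.get_job_raw_text_result(job_id="string")
        print(raw if isinstance(raw, str) else repr(raw))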
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/pdf"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_structured_result(
        self, job_id: str, *, organization_id: typing.Optional[str] = None
    ) -> ParsingJobStructuredResult:
        """
        Get the structured result of a job

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_structured_result(
            job_id="string",
        )
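
        # A short follow-up sketch. Field names on ParsingJobStructuredResult
        # are not assumed here; `.dict()` (pydantic v1) dumps whatever the
        # generated model carries.
        result = await client.parsing.get_job_structured_result(job_id="string")
        print(result.dict())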
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/structured"
            ),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobStructuredResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_raw_structured_result(self, job_id: str) -> typing.Any:
        """
        Get the raw structured result of a job

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_raw_structured_result(
            job_id="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/structured"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_raw_xlsx_result(self, job_id: str) -> typing.Any:
        """
        Get the raw XLSX result of a job

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_raw_xlsx_result(
            job_id="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/xlsx"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_result(
        self, job_id: str, *, organization_id: typing.Optional[str] = None
    ) -> ParsingJobMarkdownResult:
        """
        Get the markdown result of a job

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_result(
            job_id="string",
        )
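
        # A short follow-up sketch. Assumption: ParsingJobMarkdownResult
        # exposes the rendered output under a `markdown` field; inspect the
        # generated type if it is named differently.
        result = await client.parsing.get_job_result(job_id="string")
        print(result.markdown)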
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/markdown"
            ),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobMarkdownResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_raw_md_result(self, job_id: str) -> typing.Any:
        """
        Get the raw markdown result of a job

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_raw_md_result(
            job_id="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/markdown"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_json_result(
        self, job_id: str, *, organization_id: typing.Optional[str] = None
    ) -> ParsingJobJsonResult:
        """
        Get the JSON result of a job

        Parameters:
            - job_id: str.

            - organization_id: typing.Optional[str].
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_json_result(
            job_id="string",
        )
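
        # A short follow-up sketch. Assumption: the JSON result carries a
        # per-page `pages` list; `.dict()` on the pydantic v1 model avoids
        # guessing further field names.
        result = await client.parsing.get_job_json_result(job_id="string")
        for page in result.dict().get("pages", []):
            print(page)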
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/json"),
            params=remove_none_from_dict({"organization_id": organization_id}),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(ParsingJobJsonResult, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_job_json_raw_result(self, job_id: str) -> typing.Any:
        """
        Get the raw JSON result of a job

        Parameters:
            - job_id: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_job_json_raw_result(
            job_id="string",
        )
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/result/raw/json"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.Any, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def get_parsing_history_result(self) -> typing.List[ParsingHistoryItem]:
        """
        Get the parsing history for the current user

        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.get_parsing_history_result()
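
        # A short iteration sketch: the call returns typed list items, and
        # `.dict()` (pydantic v1) shows whatever fields ParsingHistoryItem
        # carries without assuming their names.
        for item in await client.parsing.get_parsing_history_result():
            print(item.dict())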
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(f"{self._client_wrapper.get_base_url()}/", "api/v1/parsing/history"),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(typing.List[ParsingHistoryItem], _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)

    async def generate_presigned_url(self, job_id: str, filename: str) -> PresignedUrl:
        """
        Generate a presigned URL for a job

        Parameters:
            - job_id: str.

            - filename: str.
        ---
        from llama_cloud.client import AsyncLlamaCloud

        client = AsyncLlamaCloud(
            token="YOUR_TOKEN",
        )
        await client.parsing.generate_presigned_url(
            job_id="string",
            filename="string",
        )
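
        # A minimal download sketch. Assumption: PresignedUrl exposes the
        # signed location under a `url` field; adjust if the generated model
        # names it differently.
        import httpx

        presigned = await client.parsing.generate_presigned_url(
            job_id="string",
            filename="string",
        )
        async with httpx.AsyncClient() as http:
            file_response = await http.get(presigned.url)
            file_response.raise_for_status()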
        """
        _response = await self._client_wrapper.httpx_client.request(
            "GET",
            urllib.parse.urljoin(
                f"{self._client_wrapper.get_base_url()}/", f"api/v1/parsing/job/{job_id}/read/{filename}"
            ),
            headers=self._client_wrapper.get_headers(),
            timeout=60,
        )
        if 200 <= _response.status_code < 300:
            return pydantic.parse_obj_as(PresignedUrl, _response.json())  # type: ignore
        if _response.status_code == 422:
            raise UnprocessableEntityError(pydantic.parse_obj_as(HttpValidationError, _response.json()))  # type: ignore
        try:
            _response_json = _response.json()
        except JSONDecodeError:
            raise ApiError(status_code=_response.status_code, body=_response.text)
        raise ApiError(status_code=_response.status_code, body=_response_json)
