From 89d6fedae9534fd265e7b17ddab8c41d53f753b8 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Tue, 4 Nov 2025 10:05:34 -0600 Subject: [PATCH 01/17] Add test to check behavior of is_extracted metadata during normalization --- .../partition/common/test_common.py | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/test_unstructured/partition/common/test_common.py b/test_unstructured/partition/common/test_common.py index e8cfeb233d..35825dedf4 100644 --- a/test_unstructured/partition/common/test_common.py +++ b/test_unstructured/partition/common/test_common.py @@ -4,6 +4,7 @@ import numpy as np import pytest from PIL import Image +from unstructured_inference.constants import IsExtracted from unstructured_inference.inference import layout from unstructured_inference.inference.elements import TextRegion from unstructured_inference.inference.layoutelement import LayoutElement @@ -445,3 +446,23 @@ def test_ocr_data_to_elements(): points=layout_el.bbox.coordinates, system=coordinate_system, ) + + +def test_normalize_layout_element_layout_element_text_source_metadata(): + layout_element = LayoutElement.from_coords( + type="NarrativeText", + x1=1, + y1=2, + x2=3, + y2=4, + text="Some lovely text", + is_extracted=IsExtracted.TRUE, + ) + coordinate_system = PixelSpace(width=10, height=20) + element = common.normalize_layout_element( + layout_element, + coordinate_system=coordinate_system, + ) + assert hasattr(element, "metadata") + assert hasattr(element.metadata, "is_extracted") + assert element.metadata.is_extracted == "true" From 130c867782cb78497c7bfdaecbbf849f4fed07b7 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Tue, 4 Nov 2025 10:09:33 -0600 Subject: [PATCH 02/17] test element merge behavior for extracted text metadata --- .../pdf_image/test_merge_elements.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 test_unstructured/partition/pdf_image/test_merge_elements.py diff --git a/test_unstructured/partition/pdf_image/test_merge_elements.py b/test_unstructured/partition/pdf_image/test_merge_elements.py new file mode 100644 index 0000000000..714fc3e591 --- /dev/null +++ b/test_unstructured/partition/pdf_image/test_merge_elements.py @@ -0,0 +1,49 @@ +from PIL import Image + +from unstructured_inference.inference.layout import DocumentLayout, PageLayout +from unstructured_inference.inference.layoutelement import LayoutElement, LayoutElements +from unstructured_inference.inference.elements import Rectangle +from unstructured.partition.pdf_image.pdfminer_processing import merge_inferred_with_extracted_layout +from unstructured_inference.constants import IsExtracted + +def test_text_source_preserved_during_merge(): + """Test that text_source property is preserved when elements are merged.""" + + # Create two simple LayoutElements with different text_source values + inferred_element = LayoutElement( + bbox=Rectangle(0, 0, 100, 50), + text=None, + is_extracted=IsExtracted.FALSE + ) + + extracted_element = LayoutElement( + bbox=Rectangle(0, 0, 100, 50), + text="Extracted text", + is_extracted=IsExtracted.TRUE + ) + + # Create LayoutElements arrays + inferred_layout_elements = LayoutElements.from_list([inferred_element]) + extracted_layout_elements = LayoutElements.from_list([extracted_element]) + + # Create a PageLayout for the inferred layout + image = Image.new("RGB", (200, 200)) + inferred_page = PageLayout(number=1, image=image) + inferred_page.elements_array = inferred_layout_elements + + # Create DocumentLayout from the PageLayout + 
inferred_document_layout = DocumentLayout(pages=[inferred_page]) + + # Merge them + merged_layout = merge_inferred_with_extracted_layout( + inferred_document_layout=inferred_document_layout, + extracted_layout=[extracted_layout_elements], + hi_res_model_name="test_model", + ) + + # Verify text_source is preserved + # Check the merged page's elements_array + merged_page = merged_layout.pages[0] + assert "Extracted text" in merged_page.elements_array.texts + assert hasattr(merged_page.elements_array, 'is_extracted_array') + assert IsExtracted.TRUE in merged_page.elements_array.is_extracted_array From 1a78d06fc66ac7bcbe8c574a7b7ebe44d7f518ef Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Tue, 4 Nov 2025 10:27:31 -0600 Subject: [PATCH 03/17] support is_extracted metadata for elements --- unstructured/documents/elements.py | 2 ++ unstructured/partition/common/common.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/unstructured/documents/elements.py b/unstructured/documents/elements.py index 0caf340b96..dbf4c4d3ef 100644 --- a/unstructured/documents/elements.py +++ b/unstructured/documents/elements.py @@ -206,6 +206,7 @@ class ElementMetadata: # -- used for Table elements to capture rows/col structure -- text_as_html: Optional[str] + is_extracted: Optional[str] table_as_cells: Optional[dict[str, str | int]] url: Optional[str] @@ -498,6 +499,7 @@ def field_consolidation_strategies(cls) -> dict[str, ConsolidationStrategy]: "image_base64": cls.DROP, "image_mime_type": cls.DROP, "is_continuation": cls.DROP, # -- not expected, added by chunking, not before -- + "is_extracted": cls.DROP, "languages": cls.LIST_UNIQUE, "last_modified": cls.FIRST, "link_texts": cls.LIST_CONCATENATE, diff --git a/unstructured/partition/common/common.py b/unstructured/partition/common/common.py index 468d356b44..d18fc8c87b 100644 --- a/unstructured/partition/common/common.py +++ b/unstructured/partition/common/common.py @@ -2,6 +2,7 @@ import numbers import subprocess +from enum import Enum from io import BufferedReader, BytesIO, TextIOWrapper from tempfile import SpooledTemporaryFile from time import sleep @@ -58,12 +59,17 @@ def normalize_layout_element( prob = layout_dict.get("prob") aux_origin = layout_dict.get("source", None) origin = None + if isinstance(layout_dict.get("is_extracted"), Enum): + is_extracted = layout_dict["is_extracted"].value + else: + is_extracted = None if aux_origin: origin = aux_origin.value if prob and isinstance(prob, (int, str, float, numbers.Number)): class_prob_metadata = ElementMetadata(detection_class_prob=float(prob)) # type: ignore else: class_prob_metadata = ElementMetadata() + class_prob_metadata.is_extracted = is_extracted common_kwargs = { "coordinates": coordinates, "coordinate_system": coordinate_system, From abcc4f3917bee083f0cb4c2c401338c1f2cb6998 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Tue, 4 Nov 2025 10:27:44 -0600 Subject: [PATCH 04/17] Add merge logic for is_extracted --- unstructured/partition/pdf_image/pdfminer_processing.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py index 8941d5022b..5d3da3a203 100644 --- a/unstructured/partition/pdf_image/pdfminer_processing.py +++ b/unstructured/partition/pdf_image/pdfminer_processing.py @@ -8,7 +8,7 @@ from pdfminer.pdftypes import PDFObjRef from pdfminer.utils import open_filename from unstructured_inference.config import inference_config -from 
unstructured_inference.constants import FULL_PAGE_REGION_THRESHOLD
+from unstructured_inference.constants import FULL_PAGE_REGION_THRESHOLD, IsExtracted
 from unstructured_inference.inference.elements import Rectangle
 
 from unstructured.documents.coordinates import PixelSpace, PointSpace
@@ -647,13 +647,14 @@ def merge_inferred_with_extracted_layout(
         merged_layout = sort_text_regions(merged_layout, SORT_MODE_BASIC)
         # so that we can modify the text without worrying about hitting length limit
         merged_layout.texts = merged_layout.texts.astype(object)
-
+        merged_layout.is_extracted_array = merged_layout.is_extracted_array.astype(object)
         for i, text in enumerate(merged_layout.texts):
             if text is None:
                 text = aggregate_embedded_text_by_block(
                     target_region=merged_layout.slice([i]),
                     source_regions=extracted_page_layout,
                 )
+                merged_layout.is_extracted_array[i] = IsExtracted.TRUE
             merged_layout.texts[i] = remove_control_characters(text)
 
         inferred_page.elements_array = merged_layout

From 7e159c48eb7912a5295d44904e954b62994dba3f Mon Sep 17 00:00:00 2001
From: Alan Bertl
Date: Tue, 4 Nov 2025 11:30:28 -0600
Subject: [PATCH 05/17] Add test that pdfminer-processed file layout elements are recognized as extracted

---
 .../partition/pdf_image/test_pdfminer_processing.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/test_unstructured/partition/pdf_image/test_pdfminer_processing.py b/test_unstructured/partition/pdf_image/test_pdfminer_processing.py
index 309ea1336f..e6d9774405 100644
--- a/test_unstructured/partition/pdf_image/test_pdfminer_processing.py
+++ b/test_unstructured/partition/pdf_image/test_pdfminer_processing.py
@@ -4,7 +4,7 @@
 import pytest
 from pdfminer.layout import LAParams
 from PIL import Image
-from unstructured_inference.constants import Source as InferenceSource
+from unstructured_inference.constants import Source as InferenceSource, IsExtracted
 from unstructured_inference.inference.elements import (
     EmbeddedTextRegion,
     Rectangle,
@@ -249,6 +249,11 @@ def test_process_file_with_pdfminer():
     assert links[0][0]["url"] == "https://layout-parser.github.io"
 
 
+def test_process_file_with_pdfminer_is_extracted_array():
+    layout, _ = process_file_with_pdfminer(example_doc_path("pdf/layout-parser-paper-fast.pdf"))
+    assert all(is_extracted is IsExtracted.TRUE for is_extracted in layout[0].is_extracted_array)
+
+
 @patch("unstructured.partition.pdf_image.pdfminer_utils.LAParams", return_value=LAParams())
 def test_laprams_are_passed_from_partition_to_pdfminer(pdfminer_mock):
     partition(

From ae8f1a178499e317d37f05ae95eae6214c0391e3 Mon Sep 17 00:00:00 2001
From: Alan Bertl
Date: Tue, 4 Nov 2025 15:33:54 -0600
Subject: [PATCH 06/17] merge array elements while retaining extracted status

---
 unstructured/partition/pdf_image/pdfminer_processing.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py
index 5d3da3a203..ab01c3f384 100644
--- a/unstructured/partition/pdf_image/pdfminer_processing.py
+++ b/unstructured/partition/pdf_image/pdfminer_processing.py
@@ -128,6 +128,7 @@ def _merge_extracted_into_inferred_when_almost_the_same(
     extracted_to_remove = extracted_layout.slice(extracted_almost_the_same_as_inferred)
     # copy here in case we change the extracted layout later
     inferred_layout.texts[inferred_indices_to_update] = extracted_to_remove.texts.copy()
+    inferred_layout.is_extracted_array[inferred_indices_to_update] = extracted_to_remove.is_extracted_array.copy()
     # use coords 
that can bound BOTH the inferred and extracted region as final bounding box coords inferred_layout.element_coords[inferred_indices_to_update] = _minimum_containing_coords( inferred_layout.slice(inferred_indices_to_update), @@ -426,6 +427,7 @@ def process_page_layout_from_pdfminer( element_class_ids=np.array(element_class), element_class_id_map={0: ElementType.UNCATEGORIZED_TEXT, 1: ElementType.IMAGE}, sources=np.array([Source.PDFMINER] * len(element_class)), + is_extracted_array=np.array([IsExtracted.TRUE] * len(element_class)), ), urls_metadata, ) From d7fc5a091f17f93d6681a3435c61983fa9c3619c Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Tue, 4 Nov 2025 15:45:03 -0600 Subject: [PATCH 07/17] formatting --- .../partition/pdf_image/test_merge_elements.py | 11 +++++++---- .../partition/pdf_image/test_pdfminer_processing.py | 3 ++- .../partition/pdf_image/pdfminer_processing.py | 4 +++- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/test_unstructured/partition/pdf_image/test_merge_elements.py b/test_unstructured/partition/pdf_image/test_merge_elements.py index 714fc3e591..7905f29ede 100644 --- a/test_unstructured/partition/pdf_image/test_merge_elements.py +++ b/test_unstructured/partition/pdf_image/test_merge_elements.py @@ -1,10 +1,13 @@ from PIL import Image - +from unstructured_inference.constants import IsExtracted +from unstructured_inference.inference.elements import Rectangle from unstructured_inference.inference.layout import DocumentLayout, PageLayout from unstructured_inference.inference.layoutelement import LayoutElement, LayoutElements -from unstructured_inference.inference.elements import Rectangle -from unstructured.partition.pdf_image.pdfminer_processing import merge_inferred_with_extracted_layout -from unstructured_inference.constants import IsExtracted + +from unstructured.partition.pdf_image.pdfminer_processing import ( + merge_inferred_with_extracted_layout, +) + def test_text_source_preserved_during_merge(): """Test that text_source property is preserved when elements are merged.""" diff --git a/test_unstructured/partition/pdf_image/test_pdfminer_processing.py b/test_unstructured/partition/pdf_image/test_pdfminer_processing.py index e6d9774405..31eae49283 100644 --- a/test_unstructured/partition/pdf_image/test_pdfminer_processing.py +++ b/test_unstructured/partition/pdf_image/test_pdfminer_processing.py @@ -4,7 +4,8 @@ import pytest from pdfminer.layout import LAParams from PIL import Image -from unstructured_inference.constants import Source as InferenceSource, IsExtracted +from unstructured_inference.constants import IsExtracted +from unstructured_inference.constants import Source as InferenceSource from unstructured_inference.inference.elements import ( EmbeddedTextRegion, Rectangle, diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py index ab01c3f384..9ead11a2b3 100644 --- a/unstructured/partition/pdf_image/pdfminer_processing.py +++ b/unstructured/partition/pdf_image/pdfminer_processing.py @@ -128,7 +128,9 @@ def _merge_extracted_into_inferred_when_almost_the_same( extracted_to_remove = extracted_layout.slice(extracted_almost_the_same_as_inferred) # copy here in case we change the extracted layout later inferred_layout.texts[inferred_indices_to_update] = extracted_to_remove.texts.copy() - inferred_layout.is_extracted_array[inferred_indices_to_update] = extracted_to_remove.is_extracted_array.copy() + inferred_layout.is_extracted_array[inferred_indices_to_update] = ( + 
extracted_to_remove.is_extracted_array.copy() + ) # use coords that can bound BOTH the inferred and extracted region as final bounding box coords inferred_layout.element_coords[inferred_indices_to_update] = _minimum_containing_coords( inferred_layout.slice(inferred_indices_to_update), From 22fc9b332d379a1b4bd97e003a6b3bd67fcfef91 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Wed, 5 Nov 2025 13:12:47 -0600 Subject: [PATCH 08/17] update deps --- requirements/base.txt | 12 ++++---- requirements/dev.txt | 2 +- requirements/extra-markdown.txt | 2 +- requirements/extra-paddleocr.txt | 20 ++++++------- requirements/extra-pdf-image.in | 2 +- requirements/extra-pdf-image.txt | 49 ++++++++++++++++---------------- requirements/extra-pptx.txt | 2 +- requirements/extra-xlsx.txt | 12 ++++---- requirements/huggingface.txt | 8 +++--- requirements/test.txt | 12 ++++---- 10 files changed, 61 insertions(+), 60 deletions(-) diff --git a/requirements/base.txt b/requirements/base.txt index cd904abb86..25a996dc54 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -27,7 +27,7 @@ click==8.3.0 # via # nltk # python-oxmsg -cryptography==46.0.2 +cryptography==46.0.3 # via unstructured-client dataclasses-json==0.6.7 # via @@ -85,11 +85,11 @@ packaging==25.0 # via # marshmallow # unstructured-client -psutil==7.1.0 +psutil==7.1.3 # via -r ./base.in pycparser==2.23 # via cffi -pypdf==6.1.1 +pypdf==6.1.3 # via unstructured-client python-dateutil==2.9.0.post0 # via unstructured-client @@ -99,9 +99,9 @@ python-magic==0.4.27 # via -r ./base.in python-oxmsg==0.0.2 # via -r ./base.in -rapidfuzz==3.14.1 +rapidfuzz==3.14.3 # via -r ./base.in -regex==2025.9.18 +regex==2025.11.3 # via nltk requests==2.32.5 # via @@ -150,5 +150,5 @@ urllib3==2.5.0 # unstructured-client webencodings==0.5.1 # via html5lib -wrapt==1.17.3 +wrapt==2.0.0 # via -r ./base.in diff --git a/requirements/dev.txt b/requirements/dev.txt index d759e083a5..8d4b60b5a1 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -50,7 +50,7 @@ typing-extensions==4.15.0 # -c ./base.txt # -c ./test.txt # virtualenv -virtualenv==20.35.3 +virtualenv==20.35.4 # via pre-commit wheel==0.45.1 # via pip-tools diff --git a/requirements/extra-markdown.txt b/requirements/extra-markdown.txt index 98ad827c8b..815640b728 100644 --- a/requirements/extra-markdown.txt +++ b/requirements/extra-markdown.txt @@ -4,5 +4,5 @@ # # pip-compile ./extra-markdown.in # -markdown==3.9 +markdown==3.10 # via -r ./extra-markdown.in diff --git a/requirements/extra-paddleocr.txt b/requirements/extra-paddleocr.txt index ff3c9a3018..7f320650c3 100644 --- a/requirements/extra-paddleocr.txt +++ b/requirements/extra-paddleocr.txt @@ -30,7 +30,7 @@ charset-normalizer==3.4.4 # via # -c ./base.txt # requests -cython==3.1.4 +cython==3.2.0 # via unstructured-paddleocr exceptiongroup==1.3.0 # via @@ -58,7 +58,7 @@ idna==3.11 # anyio # httpx # requests -imageio==2.37.0 +imageio==2.37.2 # via scikit-image lazy-loader==0.4 # via scikit-image @@ -101,23 +101,23 @@ packaging==25.0 # -c ./base.txt # lazy-loader # scikit-image -paddlepaddle==3.2.0 +paddlepaddle==3.2.1 # via -r ./extra-paddleocr.in -pillow==11.3.0 +pillow==12.0.0 # via # imageio # paddlepaddle # scikit-image # unstructured-paddleocr -protobuf==6.32.1 +protobuf==6.33.0 # via # -c ./deps/constraints.txt # paddlepaddle pyclipper==1.3.0.post6 # via unstructured-paddleocr -pydantic==2.12.2 +pydantic==2.12.4 # via albumentations -pydantic-core==2.41.4 +pydantic-core==2.41.5 # via pydantic python-docx==1.2.0 # via unstructured-paddleocr @@ 
-125,7 +125,7 @@ pyyaml==6.0.3 # via # albumentations # unstructured-paddleocr -rapidfuzz==3.14.1 +rapidfuzz==3.14.3 # via # -c ./base.txt # unstructured-paddleocr @@ -153,9 +153,9 @@ soupsieve==2.8 # via # -c ./base.txt # beautifulsoup4 -stringzilla==4.2.1 +stringzilla==4.2.3 # via albucore -termcolor==3.1.0 +termcolor==3.2.0 # via fire tifffile==2025.5.10 # via scikit-image diff --git a/requirements/extra-pdf-image.in b/requirements/extra-pdf-image.in index b0caffbb95..1c05870d42 100644 --- a/requirements/extra-pdf-image.in +++ b/requirements/extra-pdf-image.in @@ -12,5 +12,5 @@ google-cloud-vision effdet # Do not move to constraints.in, otherwise unstructured-inference will not be upgraded # when unstructured library is. -unstructured-inference>=1.0.5 +unstructured-inference>=1.1.1 unstructured.pytesseract>=0.3.12 diff --git a/requirements/extra-pdf-image.txt b/requirements/extra-pdf-image.txt index b718909ae4..8e4ab6e06f 100644 --- a/requirements/extra-pdf-image.txt +++ b/requirements/extra-pdf-image.txt @@ -4,7 +4,7 @@ # # pip-compile ./extra-pdf-image.in # -accelerate==1.10.1 +accelerate==1.11.0 # via unstructured-inference antlr4-python3-runtime==4.9.3 # via omegaconf @@ -27,13 +27,13 @@ coloredlogs==15.0.1 # via onnxruntime contourpy==1.3.2 # via matplotlib -cryptography==46.0.2 +cryptography==46.0.3 # via # -c ./base.txt # pdfminer-six cycler==0.12.1 # via matplotlib -deprecated==1.2.18 +deprecated==1.3.1 # via pikepdf effdet==0.4.1 # via -r ./extra-pdf-image.in @@ -46,31 +46,32 @@ flatbuffers==25.9.23 # via onnxruntime fonttools==4.60.1 # via matplotlib -fsspec==2025.9.0 +fsspec==2025.10.0 # via # huggingface-hub # torch -google-api-core[grpc]==2.26.0 +google-api-core[grpc]==2.28.1 # via google-cloud-vision -google-auth==2.41.1 +google-auth==2.42.1 # via # google-api-core # google-cloud-vision -google-cloud-vision==3.10.2 +google-cloud-vision==3.11.0 # via -r ./extra-pdf-image.in -googleapis-common-protos==1.70.0 +googleapis-common-protos==1.71.0 # via # google-api-core # grpcio-status -grpcio==1.75.1 +grpcio==1.76.0 # via # google-api-core + # google-cloud-vision # grpcio-status -grpcio-status==1.75.1 +grpcio-status==1.76.0 # via google-api-core -hf-xet==1.1.10 +hf-xet==1.2.0 # via huggingface-hub -huggingface-hub==0.35.3 +huggingface-hub==0.36.0 # via # accelerate # timm @@ -123,7 +124,7 @@ onnx==1.19.1 # via # -r ./extra-pdf-image.in # unstructured-inference -onnxruntime==1.23.1 +onnxruntime==1.23.2 # via # -r ./extra-pdf-image.in # unstructured-inference @@ -150,9 +151,9 @@ pdfminer-six==20250327 # unstructured-inference pi-heif==1.1.1 # via -r ./extra-pdf-image.in -pikepdf==9.11.0 +pikepdf==10.0.0 # via -r ./extra-pdf-image.in -pillow==11.3.0 +pillow==12.0.0 # via # matplotlib # pdf2image @@ -164,7 +165,7 @@ proto-plus==1.26.1 # via # google-api-core # google-cloud-vision -protobuf==6.32.1 +protobuf==6.33.0 # via # -c ./deps/constraints.txt # google-api-core @@ -174,7 +175,7 @@ protobuf==6.32.1 # onnx # onnxruntime # proto-plus -psutil==7.1.0 +psutil==7.1.3 # via # -c ./base.txt # accelerate @@ -192,11 +193,11 @@ pycparser==2.23 # cffi pyparsing==3.2.5 # via matplotlib -pypdf==6.1.1 +pypdf==6.1.3 # via # -c ./base.txt # -r ./extra-pdf-image.in -pypdfium2==4.30.0 +pypdfium2==5.0.0 # via unstructured-inference python-dateutil==2.9.0.post0 # via @@ -214,11 +215,11 @@ pyyaml==6.0.3 # omegaconf # timm # transformers -rapidfuzz==3.14.1 +rapidfuzz==3.14.3 # via # -c ./base.txt # unstructured-inference -regex==2025.9.18 +regex==2025.11.3 # via # -c ./base.txt # transformers @@ 
-245,7 +246,7 @@ sympy==1.14.0 # via # onnxruntime # torch -timm==1.0.20 +timm==1.0.22 # via # effdet # unstructured-inference @@ -282,7 +283,7 @@ typing-extensions==4.15.0 # torch tzdata==2025.2 # via pandas -unstructured-inference==1.0.5 +unstructured-inference==1.1.1 # via -r ./extra-pdf-image.in unstructured-pytesseract==0.3.15 # via -r ./extra-pdf-image.in @@ -291,7 +292,7 @@ urllib3==2.5.0 # -c ./base.txt # -c ./deps/constraints.txt # requests -wrapt==1.17.3 +wrapt==2.0.0 # via # -c ./base.txt # deprecated diff --git a/requirements/extra-pptx.txt b/requirements/extra-pptx.txt index 8e07963237..267d8229ac 100644 --- a/requirements/extra-pptx.txt +++ b/requirements/extra-pptx.txt @@ -6,7 +6,7 @@ # lxml==6.0.2 # via python-pptx -pillow==11.3.0 +pillow==12.0.0 # via python-pptx python-pptx==1.0.2 # via -r ./extra-pptx.in diff --git a/requirements/extra-xlsx.txt b/requirements/extra-xlsx.txt index 6235383d48..329f69770c 100644 --- a/requirements/extra-xlsx.txt +++ b/requirements/extra-xlsx.txt @@ -8,16 +8,16 @@ cffi==2.0.0 # via # -c ./base.txt # cryptography -cryptography==46.0.2 +cryptography==46.0.3 # via # -c ./base.txt # msoffcrypto-tool et-xmlfile==2.0.0 # via openpyxl msoffcrypto-tool==5.4.2 - # via -r ./extra-xlsx.in + # via -r extra-xlsx.in networkx==3.4.2 - # via -r ./extra-xlsx.in + # via -r extra-xlsx.in numpy==2.2.6 # via # -c ./base.txt @@ -27,9 +27,9 @@ olefile==0.47 # -c ./base.txt # msoffcrypto-tool openpyxl==3.1.5 - # via -r ./extra-xlsx.in + # via -r extra-xlsx.in pandas==2.3.3 - # via -r ./extra-xlsx.in + # via -r extra-xlsx.in pycparser==2.23 # via # -c ./base.txt @@ -51,4 +51,4 @@ typing-extensions==4.15.0 tzdata==2025.2 # via pandas xlrd==2.0.2 - # via -r ./extra-xlsx.in + # via -r extra-xlsx.in diff --git a/requirements/huggingface.txt b/requirements/huggingface.txt index 6f9263585a..ff5ab3f195 100644 --- a/requirements/huggingface.txt +++ b/requirements/huggingface.txt @@ -21,13 +21,13 @@ filelock==3.20.0 # huggingface-hub # torch # transformers -fsspec==2025.9.0 +fsspec==2025.10.0 # via # huggingface-hub # torch -hf-xet==1.1.10 +hf-xet==1.2.0 # via huggingface-hub -huggingface-hub==0.35.3 +huggingface-hub==0.36.0 # via # tokenizers # transformers @@ -64,7 +64,7 @@ pyyaml==6.0.3 # via # huggingface-hub # transformers -regex==2025.9.18 +regex==2025.11.3 # via # -c ./base.txt # sacremoses diff --git a/requirements/test.txt b/requirements/test.txt index 4f71caccaa..d28e3ae10f 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -32,9 +32,9 @@ flake8-print==5.0.0 # via -r ./test.in freezegun==1.5.5 # via -r ./test.in -grpcio==1.75.1 +grpcio==1.76.0 # via -r ./test.in -iniconfig==2.1.0 +iniconfig==2.3.0 # via pytest liccheck==0.9.2 # via -r ./test.in @@ -66,9 +66,9 @@ pycodestyle==2.14.0 # via # flake8 # flake8-print -pydantic==2.12.2 +pydantic==2.12.4 # via -r ./test.in -pydantic-core==2.41.4 +pydantic-core==2.41.5 # via pydantic pyflakes==3.4.0 # via @@ -91,9 +91,9 @@ python-dateutil==2.9.0.post0 # via # -c ./base.txt # freezegun -pytokens==0.2.0 +pytokens==0.3.0 # via black -ruff==0.14.0 +ruff==0.14.3 # via -r ./test.in semantic-version==2.10.0 # via liccheck From 2f59dc093e6ff35e3f31e38a4be4237e6822695c Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Wed, 5 Nov 2025 13:49:31 -0600 Subject: [PATCH 09/17] format --- .../partition/pdf_image/test_merge_elements.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/test_unstructured/partition/pdf_image/test_merge_elements.py 
b/test_unstructured/partition/pdf_image/test_merge_elements.py index 7905f29ede..0ada627434 100644 --- a/test_unstructured/partition/pdf_image/test_merge_elements.py +++ b/test_unstructured/partition/pdf_image/test_merge_elements.py @@ -14,15 +14,11 @@ def test_text_source_preserved_during_merge(): # Create two simple LayoutElements with different text_source values inferred_element = LayoutElement( - bbox=Rectangle(0, 0, 100, 50), - text=None, - is_extracted=IsExtracted.FALSE + bbox=Rectangle(0, 0, 100, 50), text=None, is_extracted=IsExtracted.FALSE ) extracted_element = LayoutElement( - bbox=Rectangle(0, 0, 100, 50), - text="Extracted text", - is_extracted=IsExtracted.TRUE + bbox=Rectangle(0, 0, 100, 50), text="Extracted text", is_extracted=IsExtracted.TRUE ) # Create LayoutElements arrays @@ -48,5 +44,5 @@ def test_text_source_preserved_during_merge(): # Check the merged page's elements_array merged_page = merged_layout.pages[0] assert "Extracted text" in merged_page.elements_array.texts - assert hasattr(merged_page.elements_array, 'is_extracted_array') + assert hasattr(merged_page.elements_array, "is_extracted_array") assert IsExtracted.TRUE in merged_page.elements_array.is_extracted_array From 9b96a95ddc8fc3474a442e9a63f35fed34088757 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Wed, 5 Nov 2025 13:54:18 -0600 Subject: [PATCH 10/17] Update changelog and version --- CHANGELOG.md | 9 +++++++++ unstructured/__version__.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64a7157619..90b7a706bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +## 0.18.17-dev0 + +### Enhancement +- Flag extracted elements as such in the metadata for downstream use + +### Features + +### Fixes + ## 0.18.16 ### Enhancement diff --git a/unstructured/__version__.py b/unstructured/__version__.py index c71f847e71..08c0547590 100644 --- a/unstructured/__version__.py +++ b/unstructured/__version__.py @@ -1 +1 @@ -__version__ = "0.18.16" # pragma: no cover +__version__ = "0.18.17-dev0" # pragma: no cover From bb5ff8b1ae446f0b1bd128dadfdc5807f2f83fcc Mon Sep 17 00:00:00 2001 From: ryannikolaidis <1208590+ryannikolaidis@users.noreply.github.com> Date: Wed, 5 Nov 2025 12:40:07 -0800 Subject: [PATCH 11/17] feat: track text source <- Ingest test fixtures update (#4113) This pull request includes updated ingest test fixtures. Please review and merge if appropriate. 
--- ...iomedical-Data-Scientists-2-pages.pdf.json | 26 +++ .../layout-parser-paper.pdf.json | 168 ++++++++++++++++++ 2 files changed, 194 insertions(+) diff --git a/test_unstructured_ingest/expected-structured-output/azure/Core-Skills-for-Biomedical-Data-Scientists-2-pages.pdf.json b/test_unstructured_ingest/expected-structured-output/azure/Core-Skills-for-Biomedical-Data-Scientists-2-pages.pdf.json index 24c362f451..859cdedfa1 100644 --- a/test_unstructured_ingest/expected-structured-output/azure/Core-Skills-for-Biomedical-Data-Scientists-2-pages.pdf.json +++ b/test_unstructured_ingest/expected-structured-output/azure/Core-Skills-for-Biomedical-Data-Scientists-2-pages.pdf.json @@ -4,6 +4,7 @@ "element_id": "1e41f20785644cdea2f017cfb67bb359", "text": "Core Skills for Biomedical Data Scientists", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -26,6 +27,7 @@ "element_id": "c915a2a57c901810a698491ca2393669", "text": "Maryam Zaringhalam, PhD, AAAS Science & Technology Policy Fellow", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -48,6 +50,7 @@ "element_id": "b24c3f8d268b2f834a00966d8faef975", "text": "Lisa Federer, MLIS, Data Science Training Coordinator", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -70,6 +73,7 @@ "element_id": "fcff333f886b39cee0a7084a9ff9204d", "text": "Michael F. Huerta, PhD, Associate Director of NLM for Program Development and NLM Coordinator of Data Science and Open Science Initiatives", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -92,6 +96,7 @@ "element_id": "1b86fad341db35208d75a543bcf819ae", "text": "Executive Summary", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -114,6 +119,7 @@ "element_id": "fee71d4f7ef7a5f253a44f6df648d12a", "text": "This report provides recommendations for a minimal set of core skills for biomedical data scientists based on analysis that draws on opinions of data scientists, curricula for existing biomedical data science programs, and requirements for biomedical data science jobs. Suggested high-level core skills include:", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -136,6 +142,7 @@ "element_id": "caa3c2eba90fedb7c8923ae8cd8de961", "text": "1. General biomedical subject matter knowledge: biomedical data scientists should have a general working knowledge of the principles of biology, bioinformatics, and basic clinical science;", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -158,6 +165,7 @@ "element_id": "a4622e6575ee04b0c4d74c0c6b3b2452", "text": "2. Programming language expertise: biomedical data scientists should be fluent in at least one programming language (typically R and/or Python);", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -180,6 +188,7 @@ "element_id": "206899164b194bb9c379531b35eae01b", "text": "3. Predictive analytics, modeling, and machine learning: while a range of statistical methods may be useful, predictive analytics, modeling, and machine learning emerged as especially important skills in biomedical data science;", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -202,6 +211,7 @@ "element_id": "36eb8f3c3778fbb71dc056571e71175d", "text": "4. 
Team science and scientific communication: “soft” skills, like the ability to work well on teams and communicate effectively in both verbal and written venues, may be as important as the more technical skills typically associated with data science.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -224,6 +234,7 @@ "element_id": "afe37b1ec10a6d08294ff0fb6df79996", "text": "5. Responsible data stewardship: a successful data scientist must be able to implement best practices for data management and stewardship, as well as conduct research in an ethical manner that maintains data security and privacy.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -246,6 +257,7 @@ "element_id": "b29f66200f2cc9ff2b49f3d07fd8022b", "text": "The report further details specific skills and expertise relevant to biomedical data scientists.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -268,6 +280,7 @@ "element_id": "bab05a183c34df666bfc920f04d17637", "text": "Motivation", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -290,6 +303,7 @@ "element_id": "f250e86931949c66fe99d742fd9be29c", "text": "Training a biomedical data science (BDS) workforce is a central theme in NLM’s Strategic Plan for the coming decade. That commitment is echoed in the NIH-wide Big Data to Knowledge (BD2K) initiative, which invested $61 million between FY2014 and FY2017 in training programs for the development and use of biomedical big data science methods and tools. In line with", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -312,6 +326,7 @@ "element_id": "9aa82368657b60536f152fd413aec316", "text": "Core Skills for Biomedical Data Scientists", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -334,6 +349,7 @@ "element_id": "4f2dbe3656a9ebc60c7e3426ad3cb3e3", "text": "_____________________________________________________________________________________________", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -356,6 +372,7 @@ "element_id": "cd359ae8c49885ead47318021438eead", "text": "this commitment, a recent report to the NLM Director recommended working across NIH to identify and develop core skills required of a biomedical data scientist to consistency across the cohort of NIH-trained data scientists. This report provides a set of recommended core skills based on analysis of current BD2K-funded training programs, biomedical data science job ads, and practicing members of the current data science workforce.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -378,6 +395,7 @@ "element_id": "bf8321a34edb7103ec4209f3e4a8a8da", "text": "Methodology", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -400,6 +418,7 @@ "element_id": "1e1d3d1a5c1397fc588393568d829bc8", "text": "The Workforce Excellence team took a three-pronged approach to identifying core skills required of a biomedical data scientist (BDS), drawing from:", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -422,6 +441,7 @@ "element_id": "45d7ff56632d66a2ab2d4dd2716d4d2e", "text": "a) Responses to a 2017 Kaggle1 survey2 of over 16,000 self-identified data scientists working across many industries. 
Analysis of the Kaggle survey responses from the current data science workforce provided insights into the current generation of data scientists, including how they were trained and what programming and analysis skills they use.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -444,6 +464,7 @@ "element_id": "bf452aac5123fcedda30dd6ed179f41c", "text": "b) Data science skills taught in BD2K-funded training programs. A qualitative content analysis was applied to the descriptions of required courses offered under the 12 BD2K-funded training programs. Each course was coded using qualitative data analysis software, with each skill that was present in the description counted once. The coding schema of data science-related skills was inductively developed and was organized into four major categories: (1) statistics and math skills; (2) computer science; (3) subject knowledge; (4) general skills, like communication and teamwork. The coding schema is detailed in Appendix A.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -466,6 +487,7 @@ "element_id": "ca176cbef532792b1f11830ff7520587", "text": "c) Desired skills identified from data science-related job ads. 59 job ads from government (8.5%), academia (42.4%), industry (33.9%), and the nonprofit sector (15.3%) were sampled from websites like Glassdoor, Linkedin, and Ziprecruiter. The content analysis methodology and coding schema utilized in analyzing the training programs were applied to the job descriptions. Because many job ads mentioned the same skill more than once, each occurrence of the skill was coded, therefore weighting important skills that were mentioned multiple times in a single ad.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -488,6 +510,7 @@ "element_id": "11b170fedd889c3b895bbd28acd811ca", "text": "Analysis of the above data provided insights into the current state of biomedical data science training, as well as a view into data science-related skills likely to be needed to prepare the BDS workforce to succeed in the future. Together, these analyses informed recommendations for core skills necessary for a competitive biomedical data scientist.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -510,6 +533,7 @@ "element_id": "2665aadf75bca259f1f5b4c91a53a301", "text": "1 Kaggle is an online community for data scientists, serving as a platform for collaboration, competition, and learning: http://kaggle.com", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -532,6 +556,7 @@ "element_id": "8bbfe1c3e6bca9a33226d20d69b2297a", "text": "2 In August 2017, Kaggle conducted an industry-wide survey to gain a clearer picture of the state of data science and machine learning. A standard set of questions were asked of all respondents, with more specific questions related to work for employed data scientists and questions related to learning for data scientists in training. 
Methodology and results: https://www.kaggle.com/kaggle/kaggle-survey-2017", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -554,6 +579,7 @@ "element_id": "dd4a661e1a3c898a5cf6328ba56b924d", "text": "2", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" diff --git a/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json b/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json index 9fcd28ff45..8f147a6b38 100644 --- a/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json +++ b/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json @@ -4,6 +4,7 @@ "element_id": "04fa31034847cbbf6c840f4da683ccf8", "text": "1", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -48,6 +49,7 @@ "element_id": "fc05a198b2ff732119edea8986775994", "text": "2", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -70,6 +72,7 @@ "element_id": "4a90480c2297c31b4d7ad43b0801ae98", "text": "0", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -92,6 +95,7 @@ "element_id": "e3a383b7e9439f39773c13ea769297b7", "text": "2 n u J 1 2 ] V C . s c [ 2 v 8 4 3 5 1 . 3 0 1 2 :", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -114,6 +118,7 @@ "element_id": "4608f9aa33a0cab158565817b0d15743", "text": "v", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -158,6 +163,7 @@ "element_id": "ed4e590932b333f40d0e1367b6b0e32e", "text": "i", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -180,6 +186,7 @@ "element_id": "8cb024fb60457b7c572b167801037f75", "text": "X", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -202,6 +209,7 @@ "element_id": "c202bdacd2daf4c52fa3a6ddd64a0728", "text": "r", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -224,6 +232,7 @@ "element_id": "3db474893ec321c81ef9d1a2afd5f660", "text": "a", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -246,6 +255,7 @@ "element_id": "d3be9e3d661e2a79f37257caa5b54d8c", "text": "LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -268,6 +278,7 @@ "element_id": "97c951d2dd3a1b5452d0c55e62e8ea78", "text": "Zejiang Shen! 
(4), Ruochen Zhang”, Melissa Dell?, Benjamin Charles Germain Lee*, Jacob Carlson’, and Weining Li®", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -290,6 +301,7 @@ "element_id": "23b8def20ce16f929d4f558b2a19f200", "text": "1 Allen Institute for AI shannons@allenai.org 2 Brown University ruochen zhang@brown.edu 3 Harvard University {melissadell,jacob carlson}@fas.harvard.edu 4 University of Washington bcgl@cs.washington.edu 5 University of Waterloo w422li@uwaterloo.ca", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -312,6 +324,7 @@ "element_id": "f1169388c7749db52e388e2fe4feaec6", "text": "Abstract. Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of im- portant innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applica- tions. The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout de- tection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digiti- zation pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-word use cases. The library is publicly available at https://layout-parser.github.io.", "metadata": { + "is_extracted": "true", "links": [ { "text": "https :// layout - parser . github . io", @@ -341,6 +354,7 @@ "element_id": "caffc7480fdd82a089ae387e01aabdb9", "text": "Keywords: Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -363,6 +377,7 @@ "element_id": "3a170066f972d25cc303a05ddc16d52c", "text": "1 Introduction", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -385,6 +400,7 @@ "element_id": "8de96d1e80af35f9b6954252e14c2caf", "text": "Deep Learning(DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11,", "metadata": { + "is_extracted": "true", "links": [ { "text": "11", @@ -414,6 +430,7 @@ "element_id": "e7e0acf197e89d650d39fa3ce929509e", "text": "2 Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -436,6 +453,7 @@ "element_id": "4b097cc42d7d30e720512dbce0cb4905", "text": "37], layout detection [38, 22], table detection [26], and scene text detection [4]. 
A generalized learning-based framework dramatically reduces the need for the manual specification of complicated rules, which is the status quo with traditional methods. DL has the potential to transform DIA pipelines and benefit a broad spectrum of large-scale document digitization projects.", "metadata": { + "is_extracted": "true", "links": [ { "text": "37", @@ -485,6 +503,7 @@ "element_id": "45844a4901777afaf6de9a0994e017eb", "text": "However, there are several practical difficulties for taking advantages of re- cent advances in DL-based methods: 1) DL models are notoriously convoluted for reuse and extension. Existing models are developed using distinct frame- works like TensorFlow [1] or PyTorch [24], and the high-level parameters can be obfuscated by implementation details [8]. It can be a time-consuming and frustrating experience to debug, reproduce, and adapt existing models for DIA, and many researchers who would benefit the most from using these methods lack the technical background to implement them from scratch. 2) Document images contain diverse and disparate patterns across domains, and customized training is often required to achieve a desirable detection accuracy. Currently there is no full-fledged infrastructure for easily curating the target document image datasets and fine-tuning or re-training the models. 3) DIA usually requires a sequence of models and other processing to obtain the final outputs. Often research teams use DL models and then perform further document analyses in separate processes, and these pipelines are not documented in any central location (and often not documented at all). This makes it difficult for research teams to learn about how full pipelines are implemented and leads them to invest significant resources in reinventing the DIA wheel.", "metadata": { + "is_extracted": "true", "links": [ { "text": "1", @@ -524,6 +543,7 @@ "element_id": "6f3c8d55dd5a4f95d8a59d146ca9ffa7", "text": "LayoutParser provides a unified toolkit to support DL-based document image analysis and processing. To address the aforementioned challenges, LayoutParser is built with the following components:", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -546,6 +566,7 @@ "element_id": "9ce12a49c1a9972b4cd2c3f66595b2b6", "text": "1. An off-the-shelf toolkit for applying DL models for layout detection, character recognition, and other DIA tasks (Section 3)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -568,6 +589,7 @@ "element_id": "40f42a96bdd1559e09d74090c0fe9df3", "text": "2. A rich repository of pre-trained neural network models (Model Zoo) that underlies the off-the-shelf usage", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -590,6 +612,7 @@ "element_id": "0ca448d3ae0c4ee73bf46e8edfcd417d", "text": "3. Comprehensive tools for efficient document image data annotation and model tuning to support different levels of customization", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -612,6 +635,7 @@ "element_id": "7a9de9b00d51bd670feccc2eb84a147e", "text": "4. 
A DL model hub and community platform for the easy sharing, distribu- tion, and discussion of DIA models and pipelines, to promote reusability, reproducibility, and extensibility (Section 4)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -634,6 +658,7 @@ "element_id": "8e216e91ff3471241858f1df445cdf0a", "text": "The library implements simple and intuitive Python APIs without sacrificing generalizability and versatility, and can be easily installed via pip. Its convenient functions for handling document image data can be seamlessly integrated with existing DIA pipelines. With detailed documentations and carefully curated tutorials, we hope this tool will benefit a variety of end-users, and will lead to advances in applications in both industry and academic research.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -656,6 +681,7 @@ "element_id": "583775f22c8080098beebbef960e2fbf", "text": "LayoutParser is well aligned with recent efforts for improving DL model reusability in other disciplines like natural language processing [8, 34] and com- puter vision [35], but with a focus on unique challenges in DIA. We show LayoutParser can be applied in sophisticated and large-scale digitization projects", "metadata": { + "is_extracted": "true", "links": [ { "text": "8", @@ -695,6 +721,7 @@ "element_id": "f5a6697190c20bf6030d8e4ae8f6861a", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -717,6 +744,7 @@ "element_id": "50846086f4d9ece02052735686278699", "text": "that require precision, efficiency, and robustness, as well as simple and light- weight document processing tasks focusing on efficacy and flexibility (Section 5). LayoutParser is being actively maintained, and support for more deep learning models and novel methods in text-based layout analysis methods [37, 34] is planned.", "metadata": { + "is_extracted": "true", "links": [ { "text": "5", @@ -756,6 +784,7 @@ "element_id": "0ce686208eb4aba70d0cd053d50c7bc2", "text": "The rest of the paper is organized as follows. Section 2 provides an overview of related work. The core LayoutParser library, DL Model Zoo, and customized model training are described in Section 3, and the DL model hub and commu- nity platform are detailed in Section 4. Section 5 shows two examples of how LayoutParser can be used in practical DIA projects, and Section 6 concludes.", "metadata": { + "is_extracted": "true", "links": [ { "text": "2", @@ -805,6 +834,7 @@ "element_id": "1548efaaa18cf819f9498d76a0440316", "text": "2 Related Work", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -827,6 +857,7 @@ "element_id": "8153390c1bb8652313be64034531449e", "text": "Recently, various DL models and datasets have been developed for layout analysis tasks. The dhSegment [22] utilizes fully convolutional networks [20] for segmen- tation tasks on historical documents. Object detection-based methods like Faster R-CNN [28] and Mask R-CNN [12] are used for identifying document elements [38] and detecting tables [30, 26]. Most recently, Graph Neural Networks [29] have also been used in table detection [27]. 
However, these models are usually implemented individually and there is no unified framework to load and use such models.", "metadata": { + "is_extracted": "true", "links": [ { "text": "22", @@ -896,6 +927,7 @@ "element_id": "45d6d494603e84706884918c1f785c9f", "text": "There has been a surge of interest in creating open-source tools for document image processing: a search of document image analysis in Github leads to 5M relevant code pieces 6; yet most of them rely on traditional rule-based methods or provide limited functionalities. The closest prior research to our work is the OCR-D project7, which also tries to build a complete toolkit for DIA. However, similar to the platform developed by Neudecker et al. [21], it is designed for analyzing historical documents, and provides no supports for recent DL models. The DocumentLayoutAnalysis project8 focuses on processing born-digital PDF documents via analyzing the stored PDF data. Repositories like DeepLayout9 and Detectron2-PubLayNet10 are individual deep learning models trained on layout analysis datasets without support for the full DIA pipeline. The Document Analysis and Exploitation (DAE) platform [15] and the DeepDIVA project [2] aim to improve the reproducibility of DIA methods (or DL models), yet they are not actively maintained. OCR engines like Tesseract [14], easyOCR11 and paddleOCR12 usually do not come with comprehensive functionalities for other DIA tasks like layout analysis.", "metadata": { + "is_extracted": "true", "links": [ { "text": "6 ;", @@ -975,6 +1007,7 @@ "element_id": "73feaff827cbc7089d3f95d1e5aac6aa", "text": "Recent years have also seen numerous efforts to create libraries for promoting reproducibility and reusability in the field of DL. Libraries like Dectectron2 [35],", "metadata": { + "is_extracted": "true", "links": [ { "text": "35", @@ -1004,6 +1037,7 @@ "element_id": "b1fa4bbd1bdda08489faab5bf3adf5cc", "text": "6 The number shown is obtained by specifying the search type as ‘code’.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1026,6 +1060,7 @@ "element_id": "db639db124b6064248de0c0dc71510a4", "text": "7 https://ocr-d.de/en/about", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1048,6 +1083,7 @@ "element_id": "d881ce84f017d89f6e35e2bc4b133bfc", "text": "8 https://github.com/BobLd/DocumentLayoutAnalysis", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1070,6 +1106,7 @@ "element_id": "9b96c128deddda1a32c739a2df157496", "text": "9 https://github.com/leonlulu/DeepLayout", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1092,6 +1129,7 @@ "element_id": "5cf72e821375f4480a1529bef97608ef", "text": "10 https://github.com/hpanwar08/detectron2", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1114,6 +1152,7 @@ "element_id": "4ab94e79eedc3a7ac498aaf737ca8878", "text": "11 https://github.com/JaidedAI/EasyOCR", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1136,6 +1175,7 @@ "element_id": "460b163c13ad7cad4fce325820a76481", "text": "12 https://github.com/PaddlePaddle/PaddleOCR", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1158,6 +1198,7 @@ "element_id": "fe238f610fe610b8ce1abaa08a0e3e63", "text": "4", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", 
"languages": [ "eng" @@ -1180,6 +1221,7 @@ "element_id": "92c4289ad4af7c0793e40d5662707e0a", "text": "Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1202,6 +1244,7 @@ "element_id": "285d83f3098b26302329b33637fd265f", "text": "Efficient Data Annotation Model Customization Document Images Community Platform ‘A >) ¥ DIA Model Hub a Customized Model Training] == | Layout Detection Models | ——= DIA Pipeline Sharing ~ OCR Module = { Layout Data stuctue ) = (store Visualization LY", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1224,6 +1267,7 @@ "element_id": "466f0bc21599ccf0fa27c021cb023f90", "text": "Fig.1: The overall architecture of LayoutParser. For an input document image, the core LayoutParser library provides a set of off-the-shelf tools for layout detection, OCR, visualization, and storage, backed by a carefully designed layout data structure. LayoutParser also supports high level customization via efficient layout annotation and model training functions. These improve model accuracy on the target samples. The community platform enables the easy sharing of DIA models and whole digitization pipelines to promote reusability and reproducibility. A collection of detailed documentation, tutorials and exemplar projects make LayoutParser easy to learn and use.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1246,6 +1290,7 @@ "element_id": "b4948db85ca791e99aa92589fc41734f", "text": "AllenNLP [8] and transformers [34] have provided the community with complete DL-based support for developing and deploying models for general computer vision and natural language processing problems. LayoutParser, on the other hand, specializes specifically in DIA tasks. LayoutParser is also equipped with a community platform inspired by established model hubs such as Torch Hub [23] and TensorFlow Hub [1]. It enables the sharing of pretrained models as well as full document processing pipelines that are unique to DIA tasks.", "metadata": { + "is_extracted": "true", "links": [ { "text": "8", @@ -1290,6 +1335,7 @@ "element_id": "7651db80014a85ab253367d3bd3e4f88", "text": "There have been a variety of document data collections to facilitate the development of DL models. Some examples include PRImA [3](magazine layouts), PubLayNet [38](academic paper layouts), Table Bank [18](tables in academic papers), Newspaper Navigator Dataset [16, 17](newspaper figure layouts) and HJDataset [31](historical Japanese document layouts). A spectrum of models trained on these datasets are currently available in the LayoutParser model zoo to support different use cases.", "metadata": { + "is_extracted": "true", "links": [ { "text": "3", @@ -1344,6 +1390,7 @@ "element_id": "5a1838a8f40b4523094652cf14ab974c", "text": "3 The Core LayoutParser Library", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1366,6 +1413,7 @@ "element_id": "47e45d28d96fc14ddc709835de35ece5", "text": "At the core of LayoutParser is an off-the-shelf toolkit that streamlines DL- based document image analysis. Five components support a simple interface with comprehensive functionalities: 1) The layout detection models enable using pre-trained or self-trained DL models for layout detection with just four lines of code. 
2) The detected layout information is stored in carefully engineered", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1388,6 +1436,7 @@ "element_id": "cd1112d2b15a0d27a29b1c83b2afd0dd", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1410,6 +1459,7 @@ "element_id": "0b9956dc7ccd1d758263217beda63196", "text": "Table 1: Current layout detection models in the LayoutParser model zoo", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1432,6 +1482,7 @@ "element_id": "cb534ba64da736dc53d60b660f5e1153", "text": "Dataset Base Model1 Large Model Notes PubLayNet [38] F / M M Layouts of modern scientific documents PRImA [3] M - Layouts of scanned modern magazines and scientific reports Newspaper [17] F - Layouts of scanned US newspapers from the 20th century TableBank [18] F F Table region on modern scientific and business document HJDataset [31] F / M - Layouts of history Japanese documents", "metadata": { + "is_extracted": "true", "links": [ { "text": "[ 38 ]", @@ -1482,6 +1533,7 @@ "element_id": "f978160527177fa39c13774ec8dfa9cb", "text": "1 For each dataset, we train several models of different sizes for different needs (the trade-off between accuracy vs. computational cost). For “base model” and “large model”, we refer to using the ResNet 50 or ResNet 101 backbones [13], respectively. One can train models of different architectures, like Faster R-CNN [28] (F) and Mask R-CNN [12] (M). For example, an F in the Large Model column indicates it has a Faster R-CNN model trained using the ResNet 101 backbone. The platform is maintained and a number of additions will be made to the model zoo in coming months.", "metadata": { + "is_extracted": "true", "links": [ { "text": "[ 13", @@ -1521,6 +1573,7 @@ "element_id": "55b33df7609960c3552a0b7bc1a5a9c6", "text": "layout data structures, which are optimized for efficiency and versatility. 3) When necessary, users can employ existing or customized OCR models via the unified API provided in the OCR module. 4) LayoutParser comes with a set of utility functions for the visualization and storage of the layout data. 5) LayoutParser is also highly customizable, via its integration with functions for layout data annotation and model training. We now provide detailed descriptions for each component.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1543,6 +1596,7 @@ "element_id": "6e9df774416cc71548308e324b4bdbb7", "text": "3.1 Layout Detection Models", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1565,6 +1619,7 @@ "element_id": "bbcc10c2b92de0cbdce8629f18b0d7ad", "text": "In LayoutParser, a layout model takes a document image as an input and generates a list of rectangular boxes for the target content regions. Different from traditional methods, it relies on deep convolutional neural networks rather than manually curated rules to identify content regions. It is formulated as an object detection problem and state-of-the-art models like Faster R-CNN [28] and Mask R-CNN [12] are used. This yields prediction results of high accuracy and makes it possible to build a concise, generalized interface for layout detection. 
LayoutParser, built upon Detectron2 [35], provides a minimal API that can perform layout detection with only four lines of code in Python:", "metadata": { + "is_extracted": "true", "links": [ { "text": "28", @@ -1604,6 +1659,7 @@ "element_id": "508a6705bb0bfb693616cc14fec5e1b9", "text": "1 import layoutparser as lp", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1626,6 +1682,7 @@ "element_id": "7d55b80ca5a0c2888ff44b931430b0d8", "text": "2 image = cv2.imread(\"image_file\") # load images 3 model = lp.Detectron2LayoutModel( 4 \"lp://PubLayNet/faster_rcnn_R_50_FPN_3x/config\")", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1648,6 +1705,7 @@ "element_id": "f30541418a7dca51e3e4cd880486ab9c", "text": "3 model = lp.Detectron2LayoutModel(", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1670,6 +1728,7 @@ "element_id": "ecaf88c55d275f8fdc8c25e2d919077f", "text": "5 layout = model.detect(image)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1692,6 +1751,7 @@ "element_id": "f888c5e8f5b1339f2af75612ea13c719", "text": "LayoutParser provides a wealth of pre-trained model weights using various datasets covering different languages, time periods, and document types. Due to domain shift [7], the prediction performance can notably drop when models are ap- plied to target samples that are significantly different from the training dataset. As document structures and layouts vary greatly in different domains, it is important to select models trained on a dataset similar to the test samples. A semantic syntax is used for initializing the model weights in LayoutParser, using both the dataset name and model name lp:///.", "metadata": { + "is_extracted": "true", "links": [ { "text": "7", @@ -1721,6 +1781,7 @@ "element_id": "676118b62c2261113a23a610c2ac50cb", "text": "6", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1743,6 +1804,7 @@ "element_id": "710ac103981c6363195774b02ee582d4", "text": "Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1765,6 +1827,7 @@ "element_id": "fd2288e4e3cf90f109d1c1198cea4ca0", "text": "3 a a 4 a 3 Rectangle vada 4 8 4 iS v 2 [S) af : fa & o a 6 ‘ g 4 Coordinate g 2 8 3 + 4 * v 8 Extra features =| 9%) | Hock) Reading é ret | | Type | | order 2 & a ¢ @ [ coordinatel textblock1 , 8 » , ee 3 , textblock2 , layoutl ] 8 q ® A list of the layout elements ‘sf", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1787,6 +1850,7 @@ "element_id": "9f11aa6b22dea1bba7eb0d122c0c5562", "text": "Fig.2: The relationship between the three types of layout data structures. Coordinate supports three kinds of variation; TextBlock consists of the co- ordinate information and extra features like block text, types, and reading orders; a Layout object is a list of all possible layout elements, including other Layout objects. They all support the same set of transformation and operation APIs for maximum flexibility.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1809,6 +1873,7 @@ "element_id": "d997f63fd79c7e03050ca01b58dfdf0a", "text": "Shown in Table 1, LayoutParser currently hosts 9 pre-trained models trained on 5 different datasets. 
Description of the training dataset is provided alongside with the trained models such that users can quickly identify the most suitable models for their tasks. Additionally, when such a model is not readily available, LayoutParser also supports training customized layout models and community sharing of the models (detailed in Section 3.5).", "metadata": { + "is_extracted": "true", "links": [ { "text": "1", @@ -1838,6 +1903,7 @@ "element_id": "836e9227ef393d8b00369e6300fbba4c", "text": "3.2 Layout Data Structures", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1860,6 +1926,7 @@ "element_id": "601f7d95172984c75de081023ca64c15", "text": "A critical feature of LayoutParser is the implementation of a series of data structures and operations that can be used to efficiently process and manipulate the layout elements. In document image analysis pipelines, various post-processing on the layout analysis model outputs is usually required to obtain the final outputs. Traditionally, this requires exporting DL model outputs and then loading the results into other pipelines. All model outputs from LayoutParser will be stored in carefully engineered data types optimized for further processing, which makes it possible to build an end-to-end document digitization pipeline within LayoutParser. There are three key components in the data structure, namely the Coordinate system, the TextBlock, and the Layout. They provide different levels of abstraction for the layout data, and a set of APIs are supported for transformations or operations on these classes.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1882,6 +1949,7 @@ "element_id": "48d58ed9a3d95637df68c8b810147ba1", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1904,6 +1972,7 @@ "element_id": "dcdc0dc4759bd20c04026973cbe386e2", "text": "Coordinates are the cornerstones for storing layout information. Currently, three types of Coordinate data structures are provided in LayoutParser, shown in Figure 2. Interval and Rectangle are the most common data types and support specifying 1D or 2D regions within a document. They are parameterized with 2 and 4 parameters. A Quadrilateral class is also implemented to support a more generalized representation of rectangular regions when the document is skewed or distorted, where the 4 corner points can be specified and a total of 8 degrees of freedom are supported. A wide collection of transformations like shift, pad, and scale, and operations like intersect, union, and is_in, are supported for these classes. Notably, it is common to separate a segment of the image and analyze it individually. LayoutParser provides full support for this scenario via image cropping operations crop_image and coordinate transformations like relative_to and condition_on that transform coordinates to and from their relative representations. We refer readers to Table 2 for a more detailed description of these operations13.", "metadata": { + "is_extracted": "true", "links": [ { "text": "2", @@ -1943,6 +2012,7 @@ "element_id": "3f620e1ad95cd446170613ed9d780853", "text": "Based on Coordinates, we implement the TextBlock class that stores both the positional and extra features of individual layout elements. It also supports specifying the reading orders via setting the parent field to the index of the parent object. 
A Layout class is built that takes in a list of TextBlocks and supports processing the elements in batch. Layout can also be nested to support hierarchical layout structures. They support the same operations and transformations as the Coordinate classes, minimizing both learning and deployment effort.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1965,6 +2035,7 @@ "element_id": "2b81bd7a3f21b84379bfcd4bb175c5d1", "text": "3.3 OCR", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1987,6 +2058,7 @@ "element_id": "16565416942e53cf65f75a8a845df211", "text": "LayoutParser provides a unified interface for existing OCR tools. Though there are many OCR tools available, they are usually configured differently with distinct APIs or protocols for using them. It can be inefficient to add new OCR tools into an existing pipeline, and difficult to make direct comparisons among the available tools to find the best option for a particular project. To this end, LayoutParser builds a series of wrappers among existing OCR engines, and provides nearly the same syntax for using them. It supports a plug-and-play style of using OCR engines, making it effortless to switch, evaluate, and compare different OCR modules:", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2009,6 +2081,7 @@ "element_id": "2e605dfb574532cf2ab54ded080a2ab9", "text": "1 ocr_agent = lp.TesseractAgent() 2 # Can be easily switched to other OCR software 3 tokens = ocr_agent.detect(image)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2031,6 +2104,7 @@ "element_id": "5bc3c9470dc53c60c1fd04828105afdd", "text": "The OCR outputs will also be stored in the aforementioned layout data structures and can be seamlessly incorporated into the digitization pipeline. Currently LayoutParser supports the Tesseract and Google Cloud Vision OCR engines.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2053,6 +2127,7 @@ "element_id": "fa023ccf2ac1042ef254ecf47cc592ca", "text": "LayoutParser also comes with a DL-based CNN-RNN OCR model [6] trained with the Connectionist Temporal Classification (CTC) loss [10]. It can be used like the other OCR modules, and can be easily trained on customized datasets.", "metadata": { + "is_extracted": "true", "links": [ { "text": "6", @@ -2087,6 +2162,7 @@ "element_id": "a2a0a2ef0279f0710f3cd34474ca8645", "text": "13 This is also available in the LayoutParser documentation pages.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2109,6 +2185,7 @@ "element_id": "5498a550b5367fa8dc935013956d09fa", "text": "8 Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2131,6 +2208,7 @@ "element_id": "a5ce184b53898a543bca90a5b0acd156", "text": "Table 2: All operations supported by the layout elements. 
The same APIs are supported across different layout element classes including Coordinate types, TextBlock and Layout.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2153,6 +2231,7 @@ "element_id": "64bc79d1132a89c71837f420d6e4e2dc", "text": "Operation Name Description block.pad(top, bottom, right, left) Enlarge the current block according to the input block.scale(fx, fy) Scale the current block given the ratio in x and y direction block.shift(dx, dy) Move the current block with the shift distances in x and y direction block1.is in(block2) Whether block1 is inside of block2 block1.intersect(block2) Return the intersection region of block1 and block2. Coordinate type to be determined based on the inputs. block1.union(block2) Return the union region of block1 and block2. Coordinate type to be determined based on the inputs. block1.relative to(block2) Convert the absolute coordinates of block1 to relative coordinates to block2 block1.condition on(block2) Calculate the absolute coordinates of block1 given the canvas block2’s absolute coordinates block.crop image(image) Obtain the image segments in the block region", "metadata": { + "is_extracted": "true", "text_as_html": "
block.pad(top, bottom,right,left)Enlarge the current block according to the input
block.scale(fx, fy)Scale the current block given the ratio in x and y direction
block.shift(dx, dy)Move the current block with the shift distances in x and y direction
block1.is_in(block2)Whether block] is inside of block2
block1. intersect (block2)Return the intersection region of blockl and block2. Coordinate type to be determined based on the inputs
block1.union(block2)Return the union region of blockl and block2. Coordinate type to be determined based on the inputs
block1.relative_to(block2)Convert the absolute coordinates of blockl to relative coordinates to block2
block1.condition_on(block2)Calculate the absolute coordinates of blockl given the canvas block2’s absolute coordinates
block. crop_image (image)Obtain the image segments in the block region
", "filetype": "application/pdf", "languages": [ @@ -2176,6 +2255,7 @@ "element_id": "3d803300353e1055611e38b1b2eb19a7", "text": "3.4 Storage and visualization", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2198,6 +2278,7 @@ "element_id": "afa5f1dc8b4ce5598f278992d818eaa9", "text": "The end goal of DIA is to transform the image-based document data into a structured database. LayoutParser supports exporting layout data into different formats like JSON, csv, and will add the support for the METS/ALTO XML format 14 . It can also load datasets from layout analysis-specific formats like COCO [38] and the Page Format [25] for training layout models (Section 3.5).", "metadata": { + "is_extracted": "true", "links": [ { "text": "14", @@ -2242,6 +2323,7 @@ "element_id": "28aeb996f497c9d01d06e564483d0854", "text": "Visualization of the layout detection results is critical for both presentation and debugging. LayoutParser is built with an integrated API for displaying the layout information along with the original document image. Shown in Figure 3, it enables presenting layout data with rich meta information and features in different modes. More detailed information can be found in the online LayoutParser documentation page.", "metadata": { + "is_extracted": "true", "links": [ { "text": "3", @@ -2271,6 +2353,7 @@ "element_id": "9e8599877fa8025a800477652dcd29be", "text": "3.5 Customized Model Training", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2293,6 +2376,7 @@ "element_id": "05e5f4e2a196db34263541d1ecebe297", "text": "Besides the off-the-shelf library, LayoutParser is also highly customizable with supports for highly unique and challenging document analysis tasks. Target document images can be vastly different from the existing datasets for train- ing layout models, which leads to low layout detection accuracy. Training data", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2315,6 +2399,7 @@ "element_id": "894921dce9d1291116c38d561c2fff59", "text": "14 https://altoxml.github.io", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2337,6 +2422,7 @@ "element_id": "c069937e6c2bfc0f856835f3af4d6181", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2359,6 +2445,7 @@ "element_id": "02a078081972f7bdb26f06a787773a30", "text": "0g Burpunog uayor Aeydsiq:1 vondo 10g Guypunog usyou ap:z uondo Mode I: Showing Layout on the Original Image Mode Il: Drawing OCR’ Text at the Correspoding Position", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2381,6 +2468,7 @@ "element_id": "4d1b9566e792683b9559b778be4f4046", "text": "Fig.3: Layout detection and OCR results visualization generated by the LayoutParser APIs. Mode I directly overlays the layout region bounding boxes and categories over the original image. Mode II recreates the original document via drawing the OCR’d texts at their corresponding positions on the image canvas. In this figure, tokens in textual regions are filtered using the API and then displayed.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2403,6 +2491,7 @@ "element_id": "625c9e1d41a9740f094041595f79953d", "text": "can also be highly sensitive and not sharable publicly. 
To overcome these chal- lenges, LayoutParser is built with rich features for efficient data annotation and customized model training.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2425,6 +2514,7 @@ "element_id": "a3498730b5cd3fe9405fad69bcf37882", "text": "LayoutParser incorporates a toolkit optimized for annotating document lay- outs using object-level active learning [32]. With the help from a layout detection model trained along with labeling, only the most important layout objects within each image, rather than the whole image, are required for labeling. The rest of the regions are automatically annotated with high confidence predictions from the layout detection model. This allows a layout dataset to be created more efficiently with only around 60% of the labeling budget.", "metadata": { + "is_extracted": "true", "links": [ { "text": "32", @@ -2454,6 +2544,7 @@ "element_id": "c4ccf2cf2e7495668221cbe51534f90b", "text": "After the training dataset is curated, LayoutParser supports different modes for training the layout models. Fine-tuning can be used for training models on a small newly-labeled dataset by initializing the model with existing pre-trained weights. Training from scratch can be helpful when the source dataset and target are significantly different and a large training set is available. However, as suggested in Studer et al.’s work[33], loading pre-trained weights on large-scale datasets like ImageNet [5], even from totally different domains, can still boost model performance. Through the integrated API provided by LayoutParser, users can easily compare model performances on the benchmark datasets.", "metadata": { + "is_extracted": "true", "links": [ { "text": "33", @@ -2488,6 +2579,7 @@ "element_id": "59c95b02b488f297417af4125e4ac316", "text": "10 Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2510,6 +2602,7 @@ "element_id": "747f46c43a88768fd543e10bac84203b", "text": "Intra-column reading order Token Categories tie (Adress tee Ewumber Variable Column reading order HEE company ype Column Categories (J tite Adress 1] ree [7] Section Header Maximum Allowed Height (b) Illustration of the recreated document with dense text structure for better OCR performance", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2532,6 +2625,7 @@ "element_id": "9667b0e42f9d28607c7c13bffb760906", "text": "Fig.4: Illustration of (a) the original historical Japanese document with layout detection results and (b) a recreated version of the document image that achieves much better character recognition recall. The reorganization algorithm rearranges the tokens based on the their detected bounding boxes given a maximum allowed height.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2554,6 +2648,7 @@ "element_id": "88f6e589165656eceebf898d0240e05c", "text": "4 LayoutParser Community Platform", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2576,6 +2671,7 @@ "element_id": "e9a86eb57ba5483acfeefb0e931402b1", "text": "Another focus of LayoutParser is promoting the reusability of layout detection models and full digitization pipelines. Similar to many existing deep learning libraries, LayoutParser comes with a community model hub for distributing layout models. 
End-users can upload their self-trained models to the model hub, and these models can be loaded into a similar interface as the currently available LayoutParser pre-trained models. For example, the model trained on the News Navigator dataset [17] has been incorporated in the model hub.", "metadata": { + "is_extracted": "true", "links": [ { "text": "17", @@ -2605,6 +2701,7 @@ "element_id": "c08c76705396fe7a65be5dff6d3bffd5", "text": "Beyond DL models, LayoutParser also promotes the sharing of entire doc- ument digitization pipelines. For example, sometimes the pipeline requires the combination of multiple DL models to achieve better accuracy. Currently, pipelines are mainly described in academic papers and implementations are often not pub- licly available. To this end, the LayoutParser community platform also enables the sharing of layout pipelines to promote the discussion and reuse of techniques. For each shared pipeline, it has a dedicated project page, with links to the source code, documentation, and an outline of the approaches. A discussion panel is provided for exchanging ideas. Combined with the core LayoutParser library, users can easily build reusable components based on the shared pipelines and apply them to solve their unique problems.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2627,6 +2724,7 @@ "element_id": "53da8301ac140e0b72cdcf6a7f405918", "text": "5 Use Cases", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2649,6 +2747,7 @@ "element_id": "1fd6bf73b6c80f8ed034bf977fba5a67", "text": "The core objective of LayoutParser is to make it easier to create both large-scale and light-weight document digitization pipelines. Large-scale document processing", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2671,6 +2770,7 @@ "element_id": "ab543398222da25b3a9231929162d3a0", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2693,6 +2793,7 @@ "element_id": "4b9eddb71426681f2828832312457b67", "text": "focuses on precision, efficiency, and robustness. The target documents may have complicated structures, and may require training multiple layout detection models to achieve the optimal accuracy. Light-weight pipelines are built for relatively simple documents, with an emphasis on development ease, speed and flexibility. Ideally one only needs to use existing resources, and model training should be avoided. Through two exemplar projects, we show how practitioners in both academia and industry can easily build such pipelines using LayoutParser and extract high-quality structured document data for their downstream tasks. The source code for these projects will be publicly available in the LayoutParser community hub.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2715,6 +2816,7 @@ "element_id": "54ee49eac3f4e6098811cda1f9dd0306", "text": "5.1 A Comprehensive Historical Document Digitization Pipeline", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2737,6 +2839,7 @@ "element_id": "083b7889c33f34e7d0479c233cdccc34", "text": "The digitization of historical documents can unlock valuable data that can shed light on many important social, economic, and historical questions. 
Yet due to scan noises, page wearing, and the prevalence of complicated layout structures, ob- taining a structured representation of historical document scans is often extremely complicated.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2759,6 +2862,7 @@ "element_id": "76dd07abeb9f4bbcb77152deb52c9dc0", "text": "In this example, LayoutParser was used to develop a comprehensive pipeline, shown in Figure 5, to gener- ate high-quality structured data from historical Japanese firm financial ta- bles with complicated layouts. The pipeline applies two layout models to identify different levels of document structures and two customized OCR engines for optimized character recog- nition accuracy.", "metadata": { + "is_extracted": "true", "links": [ { "text": "5", @@ -2788,6 +2892,7 @@ "element_id": "42551c9b40827dcdc52055b4d25c6fc3", "text": "As shown in Figure 4 (a), the document contains columns of text written vertically 15, a common style in Japanese. Due to scanning noise and archaic printing technology, the columns can be skewed or have vari- able widths, and hence cannot be eas- ily identified via rule-based methods. Within each column, words are sepa- rated by white spaces of variable size, and the vertical positions of objects can be an indicator of their layout type.", "metadata": { + "is_extracted": "true", "links": [ { "text": "4", @@ -2822,6 +2927,7 @@ "element_id": "2b90153124fb6f9e9f5539b9db75d240", "text": "———————_+ (| ‘Active Learning Layout Annotate Layout Dataset | + ‘Annotation Toolkit ¥ alae Deep Leaming Layout Model Training & Inference, ¥ ; Handy Data Structures & Post-processing Ee apis for Layout Dat a Ae ror yon Oats 4 Text Recognition | <—— Default ane Customized ¥ ee Layout Structure Visualization & Export | <—— | visualization & Storage The Japanese Document Helpful LayoutParser Digitization Pipeline Modules", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2844,6 +2950,7 @@ "element_id": "80291b42f1785935496188bb52788288", "text": "Fig.5: Illustration of how LayoutParser helps with the historical document digi- tization pipeline.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2866,6 +2973,7 @@ "element_id": "c5b22d5a9f8b657ad4acdf6ad1f0bdd0", "text": "15 A document page consists of eight rows like this. For simplicity we skip the row segmentation discussion and refer readers to the source code when available.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2888,6 +2996,7 @@ "element_id": "9d917f215b0115c679105482b80d2d2d", "text": "12 Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2910,6 +3019,7 @@ "element_id": "4bba0fe5b17811e76afbf7650f2f6792", "text": "To decipher the complicated layout", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2932,6 +3042,7 @@ "element_id": "7e1f7b138c864ed8b40cf0f3d38801ec", "text": "structure, two object detection models have been trained to recognize individual columns and tokens, respectively. A small training set (400 images with approxi- mately 100 annotations each) is curated via the active learning based annotation tool [32] in LayoutParser. The models learn to identify both the categories and regions for each token or column via their distinct visual features. 
The layout data structure enables easy grouping of the tokens within each column, and rearranging columns to achieve the correct reading orders based on the horizontal position. Errors are identified and rectified via checking the consistency of the model predictions. Therefore, though trained on a small dataset, the pipeline achieves a high level of layout detection accuracy: it achieves a 96.97 AP [19] score across 5 categories for the column detection model, and a 89.23 AP across 4 categories for the token detection model.", "metadata": { + "is_extracted": "true", "links": [ { "text": "32", @@ -2966,6 +3077,7 @@ "element_id": "dccaa93e7bae24dedf523dd39575dfbe", "text": "A combination of character recognition methods is developed to tackle the unique challenges in this document. In our experiments, we found that irregular spacing between the tokens led to a low character recognition recall rate, whereas existing OCR models tend to perform better on densely-arranged texts. To overcome this challenge, we create a document reorganization algorithm that rearranges the text based on the token bounding boxes detected in the layout analysis step. Figure 4 (b) illustrates the generated image of dense text, which is sent to the OCR APIs as a whole to reduce the transaction costs. The flexible coordinate system in LayoutParser is used to transform the OCR results relative to their original positions on the page.", "metadata": { + "is_extracted": "true", "links": [ { "text": "4", @@ -2995,6 +3107,7 @@ "element_id": "60c2e2147d0b0dbd576d51b71a95a2ef", "text": "Additionally, it is common for historical documents to use unique fonts with different glyphs, which significantly degrades the accuracy of OCR models trained on modern texts. In this document, a special flat font is used for printing numbers and could not be detected by off-the-shelf OCR engines. Using the highly flexible functionalities from LayoutParser, a pipeline approach is constructed that achieves a high recognition accuracy with minimal effort. As the characters have unique visual structures and are usually clustered together, we train the layout model to identify number regions with a dedicated category. Subsequently, LayoutParser crops images within these regions, and identifies characters within them using a self-trained OCR model based on a CNN-RNN [6]. The model detects a total of 15 possible categories, and achieves a 0.98 Jaccard score16 and a 0.17 average Levinstein distances17 for token prediction on the test set.", "metadata": { + "is_extracted": "true", "links": [ { "text": "6", @@ -3034,6 +3147,7 @@ "element_id": "de9e855638523c5f77ed4070813e37a3", "text": "Overall, it is possible to create an intricate and highly accurate digitization pipeline for large-scale digitization using LayoutParser. The pipeline avoids specifying the complicated rules used in traditional methods, is straightforward to develop, and is robust to outliers. 
The DL models also generate fine-grained results that enable creative approaches like page reorganization for OCR.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3056,6 +3170,7 @@ "element_id": "a375a901ba62c168a96725c055b47bad", "text": "16 This measures the overlap between the detected and ground-truth characters, and the maximum is 1.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3078,6 +3193,7 @@ "element_id": "184a3abfd34e7aa04632979ee3c2de36", "text": "17 This measures the number of edits from the ground-truth text to the predicted text, and lower is better.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3100,6 +3216,7 @@ "element_id": "2b7101f39954d5301166b82906202ea9", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3122,6 +3239,7 @@ "element_id": "1359eaa601a24c083e143b8bf5114127", "text": "(@) Partial table at the bottom (6) Full page table (©) Partial table at the top (@) Mis-detected text line", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3144,6 +3262,7 @@ "element_id": "d35d253341e8b8d837f384ecd6ac410a", "text": "Fig.6: This lightweight table detector can identify tables (outlined in red) and cells (shaded in blue) in different locations on a page. In very few cases (d), it might generate minor error predictions, e.g, failing to capture the top text line of a table.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3166,6 +3285,7 @@ "element_id": "60e4fa05c78628ec1c6fa6003b86b52e", "text": "5.2 A light-weight Visual Table Extractor", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3188,6 +3308,7 @@ "element_id": "445ad333fa3f7f85d2be634fbdeeb72a", "text": "Detecting tables and parsing their structures (table extraction) are of central im- portance for many document digitization tasks. Many previous works [26, 30, 27] and tools 18 have been developed to identify and parse table structures. Yet they might require training complicated models from scratch, or are only applicable for born-digital PDF documents. In this section, we show how LayoutParser can help build a light-weight accurate visual table extractor for legal docket tables using the existing resources with minimal effort.", "metadata": { + "is_extracted": "true", "links": [ { "text": "26", @@ -3232,6 +3353,7 @@ "element_id": "923b62eb8550ec49cf6d3f2e6bac7ec8", "text": "The extractor uses a pre-trained layout detection model for identifying the table regions and some simple rules for pairing the rows and the columns in the PDF image. Mask R-CNN [12] trained on the PubLayNet dataset [38] from the LayoutParser Model Zoo can be used for detecting table regions. By filtering out model predictions of low confidence and removing overlapping predictions, LayoutParser can identify the tabular regions on each page, which significantly simplifies the subsequent steps. By applying the line detection functions within the tabular segments, provided in the utility module from LayoutParser, the pipeline can identify the three distinct columns in the tables. A row clustering method is then applied via analyzing the y coordinates of token bounding boxes in the left-most column, which are obtained from the OCR engines. 
A non-maximal suppression algorithm is used to remove duplicated rows with extremely small gaps. Shown in Figure 6, the built pipeline can detect tables at different positions on a page accurately. Continued tables from different pages are concatenated, and a structured table representation has been easily created.", "metadata": { + "is_extracted": "true", "links": [ { "text": "12", @@ -3271,6 +3393,7 @@ "element_id": "abb8837ab23e7fcaa35969b0119bcc2e", "text": "18 https://github.com/atlanhq/camelot, https://github.com/tabulapdf/tabula", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3293,6 +3416,7 @@ "element_id": "cf4d8c7a6177bda0ced6458173952021", "text": "14 Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3315,6 +3439,7 @@ "element_id": "030602b79a8995e9a457b875d94c016d", "text": "6 Conclusion", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3337,6 +3462,7 @@ "element_id": "e79cef57c86050aa5fc74e5cd3923197", "text": "LayoutParser provides a comprehensive toolkit for deep learning-based document image analysis. The off-the-shelf library is easy to install, and can be used to build flexible and accurate pipelines for processing documents with complicated structures. It also supports high-level customization and enables easy labeling and training of DL models on unique document image datasets. The LayoutParser community platform facilitates sharing DL models and DIA pipelines, inviting discussion and promoting code reproducibility and reusability. The LayoutParser team is committed to keeping the library updated continuously and bringing the most recent advances in DL-based DIA, such as multi-modal document modeling [37, 36, 9] (an upcoming priority), to a diverse audience of end-users.", "metadata": { + "is_extracted": "true", "links": [ { "text": "37", @@ -3376,6 +3502,7 @@ "element_id": "dd3013dfc8b3df79d89268894b10571e", "text": "Acknowledgements We thank the anonymous reviewers for their comments and suggestions. This project is supported in part by NSF Grant OIA-2033558 and funding from the Harvard Data Science Initiative and Harvard Catalyst. Zejiang Shen thanks Doug Downey for suggestions.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3398,6 +3525,7 @@ "element_id": "58c1b97c7988d78495527a0726a7c85a", "text": "References", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3420,6 +3548,7 @@ "element_id": "85e09a5617e58a3a78b22fd12eb29eaf", "text": "[1] Abadi, M., Agarwal, A., Barham, P., Brevdo, E., Chen, Z., Citro, C., Corrado, G.S., Davis, A., Dean, J., Devin, M., Ghemawat, S., Goodfellow, I., Harp, A., Irving, G., Isard, M., Jia, Y., Jozefowicz, R., Kaiser, L., Kudlur, M., Levenberg, J., Man´e, D., Monga, R., Moore, S., Murray, D., Olah, C., Schuster, M., Shlens, J., Steiner, B., Sutskever, I., Talwar, K., Tucker, P., Vanhoucke, V., Vasudevan, V., Vi´egas, F., Vinyals, O., Warden, P., Wattenberg, M., Wicke, M., Yu, Y., Zheng, X.: TensorFlow: Large-scale machine learning on heterogeneous systems (2015), https://www.tensorflow.org/, software available from tensorflow.org", "metadata": { + "is_extracted": "true", "links": [ { "text": "https :// www . tensorflow . 
org /,", @@ -3449,6 +3578,7 @@ "element_id": "ad466edc2a12c9be4bf951fd8b5bf818", "text": "[2] Alberti, M., Pondenkandath, V., W¨ursch, M., Ingold, R., Liwicki, M.: Deepdiva: a highly-functional python framework for reproducible experiments. In: 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR). pp. 423–428. IEEE (2018)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3471,6 +3601,7 @@ "element_id": "217777f3d44620afddc1e27553e81a66", "text": "[3] Antonacopoulos, A., Bridson, D., Papadopoulos, C., Pletschacher, S.: A realistic dataset for performance evaluation of document layout analysis. In: 2009 10th International Conference on Document Analysis and Recognition. pp. 296–300. IEEE (2009)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3493,6 +3624,7 @@ "element_id": "292dd088dc6a174159395e31be7755d7", "text": "[4] Baek, Y., Lee, B., Han, D., Yun, S., Lee, H.: Character region awareness for text detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 9365–9374 (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3515,6 +3647,7 @@ "element_id": "52a5a7f582c381ec8c7c1abc7e983191", "text": "[5] Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: A Large-Scale Hierarchical Image Database. In: CVPR09 (2009)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3537,6 +3670,7 @@ "element_id": "4e93c51c89970349aa9e0a42cb330c4b", "text": "[6] Deng, Y., Kanervisto, A., Ling, J., Rush, A.M.: Image-to-markup generation with coarse-to-fine attention. In: International Conference on Machine Learning. pp. 980–989. PMLR (2017)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3559,6 +3693,7 @@ "element_id": "8cfd166d282469f765423faae44271e2", "text": "[7] Ganin, Y., Lempitsky, V.: Unsupervised domain adaptation by backpropagation. In: International conference on machine learning. pp. 1180–1189. PMLR (2015)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3581,6 +3716,7 @@ "element_id": "8bce49aab693aad97676011688f3f6f3", "text": "LayoutParser: A Unified Toolkit for DL-Based DIA", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3603,6 +3739,7 @@ "element_id": "a742cc226eba47ed37993cde0d2718d9", "text": "[8] Gardner, M., Grus, J., Neumann, M., Tafjord, O., Dasigi, P., Liu, N., Peters, M., Schmitz, M., Zettlemoyer, L.: Allennlp: A deep semantic natural language processing platform. arXiv preprint arXiv:1803.07640 (2018)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3625,6 +3762,7 @@ "element_id": "0176491ee2584bffdfb943caa8aefab4", "text": "Lukasz Garncarek, Powalski, R., Stanistawek, T., Topolski, B., Halama, P., Graliriski, F.: Lambert: Layout-aware (language) modeling using bert for in- formation extraction (2020)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3647,6 +3785,7 @@ "element_id": "95bc71fb3542f420dfa50e22eb8c734f", "text": "[10] Graves, A., Fern´andez, S., Gomez, F., Schmidhuber, J.: Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. 
In: Proceedings of the 23rd international conference on Machine learning. pp. 369–376 (2006)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3669,6 +3808,7 @@ "element_id": "3fab75481d8e6d389ea6034e18f54e00", "text": "[11] Harley, A.W., Ufkes, A., Derpanis, K.G.: Evaluation of deep convolutional nets for document image classification and retrieval. In: 2015 13th International Conference on Document Analysis and Recognition (ICDAR). pp. 991–995. IEEE (2015)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3691,6 +3831,7 @@ "element_id": "8cd8821b71e4bda1a77f6a114ff54f50", "text": "[12] He, K., Gkioxari, G., Doll´ar, P., Girshick, R.: Mask r-cnn. In: Proceedings of the IEEE international conference on computer vision. pp. 2961–2969 (2017)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3713,6 +3854,7 @@ "element_id": "02c0a0c6c60503798f3894fe244c237d", "text": "[13] He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770–778 (2016)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3735,6 +3877,7 @@ "element_id": "893390a7c02886a034be490296237e30", "text": "[14] Kay, A.: Tesseract: An open-source optical character recognition engine. Linux J. 2007(159), 2 (Jul 2007)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3757,6 +3900,7 @@ "element_id": "bd2e9f3795d8492cadde716193f62aba", "text": "[15] Lamiroy, B., Lopresti, D.: An open architecture for end-to-end document analysis benchmarking. In: 2011 International Conference on Document Analysis and Recognition. pp. 42–47. IEEE (2011)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3779,6 +3923,7 @@ "element_id": "07cef8a161dd1c3f0895c605844d678e", "text": "[16] Lee, B.C., Weld, D.S.: Newspaper navigator: Open faceted search for 1.5 million images. In: Adjunct Publication of the 33rd Annual ACM Sym- posium on User Interface Software and Technology. p. 120–122. UIST ’20 Adjunct, Association for Computing Machinery, New York, NY, USA (2020). https://doi.org/10.1145/3379350.3416143, https://doi-org.offcampus. lib.washington.edu/10.1145/3379350.3416143", "metadata": { + "is_extracted": "true", "links": [ { "text": "https :// doi . org / 10 . 1145 / 3379350 . 3416143", @@ -3818,6 +3963,7 @@ "element_id": "90ad04faa055039bfd37c1a851878048", "text": "[17] Lee, B.C.G., Mears, J., Jakeway, E., Ferriter, M., Adams, C., Yarasavage, N., Thomas, D., Zwaard, K., Weld, D.S.: The Newspaper Navigator Dataset: Extracting Headlines and Visual Content from 16 Million Historic Newspaper Pages in Chronicling America, p. 3055–3062. Association for Computing Machinery, New York, NY, USA (2020), https://doi.org/10.1145/3340531.3412767", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3840,6 +3986,7 @@ "element_id": "dfcf2fc9f58128e98ba312b0c89fbea1", "text": "[18] Li, M., Cui, L., Huang, S., Wei, F., Zhou, M., Li, Z.: Tablebank: Table benchmark for image-based table detection and recognition. 
arXiv preprint arXiv:1903.01949 (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3862,6 +4009,7 @@ "element_id": "b5e16aae3d43919bb5899fade72c0550", "text": "[19] Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll´ar, P., Zitnick, C.L.: Microsoft coco: Common objects in context. In: European conference on computer vision. pp. 740–755. Springer (2014)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3884,6 +4032,7 @@ "element_id": "8344e54a6acb25643c83b5ea96c5c593", "text": "[20] Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3431–3440 (2015)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3906,6 +4055,7 @@ "element_id": "9476b030857c32e55a638928df6d01e8", "text": "[21] Neudecker, C., Schlarb, S., Dogan, Z.M., Missier, P., Sufi, S., Williams, A., Wolsten- croft, K.: An experimental workflow development platform for historical document digitisation and analysis. In: Proceedings of the 2011 workshop on historical document imaging and processing. pp. 161–168 (2011)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3928,6 +4078,7 @@ "element_id": "4640c3f33351b994165071b6d872ef56", "text": "[22] Oliveira, S.A., Seguin, B., Kaplan, F.: dhsegment: A generic deep-learning approach for document segmentation. In: 2018 16th International Conference on Frontiers in Handwriting Recognition (ICFHR). pp. 7–12. IEEE (2018)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3950,6 +4101,7 @@ "element_id": "2125d675311c00e01a24886e8a603ee1", "text": "16 Z. Shen et al.", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3972,6 +4124,7 @@ "element_id": "048415c6e5fc7bdd5466bf9c877b4a14", "text": "[23] Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., Lerer, A.: Automatic differentiation in pytorch (2017)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3994,6 +4147,7 @@ "element_id": "04c0655f0749575bbe838891bf103d6d", "text": "[24] Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al.: Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703 (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4016,6 +4170,7 @@ "element_id": "a2f34eceb4f6036f105c6319de5450d1", "text": "[25] Pletschacher, S., Antonacopoulos, A.: The page (page analysis and ground-truth elements) format framework. In: 2010 20th International Conference on Pattern Recognition. pp. 257–260. IEEE (2010)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4038,6 +4193,7 @@ "element_id": "c81432ac5c76b82c1ccd93d0a3ee15b1", "text": "[26] Prasad, D., Gadpal, A., Kapadni, K., Visave, M., Sultanpure, K.: Cascadetabnet: An approach for end to end table detection and structure recognition from image- based documents. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops. pp. 
572–573 (2020)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4060,6 +4216,7 @@ "element_id": "0f5cebf6a7661981062a59f24e0b2a3a", "text": "[27] Qasim, S.R., Mahmood, H., Shafait, F.: Rethinking table recognition using graph neural networks. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 142–147. IEEE (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4082,6 +4239,7 @@ "element_id": "d02327f415141694d5853b57ac0f9e3f", "text": "[28] Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. In: Advances in neural information processing systems. pp. 91–99 (2015)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4104,6 +4262,7 @@ "element_id": "d0529ef231eeac2e8ae2083dee416210", "text": "[29] Scarselli, F., Gori, M., Tsoi, A.C., Hagenbuchner, M., Monfardini, G.: The graph neural network model. IEEE transactions on neural networks 20(1), 61–80 (2008)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4126,6 +4285,7 @@ "element_id": "98fce7a2720ed7eda87a02659071b121", "text": "[30] Schreiber, S., Agne, S., Wolf, I., Dengel, A., Ahmed, S.: Deepdesrt: Deep learning for detection and structure recognition of tables in document images. In: 2017 14th IAPR international conference on document analysis and recognition (ICDAR). vol. 1, pp. 1162–1167. IEEE (2017)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4148,6 +4308,7 @@ "element_id": "e3146a202c282ecab0d87f59d3307983", "text": "[31] Shen, Z., Zhang, K., Dell, M.: A large dataset of historical japanese documents with complex layouts. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops. pp. 548–549 (2020)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4170,6 +4331,7 @@ "element_id": "7b2d2fbb2bcae74fae3cf85c7478eb9f", "text": "[32] Shen, Z., Zhao, J., Dell, M., Yu, Y., Li, W.: Olala: Object-level active learning based layout annotation. arXiv preprint arXiv:2010.01762 (2020)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4192,6 +4354,7 @@ "element_id": "7937fc115bcbbc8c08640587fa5ed827", "text": "[33] Studer, L., Alberti, M., Pondenkandath, V., Goktepe, P., Kolonko, T., Fischer, A., Liwicki, M., Ingold, R.: A comprehensive study of imagenet pre-training for historical document image analysis. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 720–725. IEEE (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4214,6 +4377,7 @@ "element_id": "881f67b82dccc13eaf96e912750c0318", "text": "[34] Wolf, T., Debut, L., Sanh, V., Chaumond, J., Delangue, C., Moi, A., Cistac, P., Rault, T., Louf, R., Funtowicz, M., et al.: Huggingface’s transformers: State-of- the-art natural language processing. arXiv preprint arXiv:1910.03771 (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4236,6 +4400,7 @@ "element_id": "71c1e09e0ae75ac750aaf4bfb71539d5", "text": "[35] Wu, Y., Kirillov, A., Massa, F., Lo, W.Y., Girshick, R.: Detectron2. 
https:// github.com/facebookresearch/detectron2 (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4258,6 +4423,7 @@ "element_id": "f28bdd6f783474abbbbb57c24978a1ff", "text": "[36] Xu, Y., Xu, Y., Lv, T., Cui, L., Wei, F., Wang, G., Lu, Y., Florencio, D., Zhang, C., Che, W., et al.: Layoutlmv2: Multi-modal pre-training for visually-rich document understanding. arXiv preprint arXiv:2012.14740 (2020)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4280,6 +4446,7 @@ "element_id": "8b9c717003c9c36fc9833b6226aef9a3", "text": "[37] Xu, Y., Li, M., Cui, L., Huang, S., Wei, F., Zhou, M.: Layoutlm: Pre-training of text and layout for document image understanding (2019)", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -4302,6 +4469,7 @@ "element_id": "3ac304a6df305ec0a0bb9079795b6c2e", "text": "[38] Zhong, X., Tang, J., Yepes, A.J.: Publaynet: largest dataset ever for doc- ument layout analysis. In: 2019 International Conference on Document Analysis and Recognition (ICDAR). pp. 1015–1022. IEEE (Sep 2019). https://doi.org/10.1109/ICDAR.2019.00166", "metadata": { + "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" From 00a2e6aa2bb4f0f29b6d6a18242d45b150167022 Mon Sep 17 00:00:00 2001 From: "codeflash-ai[bot]" <148906541+codeflash-ai[bot]@users.noreply.github.com> Date: Thu, 6 Nov 2025 14:55:23 -0600 Subject: [PATCH 12/17] =?UTF-8?q?=E2=9A=A1=EF=B8=8F=20Speed=20up=20functio?= =?UTF-8?q?n=20`=5Fmerge=5Fextracted=5Finto=5Finferred=5Fwhen=5Falmost=5Ft?= =?UTF-8?q?he=5Fsame`=20by=2024%=20in=20PR=20#4112=20(`feat/track-text-sou?= =?UTF-8?q?rce`)=20(#4114)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## ⚡️ This pull request contains optimizations for PR #4112 If you approve this dependent PR, these changes will be merged into the original PR branch `feat/track-text-source`. >This PR will be automatically closed if the original PR is merged. ---- #### 📄 24% (0.24x) speedup for ***`_merge_extracted_into_inferred_when_almost_the_same` in `unstructured/partition/pdf_image/pdfminer_processing.py`*** ⏱️ Runtime : **`40.6 milliseconds`** **→** **`32.6 milliseconds`** (best of `18` runs) #### 📝 Explanation and details The optimized code achieves a **24% speedup** through two key optimizations: **1. Improved `_minimum_containing_coords` function:** - **What**: Replaced `np.vstack` with separate array creation followed by `np.column_stack` - **Why**: The original code created list comprehensions multiple times within `np.vstack`, causing redundant temporary arrays and inefficient memory access patterns. The optimized version pre-computes each coordinate array once, then combines them efficiently - **Impact**: Reduces function time from 1.88ms to 1.41ms (25% faster). Line profiler shows the costly list comprehensions in the original (lines with 27%, 14%, 13%, 12% of time) are replaced with more efficient array operations **2. Optimized comparison in `boxes_iou` function:** - **What**: Changed `(inter_area / denom) > threshold` to `inter_area > (threshold * denom)` - **Why**: Avoids expensive division operations by algebraically rearranging the inequality. Division is significantly slower than multiplication in NumPy, especially for large arrays - **Impact**: Reduces the final comparison from 19% to 5.8% of function time, while the intermediate denominator calculation takes 11.8% **3. 
Minor optimization in boolean mask creation:** - **What**: Replaced `boxes_almost_same.sum(axis=1).astype(bool)` with `np.any(boxes_almost_same, axis=1)` - **Why**: `np.any` short-circuits on the first True value and is semantically clearer, though the performance gain is minimal **Test case analysis shows the optimizations are particularly effective for:** - Large-scale scenarios (1000+ elements): 17-75% speedup depending on match patterns - Cases with no matches benefit most (74.6% faster) due to avoiding expensive division operations - All test cases show consistent 6-17% improvements, indicating robust optimization across different workloads The optimizations maintain identical functionality while reducing computational overhead through better NumPy usage patterns and mathematical rearrangement. ✅ **Correctness verification report:** | Test | Status | | --------------------------- | ----------------- | | ⏪ Replay Tests | 🔘 **None Found** | | ⚙️ Existing Unit Tests | 🔘 **None Found** | | 🔎 Concolic Coverage Tests | 🔘 **None Found** | | 🌀 Generated Regression Tests | ✅ **18 Passed** | |📊 Tests Coverage | 100.0% |
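
For reviewers reading the description without the diff open, here is a minimal, self-contained NumPy sketch of the three patterns described above. It is not the actual `boxes_iou` / `_minimum_containing_coords` code from `pdfminer_processing.py`; the helper names `pairwise_iou_threshold` and `minimum_containing_coords`, the `(x1, y1, x2, y2)` box layout, and the array shapes are assumptions made purely for illustration.

```python
import numpy as np


def pairwise_iou_threshold(boxes_a: np.ndarray, boxes_b: np.ndarray, threshold: float) -> np.ndarray:
    """Boolean (N, M) matrix: which (a, b) pairs overlap with IoU above `threshold`.

    Hypothetical stand-in for the optimized comparison: instead of computing
    ``(inter_area / denom) > threshold`` with an elementwise division, compare
    ``inter_area > threshold * denom`` so only a multiplication is needed.
    Boxes are assumed to be (x1, y1, x2, y2).
    """
    ax1, ay1, ax2, ay2 = np.split(boxes_a, 4, axis=1)   # each (N, 1)
    bx1, by1, bx2, by2 = boxes_b.T                       # each (M,)

    inter_w = np.clip(np.minimum(ax2, bx2) - np.maximum(ax1, bx1), 0, None)
    inter_h = np.clip(np.minimum(ay2, by2) - np.maximum(ay1, by1), 0, None)
    inter_area = inter_w * inter_h                       # (N, M)

    area_a = (ax2 - ax1) * (ay2 - ay1)                   # (N, 1)
    area_b = (bx2 - bx1) * (by2 - by1)                   # (M,)
    denom = area_a + area_b - inter_area                 # union area, (N, M)

    # Algebraic rearrangement: avoids dividing over the full (N, M) matrix.
    return inter_area > threshold * denom


def minimum_containing_coords(boxes_a: np.ndarray, boxes_b: np.ndarray) -> np.ndarray:
    """Smallest box containing each row-aligned pair of boxes.

    Mirrors the ``np.column_stack`` pattern: each coordinate column is computed
    once, then the four columns are combined in a single call, rather than
    building intermediate Python lists inside ``np.vstack``.
    """
    return np.column_stack(
        [
            np.minimum(boxes_a[:, 0], boxes_b[:, 0]),  # x1
            np.minimum(boxes_a[:, 1], boxes_b[:, 1]),  # y1
            np.maximum(boxes_a[:, 2], boxes_b[:, 2]),  # x2
            np.maximum(boxes_a[:, 3], boxes_b[:, 3]),  # y2
        ]
    )


if __name__ == "__main__":
    extracted = np.array([[0, 0, 10, 10], [20, 20, 30, 30]], dtype=float)
    inferred = np.array([[1, 1, 10, 10], [100, 100, 110, 110]], dtype=float)

    almost_same = pairwise_iou_threshold(extracted, inferred, threshold=0.5)
    # Rows of `extracted` that matched at least one inferred box, using
    # np.any(axis=1) instead of sum(axis=1).astype(bool).
    matched = np.any(almost_same, axis=1)
    print(almost_same)   # [[ True False], [False False]]
    print(matched)       # [ True False]
    print(minimum_containing_coords(extracted[:1], inferred[:1]))  # [[ 0.  0. 10. 10.]]
```

Running the sketch on the toy boxes above shows the intended behavior of the rearranged comparison: the result is identical to the division-based form, it just reaches it without the elementwise divide.
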
🌀 Generated Regression Tests and Runtime ```python import numpy as np # imports import pytest from unstructured.partition.pdf_image.pdfminer_processing import \ _merge_extracted_into_inferred_when_almost_the_same # --- Minimal class stubs and helpers to support the function under test --- class DummyLayoutElements: """ Minimal implementation of LayoutElements to support testing. - element_coords: np.ndarray of shape (N, 4) for bounding boxes. - texts: np.ndarray of shape (N,) for text strings. - is_extracted_array: np.ndarray of shape (N,) for boolean flags. """ def __init__(self, element_coords, texts=None, is_extracted_array=None): self.element_coords = np.array(element_coords, dtype=np.float32) self.texts = np.array(texts if texts is not None else [''] * len(element_coords), dtype=object) self.is_extracted_array = np.array(is_extracted_array if is_extracted_array is not None else [False] * len(element_coords), dtype=bool) def __len__(self): return len(self.element_coords) def slice(self, mask): # mask can be a boolean array or integer indices if isinstance(mask, (np.ndarray, list)): if isinstance(mask[0], bool): idx = np.where(mask)[0] else: idx = np.array(mask) else: idx = np.array([mask]) return DummyLayoutElements( self.element_coords[idx], self.texts[idx], self.is_extracted_array[idx] ) from unstructured.partition.pdf_image.pdfminer_processing import \ _merge_extracted_into_inferred_when_almost_the_same # --- Unit Tests --- # ----------- BASIC TEST CASES ----------- def test_no_inferred_elements_returns_false_mask(): # No inferred elements: all extracted should not be merged extracted = DummyLayoutElements([[0, 0, 1, 1], [1, 1, 2, 2]], texts=["a", "b"]) inferred = DummyLayoutElements([]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.9); mask = codeflash_output # 3.50μs -> 3.30μs (6.10% faster) def test_no_extracted_elements_returns_empty_mask(): # No extracted elements: should return empty mask extracted = DummyLayoutElements([]) inferred = DummyLayoutElements([[0, 0, 1, 1]]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.9); mask = codeflash_output # 2.30μs -> 2.31μs (0.475% slower) #------------------------------------------------ import numpy as np # imports import pytest from unstructured.partition.pdf_image.pdfminer_processing import \ _merge_extracted_into_inferred_when_almost_the_same # Minimal stubs for TextRegions and LayoutElements to enable testing class TextRegions: def __init__(self, coords, texts=None, is_extracted_array=None): self.x1 = coords[:, 0] self.y1 = coords[:, 1] self.x2 = coords[:, 2] self.y2 = coords[:, 3] self.texts = np.array(texts) if texts is not None else np.array([""] * len(coords)) self.is_extracted_array = np.array(is_extracted_array) if is_extracted_array is not None else np.zeros(len(coords), dtype=bool) self.element_coords = coords def __len__(self): return len(self.element_coords) def slice(self, mask): # mask can be bool array or indices if isinstance(mask, (np.ndarray, list)): if isinstance(mask, np.ndarray) and mask.dtype == bool: idx = np.where(mask)[0] else: idx = mask else: idx = [mask] coords = self.element_coords[idx] texts = self.texts[idx] is_extracted_array = self.is_extracted_array[idx] return TextRegions(coords, texts, is_extracted_array) class LayoutElements(TextRegions): pass from unstructured.partition.pdf_image.pdfminer_processing import \ _merge_extracted_into_inferred_when_almost_the_same # =========================== # Unit Tests # 
=========================== # ----------- BASIC TEST CASES ----------- def test_basic_exact_match(): # One extracted, one inferred, same box coords = np.array([[0, 0, 10, 10]]) extracted = LayoutElements(coords, texts=["extracted"], is_extracted_array=[True]) inferred = LayoutElements(coords, texts=["inferred"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 207μs -> 192μs (7.74% faster) def test_basic_no_match(): # Boxes do not overlap extracted = LayoutElements(np.array([[0, 0, 10, 10]]), texts=["extracted"], is_extracted_array=[True]) inferred = LayoutElements(np.array([[20, 20, 30, 30]]), texts=["inferred"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 163μs -> 151μs (7.85% faster) def test_basic_partial_overlap_below_threshold(): # Overlap, but below threshold extracted = LayoutElements(np.array([[0, 0, 10, 10]]), texts=["extracted"], is_extracted_array=[True]) inferred = LayoutElements(np.array([[5, 5, 15, 15]]), texts=["inferred"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 158μs -> 148μs (6.53% faster) def test_basic_partial_overlap_above_threshold(): # Overlap, above threshold extracted = LayoutElements(np.array([[0, 0, 10, 10]]), texts=["extracted"], is_extracted_array=[True]) inferred = LayoutElements(np.array([[0, 0, 10, 10.1]]), texts=["inferred"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 191μs -> 176μs (8.22% faster) def test_basic_multiple_elements_some_match(): # Multiple extracted/inferred, some matches extracted = LayoutElements( np.array([[0, 0, 10, 10], [20, 20, 30, 30]]), texts=["extracted1", "extracted2"], is_extracted_array=[True, True] ) inferred = LayoutElements( np.array([[0, 0, 10, 10], [100, 100, 110, 110]]), texts=["inferred1", "inferred2"], is_extracted_array=[False, False] ) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 172μs -> 162μs (5.98% faster) # ----------- EDGE TEST CASES ----------- def test_edge_empty_extracted(): # No extracted elements extracted = LayoutElements(np.zeros((0, 4)), texts=[], is_extracted_array=[]) inferred = LayoutElements(np.array([[0,0,1,1]]), texts=["foo"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 2.08μs -> 2.06μs (0.969% faster) def test_edge_empty_inferred(): # No inferred elements extracted = LayoutElements(np.array([[0,0,1,1]]), texts=["foo"], is_extracted_array=[True]) inferred = LayoutElements(np.zeros((0, 4)), texts=[], is_extracted_array=[]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 2.71μs -> 2.48μs (9.29% faster) def test_edge_all_elements_match(): # All extracted match inferred coords = np.array([[0,0,10,10], [20,20,30,30]]) extracted = LayoutElements(coords, texts=["A", "B"], is_extracted_array=[True, True]) inferred = LayoutElements(coords, texts=["X", "Y"], is_extracted_array=[False, False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 174μs 
-> 162μs (7.69% faster) def test_edge_threshold_zero(): # Threshold zero means all overlap counts extracted = LayoutElements(np.array([[0,0,10,10]]), texts=["foo"], is_extracted_array=[True]) inferred = LayoutElements(np.array([[5,5,15,15]]), texts=["bar"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.0); mask = codeflash_output # 159μs -> 150μs (5.94% faster) def test_edge_threshold_one(): # Threshold one means only perfect overlap counts extracted = LayoutElements(np.array([[0,0,10,10]]), texts=["foo"], is_extracted_array=[True]) inferred = LayoutElements(np.array([[0,0,10,10]]), texts=["bar"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 1.0); mask = codeflash_output # 155μs -> 145μs (7.01% faster) def test_edge_multiple_matches_first_match_wins(): # Extracted overlaps with multiple inferred, but only first match is updated extracted = LayoutElements(np.array([[0,0,10,10]]), texts=["foo"], is_extracted_array=[True]) inferred = LayoutElements( np.array([[0,0,10,10], [0,0,10,10]]), texts=["bar1", "bar2"], is_extracted_array=[False, False] ) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 168μs -> 156μs (7.25% faster) def test_edge_coords_are_updated_to_minimum_containing(): # Bounding boxes are updated to minimum containing box extracted = LayoutElements(np.array([[1,2,9,10]]), texts=["foo"], is_extracted_array=[True]) inferred = LayoutElements(np.array([[0,0,10,10]]), texts=["bar"], is_extracted_array=[False]) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 156μs -> 144μs (8.56% faster) # The new coords should be the minimum containing both expected = np.array([0,0,10,10]) # ----------- LARGE SCALE TEST CASES ----------- def test_large_scale_many_elements(): # 500 extracted, 500 inferred, all match N = 500 coords = np.stack([np.arange(N), np.arange(N), np.arange(N)+10, np.arange(N)+10], axis=1) extracted = LayoutElements(coords, texts=[f"ex{i}" for i in range(N)], is_extracted_array=[True]*N) inferred = LayoutElements(coords.copy(), texts=[f"in{i}" for i in range(N)], is_extracted_array=[False]*N) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 2.90ms -> 2.79ms (3.78% faster) def test_large_scale_some_elements_match(): # 1000 extracted, 500 inferred, only first 500 match N = 1000 M = 500 coords_extracted = np.stack([np.arange(N), np.arange(N), np.arange(N)+10, np.arange(N)+10], axis=1) coords_inferred = coords_extracted[:M] extracted = LayoutElements(coords_extracted, texts=[f"ex{i}" for i in range(N)], is_extracted_array=[True]*N) inferred = LayoutElements(coords_inferred.copy(), texts=[f"in{i}" for i in range(M)], is_extracted_array=[False]*M) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 6.49ms -> 5.56ms (16.6% faster) # First 500 should be merged, rest not expected_mask = np.zeros(N, dtype=bool) expected_mask[:M] = True def test_large_scale_no_elements_match(): # 1000 extracted, 500 inferred, none match N = 1000 M = 500 coords_extracted = np.stack([np.arange(N), np.arange(N), np.arange(N)+10, np.arange(N)+10], axis=1) coords_inferred = coords_extracted[:M] + 10000 # Far away extracted = LayoutElements(coords_extracted, 
texts=[f"ex{i}" for i in range(N)], is_extracted_array=[True]*N) inferred = LayoutElements(coords_inferred, texts=[f"in{i}" for i in range(M)], is_extracted_array=[False]*M) codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 8.91ms -> 5.11ms (74.6% faster) def test_large_scale_performance(): # Test that the function runs efficiently for 1000 elements N = 1000 coords = np.stack([np.arange(N), np.arange(N), np.arange(N)+10, np.arange(N)+10], axis=1) extracted = LayoutElements(coords, texts=[f"ex{i}" for i in range(N)], is_extracted_array=[True]*N) inferred = LayoutElements(coords.copy(), texts=[f"in{i}" for i in range(N)], is_extracted_array=[False]*N) import time start = time.time() codeflash_output = _merge_extracted_into_inferred_when_almost_the_same(extracted, inferred, 0.99); mask = codeflash_output # 20.6ms -> 17.6ms (17.1% faster) elapsed = time.time() - start # codeflash_output is used to check that the output of the original code is the same as that of the optimized code. ```
To edit these changes `git checkout codeflash/optimize-pr4112-2025-11-05T21.03.01` and push. [![Codeflash](https://img.shields.io/badge/Optimized%20with-Codeflash-yellow?style=flat&color=%23ffc428&logo=data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iNDgwIiBoZWlnaHQ9ImF1dG8iIHZpZXdCb3g9IjAgMCA0ODAgMjgwIiBmaWxsPSJub25lIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciPgo8cGF0aCBmaWxsLXJ1bGU9ImV2ZW5vZGQiIGNsaXAtcnVsZT0iZXZlbm9kZCIgZD0iTTI4Ni43IDAuMzc4NDE4SDIwMS43NTFMNTAuOTAxIDE0OC45MTFIMTM1Ljg1MUwwLjk2MDkzOCAyODEuOTk5SDk1LjQzNTJMMjgyLjMyNCA4OS45NjE2SDE5Ni4zNDVMMjg2LjcgMC4zNzg0MThaIiBmaWxsPSIjRkZDMDQzIi8+CjxwYXRoIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMzExLjYwNyAwLjM3ODkwNkwyNTguNTc4IDU0Ljk1MjZIMzc5LjU2N0w0MzIuMzM5IDAuMzc4OTA2SDMxMS42MDdaIiBmaWxsPSIjMEIwQTBBIi8+CjxwYXRoIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMzA5LjU0NyA4OS45NjAxTDI1Ni41MTggMTQ0LjI3NkgzNzcuNTA2TDQzMC4wMjEgODkuNzAyNkgzMDkuNTQ3Vjg5Ljk2MDFaIiBmaWxsPSIjMEIwQTBBIi8+CjxwYXRoIGZpbGwtcnVsZT0iZXZlbm9kZCIgY2xpcC1ydWxlPSJldmVub2RkIiBkPSJNMjQyLjg3MyAxNjQuNjZMMTg5Ljg0NCAyMTkuMjM0SDMxMC44MzNMMzYzLjM0NyAxNjQuNjZIMjQyLjg3M1oiIGZpbGw9IiMwQjBBMEEiLz4KPC9zdmc+Cg==)](https://codeflash.ai) ![Static Badge](https://img.shields.io/badge/🎯_Optimization_Quality-high-green) --- > [!NOTE] > Speeds up layout merging by optimizing bounding-box aggregation, boolean mask creation, and IOU comparison to avoid divisions. > > - **Performance optimizations in `unstructured/partition/pdf_image/pdfminer_processing.py`**: > - `/_minimum_containing_coords`: > - Precomputes `x1/y1/x2/y2` arrays and uses `np.column_stack` to build output; removes extra transpose. > - `/_merge_extracted_into_inferred_when_almost_the_same`: > - Replaces `sum(...).astype(bool)` with `np.any(..., axis=1)` for match mask. > - `/boxes_iou`: > - Computes denominator once and replaces division `(x/y) > t` with `x > t*y` to avoid divisions. > > Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 8a0335f159badc73ea9a7a2694e905f02d56b6e7. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot). Co-authored-by: codeflash-ai[bot] <148906541+codeflash-ai[bot]@users.noreply.github.com> --- .../pdf_image/pdfminer_processing.py | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py index 9ead11a2b3..4023a26ff4 100644 --- a/unstructured/partition/pdf_image/pdfminer_processing.py +++ b/unstructured/partition/pdf_image/pdfminer_processing.py @@ -57,14 +57,20 @@ def _validate_bbox(bbox: list[int | float]) -> bool: def _minimum_containing_coords(*regions: TextRegions) -> np.ndarray: # TODO: refactor to just use np array as input - return np.vstack( + # Optimization: Use np.stack and np.column_stack to build output in a single step + x1s = np.array([region.x1 for region in regions]) + y1s = np.array([region.y1 for region in regions]) + x2s = np.array([region.x2 for region in regions]) + y2s = np.array([region.y2 for region in regions]) + # Use np.min/max reduction rather than create matrix then operate. 
Transpose last for shape (N, 4) + return np.column_stack( ( - np.min([region.x1 for region in regions], axis=0), - np.min([region.y1 for region in regions], axis=0), - np.max([region.x2 for region in regions], axis=0), - np.max([region.y2 for region in regions], axis=0), + np.min(x1s, axis=0), + np.min(y1s, axis=0), + np.max(x2s, axis=0), + np.max(y2s, axis=0), ) - ).T + ) def _inferred_is_elementtype( @@ -120,7 +126,7 @@ def _merge_extracted_into_inferred_when_almost_the_same( inferred_layout.element_coords, threshold=same_region_threshold, ) - extracted_almost_the_same_as_inferred = boxes_almost_same.sum(axis=1).astype(bool) + extracted_almost_the_same_as_inferred = np.any(boxes_almost_same, axis=1) # NOTE: if a row is full of False the argmax returns first index; we use the mask above to # distinguish those (they would be False in the mask) first_match = np.argmax(boxes_almost_same, axis=1) @@ -584,7 +590,9 @@ def boxes_iou( inter_area, boxa_area, boxb_area = areas_of_boxes_and_intersection_area( coords1, coords2, round_to=round_to ) - return (inter_area / np.maximum(EPSILON_AREA, boxa_area + boxb_area.T - inter_area)) > threshold + denom = np.maximum(EPSILON_AREA, boxa_area + boxb_area.T - inter_area) + # Instead of (x/y) > t, use x > t*y for memory & speed with same result + return inter_area > (threshold * denom) @requires_dependencies("unstructured_inference") From ea47d20b23243a0d496e908fe279fe737e4bc561 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Thu, 6 Nov 2025 15:03:34 -0600 Subject: [PATCH 13/17] reduce comment length for linting --- unstructured/partition/pdf_image/pdfminer_processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py index 4023a26ff4..7f228c3e94 100644 --- a/unstructured/partition/pdf_image/pdfminer_processing.py +++ b/unstructured/partition/pdf_image/pdfminer_processing.py @@ -62,7 +62,7 @@ def _minimum_containing_coords(*regions: TextRegions) -> np.ndarray: y1s = np.array([region.y1 for region in regions]) x2s = np.array([region.x2 for region in regions]) y2s = np.array([region.y2 for region in regions]) - # Use np.min/max reduction rather than create matrix then operate. Transpose last for shape (N, 4) + # Use np.min/max reduction rather than create matrix then operate. return np.column_stack( ( np.min(x1s, axis=0), From 0ede05d1af4dba8754ec5795b5bd82ea90b76753 Mon Sep 17 00:00:00 2001 From: luke-kucing Date: Thu, 6 Nov 2025 16:01:32 -0500 Subject: [PATCH 14/17] Security Fixes - CVE Remediation (#4115) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Main Changes: 1. Removed Clarifai Dependency - Completely removed the clarifai dependency which is no longer used in the codebase - Removed clarifai from the unstructured-ingest extras list in requirements/ingest/ingest.txt:1 - Removed clarifai test script reference from test_unstructured_ingest/test-ingest-dest.sh:23 2. Updated Dependencies to Resolve CVEs - pypdf: Updated from 6.1.1 → 6.1.3 (fixes GHSA-vr63-x8vc-m265) - pip: Added explicit upgrade to >=25.3 in Dockerfile (fixes GHSA-4xh5-x5gv-qwph) - uv: Addressed GHSA-8qf3-x8v5-2pj8 and GHSA-pqhf-p39g-3x64 3. Dockerfile Security Enhancements (Dockerfile:17,28-29) - Added Alpine package upgrade for py3.12-pip - Added explicit pip upgrade step before installing Python dependencies 4. 
General Dependency Updates Ran pip-compile across all requirement files, resulting in updates to: - cryptography: 46.0.2 → 46.0.3 - psutil: 7.1.0 → 7.1.3 - rapidfuzz: 3.14.1 → 3.14.3 - regex: 2025.9.18 → 2025.11.3 - wrapt: 1.17.3 → 2.0.0 - Plus many other transitive dependencies across all extra requirement files 5. Version Bump - Updated version from 0.18.16 → 0.18.17 in unstructured/__version__.py:1 - Updated CHANGELOG.md with security fixes documentation Impact: This PR resolves 4 CVEs total without introducing breaking changes, making it a pure security maintenance release. --------- Co-authored-by: Claude --- CHANGELOG.md | 11 ++++- Dockerfile | 4 ++ requirements/base.txt | 4 +- requirements/dev.txt | 16 +++---- requirements/extra-csv.txt | 6 +-- requirements/extra-docx.txt | 4 +- requirements/extra-odt.txt | 4 +- requirements/extra-paddleocr.txt | 42 ++++++++--------- requirements/extra-pdf-image.txt | 48 ++++++++++---------- requirements/extra-xlsx.txt | 16 +++---- requirements/huggingface.txt | 32 ++++++------- requirements/ingest/ingest.txt | 2 +- requirements/test.txt | 18 ++++---- test_unstructured_ingest/test-ingest-dest.sh | 1 - unstructured/__version__.py | 2 +- 15 files changed, 111 insertions(+), 99 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 90b7a706bd..af0ad25f43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,11 +1,20 @@ -## 0.18.17-dev0 +## 0.18.18-dev0 ### Enhancement - Flag extracted elements as such in the metadata for downstream use +## 0.18.17 + +### Enhancement + ### Features ### Fixes +- Removed `clarifai` dependency as it is no longer used +- Bumped dependencies via pip-compile to address the following CVEs: + - **pypdf**: GHSA-vr63-x8vc-m265 + - **pip**: GHSA-4xh5-x5gv-qwph + - **uv**: GHSA-8qf3-x8v5-2pj8 GHSA-pqhf-p39g-3x64 ## 0.18.16 diff --git a/Dockerfile b/Dockerfile index 4b6fe48f67..d5fcd351e2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,6 +14,7 @@ COPY example-docs example-docs RUN chown -R notebook-user:notebook-user /app && \ apk add --no-cache font-ubuntu fontconfig git && \ + apk upgrade --no-cache py3.12-pip && \ fc-cache -fv && \ [ -e /usr/bin/python3 ] || ln -s /usr/bin/$PYTHON /usr/bin/python3 @@ -24,6 +25,9 @@ ENV PATH="${PATH}:/home/notebook-user/.local/bin" ENV TESSDATA_PREFIX=/usr/local/share/tessdata ENV NLTK_DATA=/home/notebook-user/nltk_data +# Upgrade pip to fix CVE-2025-8869 +RUN $PIP install --no-cache-dir --user --upgrade "pip>=25.3" + # Install Python dependencies and download required NLTK packages RUN find requirements/ -type f -name "*.txt" ! -name "test.txt" ! -name "dev.txt" ! 
-name "constraints.txt" -exec $PIP install --no-cache-dir --user -r '{}' ';' && \ mkdir -p ${NLTK_DATA} && \ diff --git a/requirements/base.txt b/requirements/base.txt index 25a996dc54..ed63347f70 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -141,11 +141,11 @@ typing-inspect==0.9.0 # unstructured-client unstructured-client==0.25.9 # via - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # -r ./base.in urllib3==2.5.0 # via - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # requests # unstructured-client webencodings==0.5.1 diff --git a/requirements/dev.txt b/requirements/dev.txt index 8d4b60b5a1..35bba06dd8 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -10,8 +10,8 @@ cfgv==3.4.0 # via pre-commit click==8.3.0 # via - # -c ./base.txt - # -c ./test.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/test.txt # pip-tools distlib==0.4.0 # via virtualenv @@ -23,14 +23,14 @@ nodeenv==1.9.1 # via pre-commit packaging==25.0 # via - # -c ./base.txt - # -c ./test.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/test.txt # build pip-tools==7.5.1 # via -r ./dev.in platformdirs==4.5.0 # via - # -c ./test.txt + # -c /Users/luke/git/unstructured/requirements/test.txt # virtualenv pre-commit==4.3.0 # via -r ./dev.in @@ -42,13 +42,13 @@ pyyaml==6.0.3 # via pre-commit tomli==2.3.0 # via - # -c ./test.txt + # -c /Users/luke/git/unstructured/requirements/test.txt # build # pip-tools typing-extensions==4.15.0 # via - # -c ./base.txt - # -c ./test.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/test.txt # virtualenv virtualenv==20.35.4 # via pre-commit diff --git a/requirements/extra-csv.txt b/requirements/extra-csv.txt index 8f9cdd9c2a..50edfb7262 100644 --- a/requirements/extra-csv.txt +++ b/requirements/extra-csv.txt @@ -6,19 +6,19 @@ # numpy==2.2.6 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pandas pandas==2.3.3 # via -r ./extra-csv.in python-dateutil==2.9.0.post0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pandas pytz==2025.2 # via pandas six==1.17.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-dateutil tzdata==2025.2 # via pandas diff --git a/requirements/extra-docx.txt b/requirements/extra-docx.txt index 50bec141da..ad3bf969ac 100644 --- a/requirements/extra-docx.txt +++ b/requirements/extra-docx.txt @@ -6,11 +6,11 @@ # lxml==6.0.2 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-docx python-docx==1.2.0 # via -r ./extra-docx.in typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-docx diff --git a/requirements/extra-odt.txt b/requirements/extra-odt.txt index 3a91238dc2..ae6624f842 100644 --- a/requirements/extra-odt.txt +++ b/requirements/extra-odt.txt @@ -6,7 +6,7 @@ # lxml==6.0.2 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-docx pypandoc==1.15 # via -r ./extra-odt.in @@ -14,5 +14,5 @@ python-docx==1.2.0 # via -r ./extra-odt.in typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-docx diff --git a/requirements/extra-paddleocr.txt 
b/requirements/extra-paddleocr.txt index 7f320650c3..9383f0d515 100644 --- a/requirements/extra-paddleocr.txt +++ b/requirements/extra-paddleocr.txt @@ -14,27 +14,27 @@ annotated-types==0.7.0 # via pydantic anyio==4.11.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # httpx beautifulsoup4==4.14.2 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # unstructured-paddleocr certifi==2025.10.5 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # httpcore # httpx # requests charset-normalizer==3.4.4 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # requests cython==3.2.0 # via unstructured-paddleocr exceptiongroup==1.3.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # anyio fire==0.7.1 # via unstructured-paddleocr @@ -42,19 +42,19 @@ fonttools==4.60.1 # via unstructured-paddleocr h11==0.16.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # httpcore httpcore==1.0.9 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # httpx httpx==0.28.1 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # paddlepaddle idna==3.11 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # anyio # httpx # requests @@ -64,7 +64,7 @@ lazy-loader==0.4 # via scikit-image lxml==6.0.2 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-docx networkx==3.4.2 # via @@ -72,7 +72,7 @@ networkx==3.4.2 # scikit-image numpy==2.2.6 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # albucore # albumentations # imageio @@ -98,7 +98,7 @@ opt-einsum==3.3.0 # via paddlepaddle packaging==25.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # lazy-loader # scikit-image paddlepaddle==3.2.1 @@ -111,7 +111,7 @@ pillow==12.0.0 # unstructured-paddleocr protobuf==6.33.0 # via - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # paddlepaddle pyclipper==1.3.0.post6 # via unstructured-paddleocr @@ -127,11 +127,11 @@ pyyaml==6.0.3 # unstructured-paddleocr rapidfuzz==3.14.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # unstructured-paddleocr requests==2.32.5 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # unstructured-paddleocr safetensors==0.6.2 # via paddlepaddle @@ -147,11 +147,11 @@ simsimd==6.5.3 # via albucore sniffio==1.3.1 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # anyio soupsieve==2.8 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # beautifulsoup4 stringzilla==4.2.3 # via albucore @@ -161,11 +161,11 @@ tifffile==2025.5.10 # via scikit-image tqdm==4.67.1 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # unstructured-paddleocr typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # anyio # beautifulsoup4 # exceptiongroup @@ -180,6 +180,6 @@ unstructured-paddleocr==2.10.0 # via -r ./extra-paddleocr.in urllib3==2.5.0 # via - # -c ./base.txt - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # requests diff --git a/requirements/extra-pdf-image.txt 
b/requirements/extra-pdf-image.txt index 8e4ab6e06f..dda97bbe72 100644 --- a/requirements/extra-pdf-image.txt +++ b/requirements/extra-pdf-image.txt @@ -12,15 +12,15 @@ cachetools==6.2.1 # via google-auth certifi==2025.10.5 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # requests cffi==2.0.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # cryptography charset-normalizer==3.4.4 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pdfminer-six # requests coloredlogs==15.0.1 @@ -29,7 +29,7 @@ contourpy==1.3.2 # via matplotlib cryptography==46.0.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pdfminer-six cycler==0.12.1 # via matplotlib @@ -82,7 +82,7 @@ humanfriendly==10.0 # via coloredlogs idna==3.11 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # requests jinja2==3.1.6 # via torch @@ -90,7 +90,7 @@ kiwisolver==1.4.9 # via matplotlib lxml==6.0.2 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pikepdf markupsafe==3.0.3 # via jinja2 @@ -104,7 +104,7 @@ networkx==3.4.2 # via torch numpy==2.2.6 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # accelerate # contourpy # matplotlib @@ -132,7 +132,7 @@ opencv-python==4.12.0.88 # via unstructured-inference packaging==25.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # accelerate # huggingface-hub # matplotlib @@ -146,7 +146,7 @@ pdf2image==1.17.0 # via -r ./extra-pdf-image.in pdfminer-six==20250327 # via - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # -r ./extra-pdf-image.in # unstructured-inference pi-heif==1.1.1 @@ -167,7 +167,7 @@ proto-plus==1.26.1 # google-cloud-vision protobuf==6.33.0 # via - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # google-api-core # google-cloud-vision # googleapis-common-protos @@ -177,7 +177,7 @@ protobuf==6.33.0 # proto-plus psutil==7.1.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # accelerate pyasn1==0.6.1 # via @@ -189,19 +189,19 @@ pycocotools==2.0.10 # via effdet pycparser==2.23 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # cffi pyparsing==3.2.5 # via matplotlib pypdf==6.1.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # -r ./extra-pdf-image.in pypdfium2==5.0.0 # via unstructured-inference python-dateutil==2.9.0.post0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # matplotlib # pandas python-multipart==0.0.20 @@ -217,15 +217,15 @@ pyyaml==6.0.3 # transformers rapidfuzz==3.14.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # unstructured-inference regex==2025.11.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # transformers requests==2.32.5 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # google-api-core # huggingface-hub # transformers @@ -240,7 +240,7 @@ scipy==1.15.3 # via unstructured-inference six==1.17.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-dateutil sympy==1.14.0 # via @@ -252,7 +252,7 @@ timm==1.0.22 # unstructured-inference tokenizers==0.21.4 # via - # -c ./deps/constraints.txt + # -c 
/Users/luke/git/unstructured/requirements/deps/constraints.txt # transformers torch==2.9.0 # via @@ -267,14 +267,14 @@ torchvision==0.24.0 # timm tqdm==4.67.1 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # huggingface-hub # transformers transformers==4.55.4 # via unstructured-inference typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # cryptography # grpcio # huggingface-hub @@ -289,10 +289,10 @@ unstructured-pytesseract==0.3.15 # via -r ./extra-pdf-image.in urllib3==2.5.0 # via - # -c ./base.txt - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # requests wrapt==2.0.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # deprecated diff --git a/requirements/extra-xlsx.txt b/requirements/extra-xlsx.txt index 329f69770c..fbfe39b09e 100644 --- a/requirements/extra-xlsx.txt +++ b/requirements/extra-xlsx.txt @@ -6,11 +6,11 @@ # cffi==2.0.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # cryptography cryptography==46.0.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # msoffcrypto-tool et-xmlfile==2.0.0 # via openpyxl @@ -20,11 +20,11 @@ networkx==3.4.2 # via -r extra-xlsx.in numpy==2.2.6 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pandas olefile==0.47 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # msoffcrypto-tool openpyxl==3.1.5 # via -r extra-xlsx.in @@ -32,21 +32,21 @@ pandas==2.3.3 # via -r extra-xlsx.in pycparser==2.23 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # cffi python-dateutil==2.9.0.post0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pandas pytz==2025.2 # via pandas six==1.17.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-dateutil typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # cryptography tzdata==2025.2 # via pandas diff --git a/requirements/huggingface.txt b/requirements/huggingface.txt index ff5ab3f195..711cd6671f 100644 --- a/requirements/huggingface.txt +++ b/requirements/huggingface.txt @@ -6,15 +6,15 @@ # certifi==2025.10.5 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # requests charset-normalizer==3.4.4 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # requests click==8.3.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # sacremoses filelock==3.20.0 # via @@ -33,17 +33,17 @@ huggingface-hub==0.36.0 # transformers idna==3.11 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # requests jinja2==3.1.6 # via torch joblib==1.5.2 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # sacremoses langdetect==1.0.9 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # -r ./huggingface.in markupsafe==3.0.3 # via jinja2 @@ -53,11 +53,11 @@ networkx==3.4.2 # via torch numpy==2.2.6 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # transformers packaging==25.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # huggingface-hub # transformers pyyaml==6.0.3 @@ -66,12 +66,12 
@@ pyyaml==6.0.3 # transformers regex==2025.11.3 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # sacremoses # transformers requests==2.32.5 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # huggingface-hub # transformers sacremoses==0.1.1 @@ -82,19 +82,19 @@ sentencepiece==0.2.1 # via -r ./huggingface.in six==1.17.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # langdetect sympy==1.14.0 # via torch tokenizers==0.21.4 # via - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # transformers torch==2.9.0 # via -r ./huggingface.in tqdm==4.67.1 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # huggingface-hub # sacremoses # transformers @@ -102,11 +102,11 @@ transformers==4.55.4 # via -r ./huggingface.in typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # huggingface-hub # torch urllib3==2.5.0 # via - # -c ./base.txt - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # requests diff --git a/requirements/ingest/ingest.txt b/requirements/ingest/ingest.txt index e817913f10..9623318c78 100644 --- a/requirements/ingest/ingest.txt +++ b/requirements/ingest/ingest.txt @@ -1,4 +1,4 @@ -unstructured-ingest[airtable, astradb, azure, azure-cognitive-search, bedrock, biomed, box, chroma, clarifai, confluence, couchbase, databricks-volumes, delta-table, discord, dropbox, elasticsearch, embed-huggingface, embed-octoai, embed-vertexai, embed-voyageai, gcs, github, gitlab, google-drive, hubspot, jira, kafka, kdbai, milvus, mongodb, notion, onedrive, openai, opensearch, outlook, pinecone, postgres, qdrant, reddit, remote, s3, salesforce, sftp, sharepoint, singlestore, slack, vectara, weaviate, wikipedia]>=0.2.1 +unstructured-ingest[airtable, astradb, azure, azure-cognitive-search, bedrock, biomed, box, chroma, confluence, couchbase, databricks-volumes, delta-table, discord, dropbox, elasticsearch, embed-huggingface, embed-octoai, embed-vertexai, embed-voyageai, gcs, github, gitlab, google-drive, hubspot, jira, kafka, kdbai, milvus, mongodb, notion, onedrive, openai, opensearch, outlook, pinecone, postgres, qdrant, reddit, remote, s3, salesforce, sftp, sharepoint, singlestore, slack, vectara, weaviate, wikipedia]>=0.2.1 s3fs>=2024.9.0 urllib3>=2.4.0 backoff>=2.2.1 diff --git a/requirements/test.txt b/requirements/test.txt index d28e3ae10f..328186bbd6 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -12,7 +12,7 @@ black==25.9.0 # via -r ./test.in click==8.3.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # black coverage[toml]==7.11.0 # via @@ -20,7 +20,7 @@ coverage[toml]==7.11.0 # pytest-cov exceptiongroup==1.3.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # pytest execnet==2.1.1 # via pytest-xdist @@ -44,12 +44,12 @@ mypy==1.18.2 # via -r ./test.in mypy-extensions==1.1.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # black # mypy packaging==25.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # black # pytest pathspec==0.12.1 @@ -89,7 +89,7 @@ pytest-xdist==3.8.0 # via -r ./test.in python-dateutil==2.9.0.post0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # freezegun 
pytokens==0.3.0 # via black @@ -99,7 +99,7 @@ semantic-version==2.10.0 # via liccheck six==1.17.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # python-dateutil toml==0.10.2 # via liccheck @@ -120,7 +120,7 @@ types-tabulate==0.9.0.20241207 # via -r ./test.in typing-extensions==4.15.0 # via - # -c ./base.txt + # -c /Users/luke/git/unstructured/requirements/base.txt # black # exceptiongroup # grpcio @@ -132,6 +132,6 @@ typing-inspection==0.4.2 # via pydantic urllib3==2.5.0 # via - # -c ./base.txt - # -c ./deps/constraints.txt + # -c /Users/luke/git/unstructured/requirements/base.txt + # -c /Users/luke/git/unstructured/requirements/deps/constraints.txt # types-requests diff --git a/test_unstructured_ingest/test-ingest-dest.sh b/test_unstructured_ingest/test-ingest-dest.sh index 380bcf9d63..4279abc0d7 100755 --- a/test_unstructured_ingest/test-ingest-dest.sh +++ b/test_unstructured_ingest/test-ingest-dest.sh @@ -20,7 +20,6 @@ all_tests=( 'azure-cognitive-search.sh' 'box.sh' 'chroma.sh' - 'clarifai.sh' 'delta-table.sh' 'dropbox.sh' 'elasticsearch.sh' diff --git a/unstructured/__version__.py b/unstructured/__version__.py index 08c0547590..7930d957b8 100644 --- a/unstructured/__version__.py +++ b/unstructured/__version__.py @@ -1 +1 @@ -__version__ = "0.18.17-dev0" # pragma: no cover +__version__ = "0.18.18-dev0" # pragma: no cover From f6c18c38b62074e3c4db5ec526deca8aab8306ed Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Fri, 7 Nov 2025 12:22:25 -0600 Subject: [PATCH 15/17] omit images --- unstructured/partition/pdf_image/pdfminer_processing.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py index 7f228c3e94..dab82c6f09 100644 --- a/unstructured/partition/pdf_image/pdfminer_processing.py +++ b/unstructured/partition/pdf_image/pdfminer_processing.py @@ -435,7 +435,7 @@ def process_page_layout_from_pdfminer( element_class_ids=np.array(element_class), element_class_id_map={0: ElementType.UNCATEGORIZED_TEXT, 1: ElementType.IMAGE}, sources=np.array([Source.PDFMINER] * len(element_class)), - is_extracted_array=np.array([IsExtracted.TRUE] * len(element_class)), + is_extracted_array=np.array([IsExtracted.TRUE if (this_class == 0) else None for this_class in element_class]), ), urls_metadata, ) @@ -666,7 +666,8 @@ def merge_inferred_with_extracted_layout( target_region=merged_layout.slice([i]), source_regions=extracted_page_layout, ) - merged_layout.is_extracted_array[i] = IsExtracted.TRUE + if merged_layout.element_class_id_map[merged_layout.element_class_ids[i]] not in ("Image", "Picture"): + merged_layout.is_extracted_array[i] = IsExtracted.TRUE merged_layout.texts[i] = remove_control_characters(text) inferred_page.elements_array = merged_layout From 59b8130e280c02796594791d107e32d765341849 Mon Sep 17 00:00:00 2001 From: Alan Bertl Date: Fri, 7 Nov 2025 12:22:51 -0600 Subject: [PATCH 16/17] format --- unstructured/partition/pdf_image/pdfminer_processing.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/unstructured/partition/pdf_image/pdfminer_processing.py b/unstructured/partition/pdf_image/pdfminer_processing.py index dab82c6f09..65ecb1d69e 100644 --- a/unstructured/partition/pdf_image/pdfminer_processing.py +++ b/unstructured/partition/pdf_image/pdfminer_processing.py @@ -435,7 +435,9 @@ def process_page_layout_from_pdfminer( element_class_ids=np.array(element_class), element_class_id_map={0: 
ElementType.UNCATEGORIZED_TEXT, 1: ElementType.IMAGE}, sources=np.array([Source.PDFMINER] * len(element_class)), - is_extracted_array=np.array([IsExtracted.TRUE if (this_class == 0) else None for this_class in element_class]), + is_extracted_array=np.array( + [IsExtracted.TRUE if (this_class == 0) else None for this_class in element_class] + ), ), urls_metadata, ) @@ -666,7 +668,10 @@ def merge_inferred_with_extracted_layout( target_region=merged_layout.slice([i]), source_regions=extracted_page_layout, ) - if merged_layout.element_class_id_map[merged_layout.element_class_ids[i]] not in ("Image", "Picture"): + if merged_layout.element_class_id_map[merged_layout.element_class_ids[i]] not in ( + "Image", + "Picture", + ): merged_layout.is_extracted_array[i] = IsExtracted.TRUE merged_layout.texts[i] = remove_control_characters(text) From 58f9e4bb4cac8e3dc3615005581b17d54d76cdb1 Mon Sep 17 00:00:00 2001 From: ryannikolaidis <1208590+ryannikolaidis@users.noreply.github.com> Date: Fri, 7 Nov 2025 10:43:24 -0800 Subject: [PATCH 17/17] feat: track text source <- Ingest test fixtures update (#4118) This pull request includes updated ingest test fixtures. Please review and merge if appropriate. Co-authored-by: qued --- .../layout-parser-paper.pdf.json | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json b/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json index 8f147a6b38..27503a46a8 100644 --- a/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json +++ b/test_unstructured_ingest/expected-structured-output/local-single-file-with-pdf-infer-table-structure/layout-parser-paper.pdf.json @@ -1244,7 +1244,6 @@ "element_id": "285d83f3098b26302329b33637fd265f", "text": "Efficient Data Annotation Model Customization Document Images Community Platform ‘A >) ¥ DIA Model Hub a Customized Model Training] == | Layout Detection Models | ——= DIA Pipeline Sharing ~ OCR Module = { Layout Data stuctue ) = (store Visualization LY", "metadata": { - "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -1827,7 +1826,6 @@ "element_id": "fd2288e4e3cf90f109d1c1198cea4ca0", "text": "3 a a 4 a 3 Rectangle vada 4 8 4 iS v 2 [S) af : fa & o a 6 ‘ g 4 Coordinate g 2 8 3 + 4 * v 8 Extra features =| 9%) | Hock) Reading é ret | | Type | | order 2 & a ¢ @ [ coordinatel textblock1 , 8 » , ee 3 , textblock2 , layoutl ] 8 q ® A list of the layout elements ‘sf", "metadata": { - "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2445,7 +2443,6 @@ "element_id": "02a078081972f7bdb26f06a787773a30", "text": "0g Burpunog uayor Aeydsiq:1 vondo 10g Guypunog usyou ap:z uondo Mode I: Showing Layout on the Original Image Mode Il: Drawing OCR’ Text at the Correspoding Position", "metadata": { - "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -2602,7 +2599,6 @@ "element_id": "747f46c43a88768fd543e10bac84203b", "text": "Intra-column reading order Token Categories tie (Adress tee Ewumber Variable Column reading order HEE company ype Column Categories (J tite Adress 1] ree [7] Section Header Maximum Allowed Height (b) Illustration of the recreated document with dense text structure for better OCR performance", "metadata": { - "is_extracted": "true", "filetype": "application/pdf", 
"languages": [ "eng" @@ -2927,7 +2923,6 @@ "element_id": "2b90153124fb6f9e9f5539b9db75d240", "text": "———————_+ (| ‘Active Learning Layout Annotate Layout Dataset | + ‘Annotation Toolkit ¥ alae Deep Leaming Layout Model Training & Inference, ¥ ; Handy Data Structures & Post-processing Ee apis for Layout Dat a Ae ror yon Oats 4 Text Recognition | <—— Default ane Customized ¥ ee Layout Structure Visualization & Export | <—— | visualization & Storage The Japanese Document Helpful LayoutParser Digitization Pipeline Modules", "metadata": { - "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng" @@ -3239,7 +3234,6 @@ "element_id": "1359eaa601a24c083e143b8bf5114127", "text": "(@) Partial table at the bottom (6) Full page table (©) Partial table at the top (@) Mis-detected text line", "metadata": { - "is_extracted": "true", "filetype": "application/pdf", "languages": [ "eng"