fix: refine conversion result
Signed-off-by: Panos Vagenas <[email protected]>
vagenas committed Aug 27, 2024
1 parent fe817b1 commit 6b2a3bc
Showing 8 changed files with 95 additions and 89 deletions.
8 changes: 4 additions & 4 deletions README.md
@@ -49,10 +49,10 @@ To convert individual PDF documents, use `convert_single()`, for example:
```python
from docling.document_converter import DocumentConverter

source = "https://arxiv.org/pdf/2206.01062" # PDF path or URL
source = "https://arxiv.org/pdf/2408.09869" # PDF path or URL
converter = DocumentConverter()
- doc = converter.convert_single(source)
- print(doc.render_as_markdown()) # output: "## DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis [...]"
+ result = converter.convert_single(source)
+ print(result.render_as_markdown()) # output: "## Docling Technical Report[...]"
```

### Convert a batch of documents
@@ -118,7 +118,7 @@ You can convert PDFs from a binary stream instead of from the filesystem as foll
buf = BytesIO(your_binary_stream)
docs = [DocumentStream(filename="my_doc.pdf", stream=buf)]
conv_input = DocumentConversionInput.from_streams(docs)
- converted_docs = doc_converter.convert(conv_input)
+ results = doc_converter.convert(conv_input)
```
### Limit resource usage

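For illustration only (not part of this commit): a minimal sketch of how the renamed result-based API reads end to end. The `ConversionStatus` import path is an assumption based on this version of the codebase.

```python
from docling.datamodel.base_models import ConversionStatus  # assumed import path
from docling.document_converter import DocumentConverter

converter = DocumentConverter()

# convert_single() raises RuntimeError unless the status is SUCCESS or
# PARTIAL_SUCCESS, so any returned result can be rendered.
result = converter.convert_single("https://arxiv.org/pdf/2408.09869")
if result.status == ConversionStatus.PARTIAL_SUCCESS:
    print(f"Converted with {len(result.errors)} error(s)")
print(result.render_as_markdown())
```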
6 changes: 3 additions & 3 deletions docling/datamodel/base_models.py
@@ -247,9 +247,9 @@ class PagePredictions(BaseModel):


class AssembledUnit(BaseModel):
- elements: List[PageElement]
- body: List[PageElement]
- headers: List[PageElement]
+ elements: List[PageElement] = []
+ body: List[PageElement] = []
+ headers: List[PageElement] = []


class Page(BaseModel):
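A note on the `= []` defaults introduced above: on a pydantic `BaseModel`, mutable defaults are copied for each instance rather than shared, so this is safe where a plain class attribute would not be. A minimal standalone sketch (illustrative names only):

```python
from typing import List

from pydantic import BaseModel


class Unit(BaseModel):
    elements: List[str] = []  # pydantic copies this default per instance


a = Unit()
b = Unit()
a.elements.append("x")

print(a.elements)  # ['x']
print(b.elements)  # [] -- no state is shared between instances
```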
29 changes: 19 additions & 10 deletions docling/datamodel/document.py
@@ -12,6 +12,7 @@
from docling_core.types import Table as DsSchemaTable
from docling_core.types import TableCell
from pydantic import BaseModel
+ from typing_extensions import deprecated

from docling.backend.abstract_backend import PdfDocumentBackend
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
@@ -49,6 +50,15 @@
"Text": "paragraph",
}

+ _EMPTY_DOC = DsDocument(
+ _name="",
+ description=DsDocumentDescription(logs=[]),
+ file_info=DsFileInfoObject(
+ filename="",
+ document_hash="",
+ ),
+ )


class InputDocument(BaseModel):
file: PurePath = None
@@ -115,16 +125,17 @@ def __init__(
# raise


+ @deprecated("Use `ConversionResult` instead.")
class ConvertedDocument(BaseModel):
input: InputDocument

status: ConversionStatus = ConversionStatus.PENDING # failure, success
errors: List[ErrorItem] = [] # structure to keep errors

pages: List[Page] = []
- assembled: Optional[AssembledUnit] = None
+ assembled: AssembledUnit = AssembledUnit()

- output: Optional[DsDocument] = None
+ output: DsDocument = _EMPTY_DOC

def to_ds_document(self) -> DsDocument:
title = ""
@@ -297,16 +308,10 @@ def make_spans(cell):
return ds_doc

def render_as_dict(self):
- if self.output:
- return self.output.model_dump(by_alias=True, exclude_none=True)
- else:
- return {}
+ return self.output.model_dump(by_alias=True, exclude_none=True)

def render_as_markdown(self):
- if self.output:
- return self.output.export_to_markdown()
- else:
- return ""
+ return self.output.export_to_markdown()

def render_element_images(
self, element_types: Tuple[PageElement] = (FigureElement,)
@@ -323,6 +328,10 @@ def render_element_images(
yield element, cropped_im


+ class ConversionResult(ConvertedDocument):
+ pass


class DocumentConversionInput(BaseModel):

_path_or_stream_iterator: Iterable[Union[Path, DocumentStream]] = None
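The changes above keep `ConvertedDocument` importable while steering callers toward `ConversionResult`. A standalone sketch of that alias pattern with hypothetical names: `typing_extensions.deprecated` flags the old name for type checkers and emits a `DeprecationWarning` when the deprecated class is instantiated (depending on the typing_extensions version, subclassing may warn as well).

```python
from typing_extensions import deprecated


@deprecated("Use `NewResult` instead.")
class OldResult:
    """Legacy name, kept so existing imports keep working."""

    status: str = "PENDING"


class NewResult(OldResult):
    """Preferred name; inherits everything from the legacy class."""


legacy = OldResult()   # emits DeprecationWarning at runtime
current = NewResult()  # preferred spelling going forward
```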
69 changes: 33 additions & 36 deletions docling/document_converter.py
@@ -7,7 +7,6 @@
from typing import Iterable, Optional, Type, Union

import requests
- from docling_core.types import Document
from PIL import ImageDraw
from pydantic import AnyHttpUrl, TypeAdapter, ValidationError

@@ -22,7 +21,7 @@
PipelineOptions,
)
from docling.datamodel.document import (
- ConvertedDocument,
+ ConversionResult,
DocumentConversionInput,
InputDocument,
)
@@ -73,7 +72,7 @@ def download_models_hf(

return Path(download_path)

- def convert(self, input: DocumentConversionInput) -> Iterable[ConvertedDocument]:
+ def convert(self, input: DocumentConversionInput) -> Iterable[ConversionResult]:

for input_batch in chunkify(
input.docs(pdf_backend=self.pdf_backend), settings.perf.doc_batch_size
@@ -86,9 +85,9 @@ def convert(self, input: DocumentConversionInput) -> Iterable[ConvertedDocument]
# yield from pool.map(self.process_document, input_batch)

# Note: Pdfium backend is not thread-safe, thread pool usage was disabled.
- yield from map(self.process_document, input_batch)
+ yield from map(self._process_document, input_batch)

- def convert_single(self, source: Path | AnyHttpUrl | str) -> ConvertedDocument:
+ def convert_single(self, source: Path | AnyHttpUrl | str) -> ConversionResult:
"""Convert a single document.
Args:
@@ -99,7 +98,7 @@ def convert_single(self, source: Path | AnyHttpUrl | str) -> ConvertedDocument:
RuntimeError: If conversion fails.
Returns:
- Document: The converted document object.
+ ConversionResult: The conversion result object.
"""
with tempfile.TemporaryDirectory() as temp_dir:
try:
@@ -129,51 +128,49 @@ def convert_single(self, source: Path | AnyHttpUrl | str) -> ConvertedDocument:
f"Unexpected file path type encountered: {type(source)}"
)
conv_inp = DocumentConversionInput.from_paths(paths=[local_path])
- converted_docs_iter = self.convert(conv_inp)
- converted_doc: ConvertedDocument = next(converted_docs_iter)
- if converted_doc.status not in {
+ conv_res_iter = self.convert(conv_inp)
+ conv_res: ConversionResult = next(conv_res_iter)
+ if conv_res.status not in {
ConversionStatus.SUCCESS,
ConversionStatus.PARTIAL_SUCCESS,
}:
raise RuntimeError(f"Conversion failed with status: {converted_doc.status}")
return converted_doc
raise RuntimeError(f"Conversion failed with status: {conv_res.status}")
return conv_res

- def process_document(self, in_doc: InputDocument) -> ConvertedDocument:
+ def _process_document(self, in_doc: InputDocument) -> ConversionResult:
start_doc_time = time.time()
- converted_doc = ConvertedDocument(input=in_doc)
+ conv_res = ConversionResult(input=in_doc)

_log.info(f"Processing document {in_doc.file.name}")

if not in_doc.valid:
- converted_doc.status = ConversionStatus.FAILURE
- return converted_doc
+ conv_res.status = ConversionStatus.FAILURE
+ return conv_res

for i in range(0, in_doc.page_count):
- converted_doc.pages.append(Page(page_no=i))
+ conv_res.pages.append(Page(page_no=i))

all_assembled_pages = []

try:
# Iterate batches of pages (page_batch_size) in the doc
- for page_batch in chunkify(
- converted_doc.pages, settings.perf.page_batch_size
- ):
+ for page_batch in chunkify(conv_res.pages, settings.perf.page_batch_size):
start_pb_time = time.time()
# Pipeline

# 1. Initialise the page resources
init_pages = map(
- functools.partial(self.initialize_page, in_doc), page_batch
+ functools.partial(self._initialize_page, in_doc), page_batch
)

# 2. Populate page image
pages_with_images = map(
- functools.partial(self.populate_page_images, in_doc), init_pages
+ functools.partial(self._populate_page_images, in_doc), init_pages
)

# 3. Populate programmatic page cells
pages_with_cells = map(
- functools.partial(self.parse_page_cells, in_doc),
+ functools.partial(self._parse_page_cells, in_doc),
pages_with_images,
)

@@ -202,13 +199,13 @@ def process_document(self, in_doc: InputDocument) -> ConvertedDocument:
# Free up mem resources of PDF backend
in_doc._backend.unload()

- converted_doc.pages = all_assembled_pages
- self.assemble_doc(converted_doc)
+ conv_res.pages = all_assembled_pages
+ self._assemble_doc(conv_res)

status = ConversionStatus.SUCCESS
- for page in converted_doc.pages:
+ for page in conv_res.pages:
if not page._backend.is_valid():
- converted_doc.errors.append(
+ conv_res.errors.append(
ErrorItem(
component_type=DoclingComponentType.PDF_BACKEND,
module_name=type(page._backend).__name__,
@@ -217,10 +214,10 @@ def process_document(self, in_doc: InputDocument) -> ConvertedDocument:
)
status = ConversionStatus.PARTIAL_SUCCESS

- converted_doc.status = status
+ conv_res.status = status

except Exception as e:
- converted_doc.status = ConversionStatus.FAILURE
+ conv_res.status = ConversionStatus.FAILURE
trace = "\n".join(traceback.format_exception(e))
_log.info(
f"Encountered an error during conversion of document {in_doc.document_hash}:\n"
@@ -232,18 +229,18 @@ def process_document(self, in_doc: InputDocument) -> ConvertedDocument:
f"Finished converting document time-pages={end_doc_time:.2f}/{in_doc.page_count}"
)

- return converted_doc
+ return conv_res

# Initialise and load resources for a page, before downstream steps (populate images, cells, ...)
- def initialize_page(self, doc: InputDocument, page: Page) -> Page:
+ def _initialize_page(self, doc: InputDocument, page: Page) -> Page:
page._backend = doc._backend.load_page(page.page_no)
page.size = page._backend.get_size()
page.page_hash = create_hash(doc.document_hash + ":" + str(page.page_no))

return page

# Generate the page image and store it in the page object
- def populate_page_images(self, doc: InputDocument, page: Page) -> Page:
+ def _populate_page_images(self, doc: InputDocument, page: Page) -> Page:
# default scale
page.get_image(
scale=1.0
@@ -259,7 +256,7 @@ def populate_page_images(self, doc: InputDocument, page: Page) -> Page:
return page

# Extract and populate the page cells and store it in the page object
- def parse_page_cells(self, doc: InputDocument, page: Page) -> Page:
+ def _parse_page_cells(self, doc: InputDocument, page: Page) -> Page:
page.cells = page._backend.get_text_cells()

# DEBUG code:
@@ -274,12 +271,12 @@ def draw_text_boxes(image, cells):

return page

- def assemble_doc(self, converted_doc: ConvertedDocument):
+ def _assemble_doc(self, conv_res: ConversionResult):
all_elements = []
all_headers = []
all_body = []

- for p in converted_doc.pages:
+ for p in conv_res.pages:

for el in p.assembled.body:
all_body.append(el)
@@ -288,8 +285,8 @@ def assemble_doc(self, converted_doc: ConvertedDocument):
for el in p.assembled.elements:
all_elements.append(el)

- converted_doc.assembled = AssembledUnit(
+ conv_res.assembled = AssembledUnit(
elements=all_elements, headers=all_headers, body=all_body
)

- converted_doc.output = self.glm_model(converted_doc)
+ conv_res.output = self.glm_model(conv_res)
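Context, not from the commit itself: the per-page stages above are composed with `functools.partial` and `map`, so each page flows through every stage lazily as the final iterator is consumed. A minimal standalone sketch of that composition pattern with made-up stage names:

```python
import functools


def initialize(doc: str, page: int) -> str:
    # Stage 1: pair the page with its document (stands in for loading resources).
    return f"{doc}:p{page}"


def render_image(doc: str, page: str) -> str:
    # Stage 2: pretend to render the page image.
    return page + ":image"


def parse_cells(doc: str, page: str) -> str:
    # Stage 3: pretend to extract programmatic text cells.
    return page + ":cells"


doc = "report.pdf"
pages = range(3)

# Stages are chained lazily; nothing runs until the loop below consumes them.
init_pages = map(functools.partial(initialize, doc), pages)
pages_with_images = map(functools.partial(render_image, doc), init_pages)
pages_with_cells = map(functools.partial(parse_cells, doc), pages_with_images)

for page in pages_with_cells:
    print(page)  # report.pdf:p0:image:cells, report.pdf:p1:image:cells, ...
```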
10 changes: 5 additions & 5 deletions docling/models/ds_glm_model.py
@@ -10,7 +10,7 @@
from PIL import ImageDraw

from docling.datamodel.base_models import BoundingBox, Cluster, CoordOrigin
- from docling.datamodel.document import ConvertedDocument
+ from docling.datamodel.document import ConversionResult


class GlmModel:
@@ -20,8 +20,8 @@ def __init__(self, config):
model = init_nlp_model(model_names="language;term;reference")
self.model = model

- def __call__(self, document: ConvertedDocument) -> DsDocument:
- ds_doc = document.to_ds_document()
+ def __call__(self, conv_res: ConversionResult) -> DsDocument:
+ ds_doc = conv_res.to_ds_document()
ds_doc_dict = ds_doc.model_dump(by_alias=True)

glm_doc = self.model.apply_on_doc(ds_doc_dict)
@@ -34,7 +34,7 @@ def __call__(self, document: ConvertedDocument) -> DsDocument:
# DEBUG code:
def draw_clusters_and_cells(ds_document, page_no):
clusters_to_draw = []
- image = copy.deepcopy(document.pages[page_no].image)
+ image = copy.deepcopy(conv_res.pages[page_no].image)
for ix, elem in enumerate(ds_document.main_text):
if isinstance(elem, BaseText):
prov = elem.prov[0]
Expand All @@ -56,7 +56,7 @@ def draw_clusters_and_cells(ds_document, page_no):
bbox=BoundingBox.from_tuple(
coord=prov.bbox,
origin=CoordOrigin.BOTTOMLEFT,
- ).to_top_left_origin(document.pages[page_no].size.height),
+ ).to_top_left_origin(conv_res.pages[page_no].size.height),
)
)
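A side note on the export calls seen in this commit (`ds_doc.model_dump(by_alias=True)` here, `model_dump(by_alias=True, exclude_none=True)` in `render_as_dict`): `by_alias=True` serializes using field aliases and `exclude_none=True` drops fields whose value is `None`. A standalone pydantic sketch with a hypothetical model:

```python
from typing import Optional

from pydantic import BaseModel, Field


class FileInfo(BaseModel):
    model_config = {"populate_by_name": True}

    document_hash: str = Field(alias="document-hash")
    page_count: Optional[int] = None


info = FileInfo(document_hash="abc123")
print(info.model_dump(by_alias=True, exclude_none=True))
# {'document-hash': 'abc123'}
```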
