Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 9 additions & 5 deletions src/transformers/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,7 @@
],
"hf_argparser": ["HfArgumentParser"],
"hyperparameter_search": [],
"image_processing_utils_fast": [],
"image_transforms": [],
"integrations": [
"is_clearml_available",
Expand Down Expand Up @@ -798,13 +799,15 @@
extra_objects={"__version__": __version__},
)

def _create_tokenization_alias(alias: str, target: str) -> None:
def _create_module_alias(alias: str, target: str) -> None:
"""
Lazily redirect legacy tokenization module paths to their replacements without importing heavy deps.
Lazily redirect legacy module paths to their replacements without importing heavy deps.
"""

module = types.ModuleType(alias)
module.__doc__ = f"Alias module for backward compatibility with `{target}`."
# Set __file__ explicitly so that inspect.py's hasattr(module, '__file__') check
# never falls through to __getattr__ and triggers a premature (possibly circular) import.
module.__file__ = None

def _get_target():
return importlib.import_module(target, __name__)
Expand All @@ -815,8 +818,9 @@ def _get_target():
sys.modules[alias] = module
setattr(sys.modules[__name__], alias.rsplit(".", 1)[-1], module)

_create_tokenization_alias(f"{__name__}.tokenization_utils_fast", ".tokenization_utils_tokenizers")
_create_tokenization_alias(f"{__name__}.tokenization_utils", ".tokenization_utils_sentencepiece")
# Register lazy aliases so the legacy module paths keep importing after the
# underlying modules were renamed. The alias module defers the real import
# (via `_create_module_alias`'s `_get_target` / `importlib.import_module`)
# so heavy optional dependencies are not pulled in at package import time.
_create_module_alias(f"{__name__}.tokenization_utils_fast", ".tokenization_utils_tokenizers")
_create_module_alias(f"{__name__}.tokenization_utils", ".tokenization_utils_sentencepiece")
_create_module_alias(f"{__name__}.image_processing_utils_fast", ".image_processing_backends")


if not is_torch_available():
Expand Down
5 changes: 5 additions & 0 deletions src/transformers/image_processing_backends.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
)
from .image_transforms import (
convert_to_rgb,
divide_to_patches, # noqa: F401 - re-exported for backward compat with image_processing_utils_fast
get_resize_output_image_size,
get_size_with_aspect_ratio,
group_images_by_shape,
Expand Down Expand Up @@ -664,3 +665,7 @@ def to_dict(self) -> dict[str, Any]:
if processor_dict.get("image_processor_type", "").endswith("Pil"):
processor_dict["image_processor_type"] = processor_dict["image_processor_type"][:-3]
return processor_dict


# Backward-compatible alias: allow referring to TorchvisionBackend as BaseImageProcessorFast,
# the name legacy callers import (presumably reached through the
# `image_processing_utils_fast` alias module — verify against the package __init__).
BaseImageProcessorFast = TorchvisionBackend
Loading