diff --git a/src/transformers/tokenization_marian.py b/src/transformers/tokenization_marian.py
index e1ce86ca020b..712303cb15f9 100644
--- a/src/transformers/tokenization_marian.py
+++ b/src/transformers/tokenization_marian.py
@@ -7,7 +7,7 @@
 
 import sentencepiece
 
-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils import BatchEncoding, PreTrainedTokenizer
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
 
@@ -125,7 +125,7 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> Lis
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return token_ids_0 + token_ids_1 + [self.eos_token_id]
 
-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
diff --git a/src/transformers/tokenization_mbart.py b/src/transformers/tokenization_mbart.py
index a5c72576aa26..eed9f0bf71b3 100644
--- a/src/transformers/tokenization_mbart.py
+++ b/src/transformers/tokenization_mbart.py
@@ -15,7 +15,7 @@
 
 from typing import List, Optional
 
-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils import BatchEncoding
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
 from .tokenization_xlm_roberta import XLMRobertaTokenizer
@@ -156,7 +156,7 @@ def build_inputs_with_special_tokens(
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
 
-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
diff --git a/src/transformers/tokenization_pegasus.py b/src/transformers/tokenization_pegasus.py
index 384a70c63c9a..67ae882d8c90 100644
--- a/src/transformers/tokenization_pegasus.py
+++ b/src/transformers/tokenization_pegasus.py
@@ -16,7 +16,7 @@
 
 from transformers.tokenization_reformer import ReformerTokenizer
 
-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding
 
 
@@ -104,7 +104,7 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> Lis
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return token_ids_0 + token_ids_1 + [self.eos_token_id]
 
-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
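Context for the rename: both decorators come from transformers' file_utils and prepend a shared docstring to the decorated method, so the long PREPARE_SEQ2SEQ_BATCH_DOCSTRING can be written once and reused across the Marian, mBART, and Pegasus tokenizers. The sketch below shows the general pattern such a decorator relies on; it is an illustrative approximation, not the library's exact implementation, and the prepare_seq2seq_batch body and docstring text in it are made up for demonstration.

# Minimal sketch of a docstring-prepending decorator in the style of
# add_start_docstrings. Approximation only; not copied from file_utils.py.
def add_start_docstrings(*docstr):
    def docstring_decorator(fn):
        # Prepend the shared docstring fragments to the function's own
        # docstring (if any), so one canonical doc block is reused.
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return docstring_decorator

# Hypothetical stand-in for the real shared docstring constant.
PREPARE_SEQ2SEQ_BATCH_DOCSTRING = (
    "Prepare a batch of inputs for a sequence-to-sequence model.\n\n"
)

@add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
def prepare_seq2seq_batch(src_texts, tgt_texts=None):
    """Tokenizer-specific details would be documented here."""
    return list(zip(src_texts, tgt_texts or []))

print(prepare_seq2seq_batch.__doc__)  # shared docstring appears first

The practical difference is that add_start_docstrings_to_callable additionally interpolated the owning class's name into the docstring template; the shared seq2seq batch docstring has no such placeholder, so the plain add_start_docstrings variant is sufficient here.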