diff --git a/docs/source/main_classes/pipelines.mdx b/docs/source/main_classes/pipelines.mdx
index 5b2509c70d50..6f5b5b747065 100644
--- a/docs/source/main_classes/pipelines.mdx
+++ b/docs/source/main_classes/pipelines.mdx
@@ -78,7 +78,7 @@ GPU. If it doesn't don't hesitate to create an issue.
 ```python
 import datasets
 from transformers import pipeline
-from transformers.pipelines.base import KeyDataset
+from transformers.pipelines.pt_utils import KeyDataset
 from tqdm.auto import tqdm
 
 pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0)
@@ -128,7 +128,7 @@ whenever the pipeline uses its streaming ability (so when passing lists or `Data
 
 ```python
 from transformers import pipeline
-from transformers.pipelines.base import KeyDataset
+from transformers.pipelines.pt_utils import KeyDataset
 import datasets
 
 dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised")
diff --git a/docs/source/quicktour.mdx b/docs/source/quicktour.mdx
index 867170656ecd..1c4077c54999 100644
--- a/docs/source/quicktour.mdx
+++ b/docs/source/quicktour.mdx
@@ -118,7 +118,7 @@ Next, load a dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co
 Now you can iterate over the dataset with the pipeline. `KeyDataset` retrieves the item in the dictionary returned by the dataset:
 
 ```py
->>> from transformers.pipelines.base import KeyDataset
+>>> from transformers.pipelines.pt_utils import KeyDataset
 >>> from tqdm.auto import tqdm
 
 >>> for out in tqdm(speech_recognizer(KeyDataset(dataset, "file"))):
@@ -316,4 +316,4 @@ One particularly cool 🤗 Transformers feature is the ability to save a model a
 
 >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
 >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
-```
\ No newline at end of file
+```
diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py
index 5445b718e344..fbfe56375d86 100644
--- a/src/transformers/pipelines/base.py
+++ b/src/transformers/pipelines/base.py
@@ -49,6 +49,9 @@
     from torch.utils.data import DataLoader, Dataset
 
     from ..models.auto.modeling_auto import AutoModel
+
+    # Re-export for backward compatibility
+    from .pt_utils import KeyDataset
 else:
     Dataset = None
     KeyDataset = None
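
For context, here is a quick usage sketch of what these hunks change (illustration only, not part of the patch; it assumes a Python environment with `transformers`, `datasets`, and `torch` installed): `KeyDataset` is now imported from `transformers.pipelines.pt_utils`, while the re-export added to `pipelines/base.py` keeps the old `transformers.pipelines.base` path working when torch is available. The text-classification pipeline and the `imdb` dataset mirror the example in `pipelines.mdx`; running on CPU (`device=-1`) is an assumption for illustration.

```python
import datasets
from transformers import pipeline

# New, preferred location after this change
from transformers.pipelines.pt_utils import KeyDataset

# Old location still resolves thanks to the re-export in pipelines/base.py
from transformers.pipelines.base import KeyDataset as LegacyKeyDataset

assert LegacyKeyDataset is KeyDataset  # same class object either way

# Stream a dataset column through a pipeline, mirroring the docs example
# (device=-1 keeps this on CPU; the docs use device=0 for GPU)
dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised")
pipe = pipeline("text-classification", device=-1)

# KeyDataset wraps the datasets.Dataset and yields only the "text" column,
# so the pipeline can iterate over it with batching.
for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"):
    print(out)
```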