51 changes: 0 additions & 51 deletions keras_nlp/models/albert/albert_tokenizer.py
@@ -15,14 +15,12 @@
"""ALBERT tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.albert.albert_presets import backbone_presets
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -89,52 +87,3 @@ def __init__(self, proto, **kwargs):
@classproperty
mattdangerw (Member) commented on Jan 23, 2023:

We will need some changes to the class-level docstrings for our model-specific tokenizers; we should document `from_preset` usage front and center in the code examples above. But I think that is best done as a follow-up anyway, just opened #688.
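
For context, a minimal sketch of the kind of class-level docstring example that follow-up might add. The exact wording here is hypothetical (the real change is tracked in #688), though `AlbertTokenizer.from_preset` and the `albert_base_en_uncased` preset are real:

```python
# Hypothetical sketch only: the actual docstring wording is tracked in #688.
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer


class AlbertTokenizer(SentencePieceTokenizer):
    """ALBERT tokenizer layer based on SentencePiece.

    Examples:

    Load a preset tokenizer (the recommended entry point):
    >>> tokenizer = keras_nlp.models.AlbertTokenizer.from_preset(
    ...     "albert_base_en_uncased")
    >>> outputs = tokenizer("The quick brown fox tripped.")
    """
```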

def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate an ALBERT tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.AlbertTokenizer.from_preset(
"albert_base_en_uncased",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

spm_proto = keras.utils.get_file(
"vocab.spm",
metadata["spm_proto_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["spm_proto_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"proto": spm_proto,
},
)

return cls.from_config({**config, **kwargs})
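
The same fifty-line `from_preset` body is deleted from each tokenizer in this PR, differing only in which vocabulary assets it fetches: a SentencePiece proto for ALBERT and DeBERTa, a WordPiece `vocab.txt` for BERT and DistilBERT, and a BPE `vocab.json` plus `merges.txt` for GPT-2. Below is a minimal sketch of the kind of shared implementation such duplicated code can be hoisted into; the `PresetTokenizerMixin` name and the `metadata["assets"]` schema are assumptions for illustration, not the code this PR actually adds.

```python
import os

from tensorflow import keras


class PresetTokenizerMixin:
    """Hypothetical mixin hoisting the duplicated `from_preset` logic."""

    @classmethod
    def from_preset(cls, preset, **kwargs):
        """Instantiate a tokenizer from a preset vocabulary."""
        if preset not in cls.presets:
            raise ValueError(
                "`preset` must be one of "
                f"""{", ".join(cls.presets)}. Received: {preset}."""
            )
        metadata = cls.presets[preset]

        config = metadata["preprocessor_config"]
        # Fetch each vocabulary asset the preset declares, e.g. a
        # SentencePiece `proto`, a WordPiece `vocabulary`, or a BPE
        # `vocabulary` plus `merges`. The `assets` schema is assumed
        # here for illustration.
        for key, (filename, url, file_hash) in metadata["assets"].items():
            config[key] = keras.utils.get_file(
                filename,
                url,
                cache_subdir=os.path.join("models", preset),
                file_hash=file_hash,
            )

        return cls.from_config({**config, **kwargs})
```

The design point is that only the asset list varies per model, so the download-and-inject logic can live in one place rather than being copied into every tokenizer.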
51 changes: 0 additions & 51 deletions keras_nlp/models/bert/bert_tokenizer.py
@@ -14,15 +14,13 @@
"""BERT tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.bert.bert_presets import backbone_presets
from keras_nlp.models.bert.bert_presets import classifier_presets
from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring

PRESET_NAMES = ", ".join(list(backbone_presets) + list(classifier_presets))

@@ -112,52 +110,3 @@ def __init__(
@classproperty
def presets(cls):
return copy.deepcopy({**backbone_presets, **classifier_presets})

@classmethod
@format_docstring(names=PRESET_NAMES)
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a BERT tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.BertTokenizer.from_preset(
"bert_base_en_uncased",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.txt",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
},
)

return cls.from_config({**config, **kwargs})
51 changes: 0 additions & 51 deletions keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py
@@ -15,14 +15,12 @@
"""DeBERTa tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -89,52 +87,3 @@ def __init__(self, proto, **kwargs):
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a DeBERTa tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.DebertaV3Tokenizer.from_preset(
"deberta_v3_base_en",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

spm_proto = keras.utils.get_file(
"vocab.spm",
metadata["spm_proto_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["spm_proto_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"proto": spm_proto,
},
)

return cls.from_config({**config, **kwargs})
51 changes: 0 additions & 51 deletions keras_nlp/models/distil_bert/distil_bert_tokenizer.py
@@ -14,14 +14,12 @@
"""DistilBERT tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.distil_bert.distil_bert_presets import backbone_presets
from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -109,52 +107,3 @@ def __init__(
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a DistilBERT tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.DistilBertTokenizer.from_preset(
"distil_bert_base_en_uncased",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.txt",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
},
)

return cls.from_config({**config, **kwargs})
57 changes: 0 additions & 57 deletions keras_nlp/models/gpt2/gpt2_tokenizer.py
@@ -14,14 +14,12 @@
"""GPT-2 preprocessing layers."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.gpt2.gpt2_presets import backbone_presets
from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -118,58 +116,3 @@ def __init__(
@classproperty
def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a GPT-2 tokenizer from preset vocabulary and merge rules.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.GPT2Tokenizer.from_preset(
"gpt2_base_en",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""

if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.json",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)
merges = keras.utils.get_file(
"merges.txt",
metadata["merges_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["merges_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
"merges": merges,
},
)

return cls.from_config({**config, **kwargs})