Merged
Changes from 2 commits
46 changes: 1 addition & 45 deletions keras_nlp/models/bert/bert_tokenizer.py
@@ -14,15 +14,13 @@
"""BERT tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.bert.bert_presets import backbone_presets
from keras_nlp.models.bert.bert_presets import classifier_presets
from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring

PRESET_NAMES = ", ".join(list(backbone_presets) + list(classifier_presets))

@@ -113,51 +111,9 @@ def __init__(
def presets(cls):
return copy.deepcopy({**backbone_presets, **classifier_presets})

@classmethod
@format_docstring(names=PRESET_NAMES)
def from_preset(
[Review comment — Member] After following the changes below re docstrings and `__init_subclass__` you should be able to remove the `from_preset` method here and elsewhere entirely!

[Review comment — Contributor] @shivance, we don't need `from_preset` in subclasses anymore! See, for example, `BertPreprocessor`.

cls,
preset,
**kwargs,
):
"""Instantiate a BERT tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.BertTokenizer.from_preset(
"bert_base_en_uncased",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.txt",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
},
)

return cls.from_config({**config, **kwargs})
return super().from_preset(preset, **kwargs)
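
The reviewers' suggestion above depends on the base class owning a single generic `from_preset` and on `__init_subclass__` giving each subclass its own documented copy of it. Below is a minimal sketch of that pattern; the names (`PresetTokenizerBase`, `ToyTokenizer`) are illustrative assumptions, not the actual keras-nlp classes.

```python
# Minimal sketch of the pattern the reviewers describe. Names here
# (PresetTokenizerBase, ToyTokenizer) are illustrative, not the actual
# keras-nlp code.
import copy


class PresetTokenizerBase:
    # Subclasses override this with their own preset metadata.
    presets = {}

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Give each subclass its own `from_preset` wrapper, so the
        # docstring can list that subclass's preset names.
        if "from_preset" not in cls.__dict__:

            def from_preset(inner_cls, preset, **kw):
                return super(cls, inner_cls).from_preset(preset, **kw)

            from_preset.__doc__ = (
                f"Instantiate {cls.__name__} from preset vocabulary.\n\n"
                f"Args:\n    preset: string. Must be one of "
                f"{', '.join(cls.presets)}."
            )
            cls.from_preset = classmethod(from_preset)

    @classmethod
    def from_preset(cls, preset, **kwargs):
        # Shared validation and construction live here once, instead of
        # being copied into every tokenizer subclass.
        if preset not in cls.presets:
            raise ValueError(
                f"`preset` must be one of {', '.join(cls.presets)}. "
                f"Received: {preset}."
            )
        config = copy.deepcopy(cls.presets[preset]["preprocessor_config"])
        return cls(**{**config, **kwargs})


class ToyTokenizer(PresetTokenizerBase):
    presets = {"toy_en": {"preprocessor_config": {"lowercase": True}}}

    def __init__(self, lowercase=False):
        self.lowercase = lowercase


tokenizer = ToyTokenizer.from_preset("toy_en")  # uses the shared base logic
print(ToyTokenizer.from_preset.__doc__)         # subclass-specific docstring
```

With this in place each subclass only declares its `presets` mapping; validation, construction, and the per-class docstring all come from the base, which is why the five `from_preset` overrides in this diff can be deleted outright rather than reduced to `super()` calls.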
45 changes: 1 addition & 44 deletions keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py
@@ -15,14 +15,12 @@
"""DeBERTa tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.deberta_v3.deberta_v3_presets import backbone_presets
from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -91,50 +89,9 @@ def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a DeBERTa tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.DebertaV3Tokenizer.from_preset(
"deberta_v3_base_en",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

spm_proto = keras.utils.get_file(
"vocab.spm",
metadata["spm_proto_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["spm_proto_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"proto": spm_proto,
},
)

return cls.from_config({**config, **kwargs})
return super().from_preset(preset, **kwargs)
45 changes: 1 addition & 44 deletions keras_nlp/models/distil_bert/distil_bert_tokenizer.py
@@ -14,14 +14,12 @@
"""DistilBERT tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.distil_bert.distil_bert_presets import backbone_presets
from keras_nlp.tokenizers.word_piece_tokenizer import WordPieceTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -111,50 +109,9 @@ def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a DistilBERT tokenizer from preset vocabulary.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.DistilBertTokenizer.from_preset(
"distil_bert_base_en_uncased",
)

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""
if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.txt",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
},
)

return cls.from_config({**config, **kwargs})
return super().from_preset(preset, **kwargs)
51 changes: 1 addition & 50 deletions keras_nlp/models/gpt2/gpt2_tokenizer.py
@@ -14,14 +14,12 @@
"""GPT-2 preprocessing layers."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.gpt2.gpt2_presets import backbone_presets
from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -120,56 +118,9 @@ def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a GPT-2 tokenizer from preset vocabulary and merge rules.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.GPT2Tokenizer.from_preset(
"gpt2_base_en",
)
# Tokenize some input.
tokenizer("The quick brown fox tripped.")
# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""

if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.json",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)
merges = keras.utils.get_file(
"merges.txt",
metadata["merges_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["merges_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
"merges": merges,
},
)

return cls.from_config({**config, **kwargs})
return super().from_preset(preset, **kwargs)
51 changes: 1 addition & 50 deletions keras_nlp/models/roberta/roberta_tokenizer.py
@@ -15,14 +15,12 @@
"""RoBERTa tokenizer."""

import copy
import os

from tensorflow import keras

from keras_nlp.models.roberta.roberta_presets import backbone_presets
from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer
from keras_nlp.utils.python_utils import classproperty
from keras_nlp.utils.python_utils import format_docstring


@keras.utils.register_keras_serializable(package="keras_nlp")
@@ -128,56 +126,9 @@ def presets(cls):
return copy.deepcopy(backbone_presets)

@classmethod
@format_docstring(names=", ".join(backbone_presets))
def from_preset(
cls,
preset,
**kwargs,
):
"""Instantiate a RoBERTa tokenizer from preset vocabulary and merge rules.

Args:
preset: string. Must be one of {{names}}.

Examples:
```python
# Load a preset tokenizer.
tokenizer = keras_nlp.models.RobertaTokenizer.from_preset(
"roberta_base_en",
)
# Tokenize some input.
tokenizer("The quick brown fox tripped.")
# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```
"""

if preset not in cls.presets:
raise ValueError(
"`preset` must be one of "
f"""{", ".join(cls.presets)}. Received: {preset}."""
)
metadata = cls.presets[preset]

vocabulary = keras.utils.get_file(
"vocab.json",
metadata["vocabulary_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["vocabulary_hash"],
)
merges = keras.utils.get_file(
"merges.txt",
metadata["merges_url"],
cache_subdir=os.path.join("models", preset),
file_hash=metadata["merges_hash"],
)

config = metadata["preprocessor_config"]
config.update(
{
"vocabulary": vocabulary,
"merges": merges,
},
)

return cls.from_config({**config, **kwargs})
return super().from_preset(preset, **kwargs)
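
The public workflow documented by the deleted docstrings is unchanged by the refactor. For reference, the usage they showed (assuming a keras-nlp install with network access for the preset download):

```python
import keras_nlp

# Load a preset tokenizer; the inherited `from_preset` now does the work.
tokenizer = keras_nlp.models.RobertaTokenizer.from_preset("roberta_base_en")

# Tokenize some input.
tokenizer("The quick brown fox tripped.")

# Detokenize some input.
tokenizer.detokenize([5, 6, 7, 8, 9])
```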