diff --git a/keras_nlp/models/bert/bert_backbone.py b/keras_nlp/models/bert/bert_backbone.py
index 4adfcf21d9..06f2a15b09 100644
--- a/keras_nlp/models/bert/bert_backbone.py
+++ b/keras_nlp/models/bert/bert_backbone.py
@@ -33,7 +33,7 @@ def bert_kernel_initializer(stddev=0.02):
 
 @keras_nlp_export("keras_nlp.models.BertBackbone")
 class BertBackbone(Backbone):
-    """BERT encoder network.
+    """A BERT encoder network.
 
     This class implements a bi-directional Transformer-based encoder as
     described in ["BERT: Pre-training of Deep Bidirectional Transformers for
@@ -41,9 +41,9 @@ class BertBackbone(Backbone):
     embedding lookups and transformer layers, but not the masked language model
     or next sentence prediction heads.
 
-    The default constructor gives a fully customizable, randomly initialized BERT
-    encoder with any number of layers, heads, and embedding dimensions. To load
-    preset architectures and weights, use the `from_preset` constructor.
+    The default constructor gives a fully customizable, randomly initialized
+    BERT encoder with any number of layers, heads, and embedding dimensions. To
+    load preset architectures and weights, use the `from_preset()` constructor.
 
     Disclaimer: Pre-trained models are provided on an "as is" basis, without
     warranties or conditions of any kind.
@@ -76,20 +76,20 @@ class BertBackbone(Backbone):
         ),
     }
 
-    # Pretrained BERT encoder
+    # Pretrained BERT encoder.
     model = keras_nlp.models.BertBackbone.from_preset("bert_base_en_uncased")
-    output = model(input_data)
+    model(input_data)
 
-    # Randomly initialized BERT encoder with a custom config
+    # Randomly initialized BERT encoder with a custom config.
     model = keras_nlp.models.BertBackbone(
         vocabulary_size=30552,
-        num_layers=12,
-        num_heads=12,
-        hidden_dim=768,
-        intermediate_dim=3072,
-        max_sequence_length=12,
+        num_layers=4,
+        num_heads=4,
+        hidden_dim=256,
+        intermediate_dim=512,
+        max_sequence_length=128,
     )
-    output = model(input_data)
+    model(input_data)
     ```
     """
 
diff --git a/keras_nlp/models/bert/bert_classifier.py b/keras_nlp/models/bert/bert_classifier.py
index 5528b4e2c3..a47fab4c6e 100644
--- a/keras_nlp/models/bert/bert_classifier.py
+++ b/keras_nlp/models/bert/bert_classifier.py
@@ -30,12 +30,12 @@
 
 @keras_nlp_export("keras_nlp.models.BertClassifier")
 class BertClassifier(Task):
-    """An end-to-end BERT model for classification tasks
+    """An end-to-end BERT model for classification tasks.
 
-    This model attaches a classification head to a `keras_nlp.model.BertBackbone`
-    backbone, mapping from the backbone outputs to logit output suitable for
-    a classification task. For usage of this model with pre-trained weights, see
-    the `from_preset()` method.
+    This model attaches a classification head to a
+    `keras_nlp.models.BertBackbone` instance, mapping from the backbone outputs
+    to logits suitable for a classification task. For usage of this model with
+    pre-trained weights, use the `from_preset()` constructor.
 
     This model can optionally be configured with a `preprocessor` layer, in
     which case it will automatically apply preprocessing to raw inputs during
@@ -56,90 +56,34 @@ class BertClassifier(Task):
 
     Examples:
 
-    Example usage.
+    Raw string data.
     ```python
-    # Define the preprocessed inputs.
-    preprocessed_features = {
-        "token_ids": tf.ones(shape=(2, 12), dtype=tf.int64),
-        "segment_ids": tf.constant(
-            [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
-        "padding_mask": tf.constant(
-            [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
-        ),
-    }
-    labels = [0, 3]
-
-    # Randomly initialize a BERT backbone.
-    backbone = keras_nlp.models.BertBackbone(
-        vocabulary_size=30552,
-        num_layers=12,
-        num_heads=12,
-        hidden_dim=768,
-        intermediate_dim=3072,
-        max_sequence_length=12
-    )
-
-    # Create a BERT classifier and fit your data.
-    classifier = keras_nlp.models.BertClassifier(
-        backbone,
-        num_classes=4,
-        preprocessor=None,
-    )
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-    )
-    classifier.fit(x=preprocessed_features, y=labels, batch_size=2)
-
-    # Access backbone programatically (e.g., to change `trainable`)
-    classifier.backbone.trainable = False
-    ```
-
-    Raw string inputs.
-    ```python
-    # Create a dataset with raw string features in an `(x, y)` format.
     features = ["The quick brown fox jumped.", "I forgot my homework."]
     labels = [0, 3]
 
-    # Create a BertClassifier and fit your data.
+    # Pretrained classifier.
     classifier = keras_nlp.models.BertClassifier.from_preset(
         "bert_base_en_uncased",
         num_classes=4,
     )
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-    )
     classifier.fit(x=features, y=labels, batch_size=2)
-    ```
-
-    Raw string inputs with customized preprocessing.
-    ```python
-    # Create a dataset with raw string features in an `(x, y)` format.
-    features = ["The quick brown fox jumped.", "I forgot my homework."]
-    labels = [0, 3]
-
-    # Use a shorter sequence length.
-    preprocessor = keras_nlp.models.BertPreprocessor.from_preset(
-        "bert_base_en_uncased",
-        sequence_length=128,
-    )
+    classifier.predict(x=features, batch_size=2)
 
-    # Create a BertClassifier and fit your data.
-    classifier = keras_nlp.models.BertClassifier.from_preset(
-        "bert_base_en_uncased",
-        num_classes=4,
-        preprocessor=preprocessor,
-    )
+    # Re-compile (e.g., with a new learning rate).
     classifier.compile(
         loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        optimizer=keras.optimizers.Adam(5e-5),
+        jit_compile=True,
     )
+    # Access backbone programmatically (e.g., to change `trainable`).
+    classifier.backbone.trainable = False
+    # Fit again.
     classifier.fit(x=features, y=labels, batch_size=2)
     ```
 
-    Preprocessed inputs.
+    Preprocessed integer data.
     ```python
-    # Create a dataset with preprocessed features in an `(x, y)` format.
-    preprocessed_features = {
+    features = {
         "token_ids": tf.ones(shape=(2, 12), dtype=tf.int64),
         "segment_ids": tf.constant(
             [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]] * 2, shape=(2, 12)
@@ -150,16 +94,43 @@ class BertClassifier(Task):
     }
     labels = [0, 3]
 
-    # Create a BERT classifier and fit your data.
+    # Pretrained classifier without preprocessing.
     classifier = keras_nlp.models.BertClassifier.from_preset(
         "bert_base_en_uncased",
         num_classes=4,
         preprocessor=None,
     )
-    classifier.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+    classifier.fit(x=features, y=labels, batch_size=2)
+    ```
+
+    Custom backbone and vocabulary.
+    ```python
+    features = ["The quick brown fox jumped.", "I forgot my homework."]
+    labels = [0, 3]
+
+    vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
+    vocab += ["The", "quick", "brown", "fox", "jumped", "."]
+    tokenizer = keras_nlp.models.BertTokenizer(
+        vocabulary=vocab,
+    )
+    preprocessor = keras_nlp.models.BertPreprocessor(
+        tokenizer=tokenizer,
+        sequence_length=128,
     )
-    classifier.fit(x=preprocessed_features, y=labels, batch_size=2)
+    backbone = keras_nlp.models.BertBackbone(
+        vocabulary_size=30552,
+        num_layers=4,
+        num_heads=4,
+        hidden_dim=256,
+        intermediate_dim=512,
+        max_sequence_length=128,
+    )
+    classifier = keras_nlp.models.BertClassifier(
+        backbone=backbone,
+        preprocessor=preprocessor,
+        num_classes=4,
+    )
+    classifier.fit(x=features, y=labels, batch_size=2)
     ```
     """
 
diff --git a/keras_nlp/models/bert/bert_masked_lm.py b/keras_nlp/models/bert/bert_masked_lm.py
index a79be436d8..aa2c1a0a51 100644
--- a/keras_nlp/models/bert/bert_masked_lm.py
+++ b/keras_nlp/models/bert/bert_masked_lm.py
@@ -37,7 +37,7 @@ class BertMaskedLM(Task):
     This model will train BERT on a masked language modeling task.
     The model will predict labels for a number of masked tokens in
     the input data. For usage of this model with pre-trained weights, see the
-    `from_preset()` method.
+    `from_preset()` constructor.
 
     This model can optionally be configured with a `preprocessor` layer, in
     which case inputs can be raw string features during `fit()`, `predict()`,
@@ -56,26 +56,32 @@ class BertMaskedLM(Task):
 
     Example usage:
 
-    Raw string inputs and pretrained backbone.
+    Raw string data.
     ```python
-    # Create a dataset with raw string features. Labels are inferred.
     features = ["The quick brown fox jumped.", "I forgot my homework."]
 
-    # Create a BertMaskedLM with a pretrained backbone and further train
-    # on an MLM task.
+    # Pretrained language model.
     masked_lm = keras_nlp.models.BertMaskedLM.from_preset(
-        "bert_base_en",
+        "bert_base_en_uncased",
     )
+    masked_lm.fit(x=features, batch_size=2)
+
+    # Re-compile (e.g., with a new learning rate).
     masked_lm.compile(
         loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
+        optimizer=keras.optimizers.Adam(5e-5),
+        jit_compile=True,
     )
+    # Access backbone programmatically (e.g., to change `trainable`).
+    masked_lm.backbone.trainable = False
+    # Fit again.
     masked_lm.fit(x=features, batch_size=2)
     ```
 
-    Preprocessed inputs and custom backbone.
+    Preprocessed integer data.
    ```python
-    # Create a preprocessed dataset where 0 is the mask token.
-    preprocessed_features = {
+    # Create a preprocessed batch where 0 is the mask token.
+    features = {
         "token_ids": tf.constant(
             [[1, 2, 0, 4, 0, 6, 7, 8]] * 2, shape=(2, 8)
         ),
@@ -88,24 +94,11 @@ class BertMaskedLM(Task):
     # Labels are the original masked values.
     labels = [[3, 5]] * 2
 
-    # Randomly initialize a BERT encoder
-    backbone = keras_nlp.models.BertBackbone(
-        vocabulary_size=50265,
-        num_layers=12,
-        num_heads=12,
-        hidden_dim=768,
-        intermediate_dim=3072,
-        max_sequence_length=12
-    )
-    # Create a BERT masked LM model and fit the data.
-    masked_lm = keras_nlp.models.BertMaskedLM(
-        backbone,
+    masked_lm = keras_nlp.models.BertMaskedLM.from_preset(
+        "bert_base_en_uncased",
         preprocessor=None,
     )
-    masked_lm.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
-    )
-    masked_lm.fit(x=preprocessed_features, y=labels, batch_size=2)
+    masked_lm.fit(x=features, y=labels, batch_size=2)
     ```
     """
 
diff --git a/keras_nlp/models/bert/bert_masked_lm_preprocessor.py b/keras_nlp/models/bert/bert_masked_lm_preprocessor.py
index e6564c8d73..084d814fe8 100644
--- a/keras_nlp/models/bert/bert_masked_lm_preprocessor.py
+++ b/keras_nlp/models/bert/bert_masked_lm_preprocessor.py
@@ -25,44 +25,95 @@
 @keras_nlp_export("keras_nlp.models.BertMaskedLMPreprocessor")
 class BertMaskedLMPreprocessor(BertPreprocessor):
     """BERT preprocessing for the masked language modeling task.
+
     This preprocessing layer will prepare inputs for a masked language
     modeling task. It is primarily intended for use with the
     `keras_nlp.models.BertMaskedLM` task model. Preprocessing will occur in
     multiple steps.
-    - Tokenize any number of input segments using the `tokenizer`.
-    - Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`.
-      with the appropriate `"[CLS]"`, `"[SEP]"`, `"[SEP]"`, `"[SEP]"` and `"[PAD]"` tokens.
-    - Randomly select non-special tokens to mask, controlled by
+
+    1. Tokenize any number of input segments using the `tokenizer`.
+    2. Pack the inputs together with the appropriate `"[CLS]"`, `"[SEP]"` and
+    `"[PAD]"` tokens.
+    3. Randomly select non-special tokens to mask, controlled by
       `mask_selection_rate`.
-    - Construct a `(x, y, sample_weight)` tuple suitable for training with a
+    4. Construct a `(x, y, sample_weight)` tuple suitable for training with a
      `keras_nlp.models.BertMaskedLM` task model.
+
+    Args:
+        tokenizer: A `keras_nlp.models.BertTokenizer` instance.
+        sequence_length: int. The length of the packed inputs.
+        truncate: string. The algorithm to truncate a list of batched segments
+            to fit within `sequence_length`. The value can be either
+            `round_robin` or `waterfall`:
+            - `"round_robin"`: Available space is assigned one token at a
+                time in a round-robin fashion to the inputs that still need
+                some, until the limit is reached.
+            - `"waterfall"`: The allocation of the budget is done using a
+                "waterfall" algorithm that allocates quota in a
+                left-to-right manner and fills up the buckets until we run
+                out of budget. It supports an arbitrary number of segments.
+        mask_selection_rate: float. The probability an input token will be
+            dynamically masked.
+        mask_selection_length: int. The maximum number of masked tokens
+            in a given sample.
+        mask_token_rate: float. The probability that a selected token will be
+            replaced with the mask token.
+        random_token_rate: float. The probability that a selected token will be
+            replaced with a random token from the vocabulary. A selected token
+            will be left as is with probability
+            `1 - mask_token_rate - random_token_rate`.
+
+    Call arguments:
+        x: A tensor of single string sequences, or a tuple of multiple
+            tensor sequences to be packed together. Inputs may be batched or
+            unbatched. For single sequences, raw python inputs will be converted
+            to tensors. For multiple sequences, pass tensors directly.
+        y: Label data. Should always be `None` as the layer generates labels.
+        sample_weight: Label weights. Should always be `None` as the layer
+            generates label weights.
+
     Examples:
+
+    Directly calling the layer on data.
     ```python
-    # Load the preprocessor from a preset.
     preprocessor = keras_nlp.models.BertMaskedLMPreprocessor.from_preset(
-        "bert_base_en"
+        "bert_base_en_uncased"
     )
+
     # Tokenize and mask a single sentence.
-    sentence = tf.constant("The quick brown fox jumped.")
-    preprocessor(sentence)
-    # Tokenize and mask a batch of sentences.
-    sentences = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
-    )
-    preprocessor(sentences)
-    # Tokenize and mask a dataset of sentences.
-    features = tf.constant(
-        ["The quick brown fox jumped.", "Call me Ishmael."]
+    preprocessor("The quick brown fox jumped.")
+
+    # Tokenize and mask a batch of single sentences.
+    preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
+
+    # Tokenize and mask sentence pairs.
+    # In this case, always convert input to tensors before calling the layer.
+    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+    preprocessor((first, second))
+    ```
+
+    Mapping with `tf.data.Dataset`.
+    ```python
+    preprocessor = keras_nlp.models.BertMaskedLMPreprocessor.from_preset(
+        "bert_base_en_uncased"
     )
-    ds = tf.data.Dataset.from_tensor_slices((features))
+
+    first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+    second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+
+    # Map single sentences.
+    ds = tf.data.Dataset.from_tensor_slices(first)
     ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
-    # Alternatively, you can create a preprocessor from your own vocabulary.
-    # The usage is exactly the same as above.
-    vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
-    vocab += ["THE", "QUICK", "BROWN", "FOX"]
-    vocab += ["Call", "me", "Ishmael"]
-    tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab)
-    preprocessor = keras_nlp.models.BertMaskedLMPreprocessor(tokenizer)
+
+    # Map sentence pairs.
+    ds = tf.data.Dataset.from_tensor_slices((first, second))
+    # Watch out for tf.data's default unpacking of tuples here!
+    # Best to invoke the `preprocessor` directly in this case.
+    ds = ds.map(
+        lambda first, second: preprocessor(x=(first, second)),
+        num_parallel_calls=tf.data.AUTOTUNE,
+    )
     ```
     """
 
diff --git a/keras_nlp/models/bert/bert_preprocessor.py b/keras_nlp/models/bert/bert_preprocessor.py
index 5dca85941e..79cb644e70 100644
--- a/keras_nlp/models/bert/bert_preprocessor.py
+++ b/keras_nlp/models/bert/bert_preprocessor.py
@@ -36,29 +36,16 @@ class BertPreprocessor(Preprocessor):
 
     This preprocessing layer will do three things:
 
-    - Tokenize any number of input segments using the `tokenizer`.
-    - Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`.
+    1. Tokenize any number of input segments using the `tokenizer`.
+    2. Pack the inputs together using a `keras_nlp.layers.MultiSegmentPacker`
       with the appropriate `"[CLS]"`, `"[SEP]"` and `"[PAD]"` tokens.
-    - Construct a dictionary with keys `"token_ids"`, `"segment_ids"`,
+    3. Construct a dictionary with keys `"token_ids"`, `"segment_ids"`,
        `"padding_mask"`, that can be passed directly to a BERT model.
 
     This layer can be used directly with `tf.data.Dataset.map` to preprocess
     string data in the `(x, y, sample_weight)` format used by
     `keras.Model.fit`.
 
-    The call method of this layer accepts three arguments, `x`, `y`, and
-    `sample_weight`. `x` can be a python string or tensor representing a single
-    segment, a list of python strings representing a batch of single segments,
-    or a list of tensors representing multiple segments to be packed together.
- `y` and `sample_weight` are both optional, can have any format, and will be - passed through unaltered. - - Special care should be taken when using `tf.data` to map over an unlabeled - tuple of string segments. `tf.data.Dataset.map` will unpack this tuple - directly into the call arguments of this layer, rather than forward all - argument to `x`. To handle this case, it is recommended to explicitly call - the layer, e.g. `ds.map(lambda seg1, seg2: preprocessor(x=(seg1, seg2)))`. - Args: tokenizer: A `keras_nlp.models.BertTokenizer` instance. sequence_length: The length of the packed inputs. @@ -73,79 +60,72 @@ class BertPreprocessor(Preprocessor): left-to-right manner and fills up the buckets until we run out of budget. It supports an arbitrary number of segments. + Call arguments: + x: A tensor of single string sequences, or a tuple of multiple + tensor sequences to be packed together. Inputs may be batched or + unbatched. For single sequences, raw python inputs will be converted + to tensors. For multiple sequences, pass tensors directly. + y: Any label data. Will be passed through unaltered. + sample_weight: Any label weight data. Will be passed through unaltered. + Examples: + + Directly calling the layer on data. ```python - # Load the preprocessor from a preset. - preprocessor = keras_nlp.models.BertPreprocessor.from_preset("bert_base_en_uncased") + preprocessor = keras_nlp.models.BertPreprocessor.from_preset( + "bert_base_en_uncased" + ) # Tokenize and pack a single sentence. - sentence = tf.constant("The quick brown fox jumped.") - preprocessor(sentence) - # Same output. preprocessor("The quick brown fox jumped.") - # Tokenize and a batch of single sentences. - sentences = tf.constant( - ["The quick brown fox jumped.", "Call me Ishmael."] - ) - preprocessor(sentences) - # Same output. - preprocessor( - ["The quick brown fox jumped.", "Call me Ishmael."] - ) + # Tokenize a batch of single sentences. + preprocessor(["The quick brown fox jumped.", "Call me Ishmael."]) - # Tokenize and pack a sentence pair. - first_sentence = tf.constant("The quick brown fox jumped.") - second_sentence = tf.constant("The fox tripped.") - preprocessor((first_sentence, second_sentence)) + # Preprocess a batch of sentence pairs. + # When handling multiple sequences, always convert to tensors first! + first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) + second = tf.constant(["The fox tripped.", "Oh look, a whale."]) + preprocessor((first, second)) - # Map a dataset to preprocess a single sentence. - features = tf.constant( - ["The quick brown fox jumped.", "Call me Ishmael."] + # Custom vocabulary. + vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] + vocab += ["The", "quick", "brown", "fox", "jumped", "."] + tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab) + preprocessor = keras_nlp.models.BertPreprocessor(tokenizer) + preprocessor("The quick brown fox jumped.") + ``` + + Mapping with `tf.data.Dataset`. + ```python + preprocessor = keras_nlp.models.BertPreprocessor.from_preset( + "bert_base_en_uncased" ) - labels = tf.constant([0, 1]) - ds = tf.data.Dataset.from_tensor_slices((features, labels)) + + first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."]) + second = tf.constant(["The fox tripped.", "Oh look, a whale."]) + label = tf.constant([1, 1]) + + # Map labeled single sentences. 
+ ds = tf.data.Dataset.from_tensor_slices((first, label)) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) - # Map a dataset to preprocess sentence pairs. - first_sentences = tf.constant( - ["The quick brown fox jumped.", "Call me Ishmael."] - ) - second_sentences = tf.constant( - ["The fox tripped.", "Oh look, a whale."] - ) - labels = tf.constant([1, 1]) - ds = tf.data.Dataset.from_tensor_slices( - ( - (first_sentences, second_sentences), labels - ) - ) + # Map unlabeled single sentences. + ds = tf.data.Dataset.from_tensor_slices(first) ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) - # Map a dataset to preprocess unlabeled sentence pairs. - first_sentences = tf.constant( - ["The quick brown fox jumped.", "Call me Ishmael."] - ) - second_sentences = tf.constant( - ["The fox tripped.", "Oh look, a whale."] - ) - ds = tf.data.Dataset.from_tensor_slices((first_sentences, second_sentences)) + # Map labeled sentence pairs. + ds = tf.data.Dataset.from_tensor_slices(((first, second), label)) + ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE) + + # Map unlabeled sentence pairs. + ds = tf.data.Dataset.from_tensor_slices((first, second)) # Watch out for tf.data's default unpacking of tuples here! # Best to invoke the `preprocessor` directly in this case. ds = ds.map( - lambda s1, s2: preprocessor(x=(s1, s2)), + lambda first, second: preprocessor(x=(first, second)), num_parallel_calls=tf.data.AUTOTUNE, ) - - # Alternatively, you can create a preprocessor from your own vocabulary. - # The usage is exactly the same as shown above. - vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]"] - vocab += ["The", "qu", "##ick", "br", "##own", "fox", "tripped"] - vocab += ["Call", "me", "Ish", "##mael", "."] - vocab += ["Oh", "look", "a", "whale"] - vocab += ["I", "forgot", "my", "home", "##work"] - tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab) - preprocessor = keras_nlp.models.BertPreprocessor(tokenizer) ``` """ diff --git a/keras_nlp/models/bert/bert_tokenizer.py b/keras_nlp/models/bert/bert_tokenizer.py index a50ac44e57..f5acf8eb4f 100644 --- a/keras_nlp/models/bert/bert_tokenizer.py +++ b/keras_nlp/models/bert/bert_tokenizer.py @@ -52,30 +52,25 @@ class BertTokenizer(WordPieceTokenizer): tokenization. Examples: - - Batched input. - >>> vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] - >>> vocab += ["The", "quick", "brown", "fox.", "jumped", "over"] - >>> inputs = ["The quick brown fox.", "The fox."] - >>> tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab) - >>> tokenizer(inputs) - - - Unbatched input. - >>> vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] - >>> vocab += ["The", "qu", "##ick", "brown", "fox", "."] - >>> inputs = "The fox." - >>> tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab) - >>> tokenizer(inputs) - - - Detokenization. - >>> vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] - >>> vocab += ["The", "qu", "##ick", "brown", "fox", "."] - >>> inputs = "The quick brown fox." - >>> tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab) - >>> tokenizer.detokenize(tokenizer.tokenize(inputs)).numpy().decode('utf-8') - 'The quick brown fox .' + ```python + # Unbatched input. + tokenizer = keras_nlp.models.BertTokenizer.from_preset( + "bert_base_en_uncased", + ) + tokenizer("The quick brown fox jumped.") + + # Batched input. + tokenizer(["The quick brown fox jumped.", "The fox slept."]) + + # Detokenization. + tokenizer.detokenize(tokenizer("The quick brown fox jumped.")) + + # Custom vocabulary. 
+ vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] + vocab += ["The", "quick", "brown", "fox", "jumped", "."] + tokenizer = keras_nlp.models.BertTokenizer(vocabulary=vocab) + tokenizer("The quick brown fox jumped.") + ``` """ def __init__(