Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add jax_padding support to driver and server lib #54

Merged
merged 1 commit into from
Apr 24, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions jetstream/core/orchestrator.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,12 +201,16 @@ class Driver:
_generate_slots: list[queue.Queue[int]] = []
_active_requests: list[queue.Queue[tuple[int, ActiveRequest | None]]] = []

# TODO: remove jax_padding after all the engines migrate to np padding
_jax_padding = True

def __init__(
self,
prefill_engines: Optional[list[engine_api.Engine]] = None,
generate_engines: Optional[list[engine_api.Engine]] = None,
prefill_params: Optional[list[Any]] = None,
generate_params: Optional[list[Any]] = None,
jax_padding: bool = True,
):
if prefill_engines is None:
prefill_engines = []
Expand Down Expand Up @@ -283,6 +287,8 @@ def __init__(
for idx, engine in enumerate(self._generate_engines)
]

self._jax_padding = jax_padding

# Create all threads
self._prefill_threads = [
JetThread(
Expand Down Expand Up @@ -428,6 +434,7 @@ def _prefill_thread(self, idx: int):
vocab,
is_bos=is_bos,
max_prefill_length=prefill_engine.max_prefill_length,
jax_padding=self._jax_padding,
)
# Compute new kv cache for the prefill_text, conditional on
# history.
Expand Down
2 changes: 2 additions & 0 deletions jetstream/core/server_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ def run(
devices: Any,
credentials: Any = grpc.insecure_server_credentials(),
threads: int | None = None,
jax_padding: bool = True,
) -> JetStreamServer:
"""Runs a server with a specified config.

Expand All @@ -116,6 +117,7 @@ def run(
generate_engines=engines.generate_engines + engines.interleaved_engines,
prefill_params=prefill_params + shared_params,
generate_params=generate_params + shared_params,
jax_padding=jax_padding,
)
# We default threads to the total number of concurrent allowed decodes,
# to make sure we can fully saturate the model. Set default minimum to 64.
Expand Down
22 changes: 22 additions & 0 deletions jetstream/tests/engine/test_mock_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,21 @@ def _prefill(self):
)
return engine, params, prefill_result, true_length

def _prefill_np(self):
  """Performs prefill via the numpy-padding path and returns its results.

  Mirrors `_prefill` but passes `jax_padding=False` to exercise the
  numpy padding code path in `token_utils.tokenize_and_pad`.

  Returns:
    A tuple of (engine, params, prefill_result, true_length), where
    `prefill_result` is the engine's prefill output and `true_length`
    is the unpadded token count reported by the tokenizer.
  """
  engine, params = self._setup()
  # A 2 will be pre-pended as 'bos' token from the vocab.
  text = "AB"
  metadata = engine.get_tokenizer()
  vocab = token_utils.load_vocab(metadata.path, metadata.extra_ids)
  tokens, true_length = token_utils.tokenize_and_pad(
      text, vocab, is_bos=True, jax_padding=False
  )
  # Use the tokenizer-reported true_length rather than a hard-coded 3 so
  # the call stays consistent with the actual tokenization (presumably
  # bos + the "AB" tokens = 3 — matches the 3-wide expected array in
  # test_prefill_np).
  prefill_result = engine.prefill(
      params=params, padded_tokens=tokens, true_length=true_length
  )
  return engine, params, prefill_result, true_length

def _generate(self, slot=1):
"""Performs a single generation step."""
engine, params, prefill_result, _ = self._prefill()
Expand All @@ -83,6 +98,13 @@ def test_prefill(self):
prefill_result[:, :true_length], np.array([[4.0, 130.0, 132.0]])
)

def test_prefill_np(self):
  """Tests prefill (numpy padding path, weight = 2).

  Expects the same logits as test_prefill: switching the padding
  implementation must not change the prefill result.
  """
  _, _, prefill_result, true_length = self._prefill_np()
  np.testing.assert_array_equal(
      prefill_result[:, :true_length], np.array([[4.0, 130.0, 132.0]])
  )

def test_generate(self, slot=1):
"""Tests multiple generation steps."""
engine, params, decode_state, sampled_tokens = self._generate(slot=slot)
Expand Down
Loading