From 7a5a00bfb9b7a3e368ccc40c9a4a8ee6c44a0402 Mon Sep 17 00:00:00 2001 From: co63oc Date: Mon, 1 Sep 2025 10:32:07 +0800 Subject: [PATCH 1/3] add test_draft_model_postprocess.py --- .../operators/test_draft_model_postprocess.py | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 tests/operators/test_draft_model_postprocess.py diff --git a/tests/operators/test_draft_model_postprocess.py b/tests/operators/test_draft_model_postprocess.py new file mode 100644 index 00000000000..3f93bd1615e --- /dev/null +++ b/tests/operators/test_draft_model_postprocess.py @@ -0,0 +1,48 @@ +# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import unittest + +import paddle + +from fastdeploy.model_executor.ops.gpu import draft_model_postprocess + + +class TestDraftModelPostProcess(unittest.TestCase): + def _test_draft_model_postprocess(self, batch_size=1, base_model_draft_token_len=8192): + paddle.seed(66) + base_model_draft_tokens = paddle.randint( + low=-1, + high=1, + shape=[batch_size, base_model_draft_token_len], + dtype="int64", + ) + base_model_seq_lens_encoder = paddle.randint(low=0, high=2, shape=[batch_size], dtype="int32") + random_floats = paddle.rand(shape=[batch_size]) + base_model_stop_flags = random_floats >= 0.5 + + base_model_seq_lens_this_time_gpu = paddle.ones((batch_size), dtype=paddle.int32) # noqa: F841 + draft_model_postprocess( + base_model_draft_tokens, + base_model_seq_lens_this_time_gpu, + base_model_seq_lens_encoder, + base_model_stop_flags, + ) + + def test_enough_cases(self): + self._test_draft_model_postprocess(1, 11) + self._test_draft_model_postprocess(2, 2048) + + +if __name__ == "__main__": + unittest.main() From d632b823e16c9a4fedad046c1db618990817e931 Mon Sep 17 00:00:00 2001 From: co63oc Date: Wed, 3 Sep 2025 08:26:55 +0800 Subject: [PATCH 2/3] fix --- .../operators/test_draft_model_postprocess.py | 32 ++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/tests/operators/test_draft_model_postprocess.py b/tests/operators/test_draft_model_postprocess.py index 3f93bd1615e..6a0aeb16133 100644 --- a/tests/operators/test_draft_model_postprocess.py +++ b/tests/operators/test_draft_model_postprocess.py @@ -13,11 +13,35 @@ # limitations under the License. 
import unittest +import numpy as np import paddle from fastdeploy.model_executor.ops.gpu import draft_model_postprocess +def draft_model_postprocess_cpu( + base_model_draft_tokens, + base_model_seq_lens_encoder, + base_model_stop_flags, +): + bsz = base_model_draft_tokens.shape[0] + base_model_draft_token_len = base_model_draft_tokens.shape[1] + base_model_seq_lens_this_time = paddle.ones((bsz), dtype=paddle.int32) + for tid in range(bsz): + if (not base_model_stop_flags[tid]) and (base_model_seq_lens_encoder[tid] == 0): + base_model_draft_tokens_now = base_model_draft_tokens[tid] + token_num = 0 + for i in range(base_model_draft_token_len): + if base_model_draft_tokens_now[i] != -1: + token_num += 1 + + base_model_seq_lens_this_time[tid] = token_num + elif base_model_stop_flags[tid]: + base_model_seq_lens_this_time[tid] = 0 + + return base_model_seq_lens_this_time + + class TestDraftModelPostProcess(unittest.TestCase): def _test_draft_model_postprocess(self, batch_size=1, base_model_draft_token_len=8192): paddle.seed(66) @@ -31,13 +55,19 @@ def _test_draft_model_postprocess(self, batch_size=1, base_model_draft_token_len random_floats = paddle.rand(shape=[batch_size]) base_model_stop_flags = random_floats >= 0.5 - base_model_seq_lens_this_time_gpu = paddle.ones((batch_size), dtype=paddle.int32) # noqa: F841 + base_model_seq_lens_this_time = draft_model_postprocess_cpu( + base_model_draft_tokens, + base_model_seq_lens_encoder, + base_model_stop_flags, + ) + base_model_seq_lens_this_time_gpu = paddle.ones((batch_size), dtype=paddle.int32) draft_model_postprocess( base_model_draft_tokens, base_model_seq_lens_this_time_gpu, base_model_seq_lens_encoder, base_model_stop_flags, ) + np.testing.assert_allclose(base_model_seq_lens_this_time.numpy(), base_model_seq_lens_this_time_gpu.numpy()) def test_enough_cases(self): self._test_draft_model_postprocess(1, 11) From 89aa0639ee315717714563c477fb213013743417 Mon Sep 17 00:00:00 2001 From: co63oc Date: Wed, 3 Sep 2025 08:33:22 
+0800 Subject: [PATCH 3/3] test_draft_model_postprocess: cover more batch sizes and draft token lengths --- tests/operators/test_draft_model_postprocess.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/operators/test_draft_model_postprocess.py b/tests/operators/test_draft_model_postprocess.py index 6a0aeb16133..d7ae34a3bf9 100644 --- a/tests/operators/test_draft_model_postprocess.py +++ b/tests/operators/test_draft_model_postprocess.py @@ -70,8 +70,14 @@ def _test_draft_model_postprocess(self, batch_size=1, base_model_draft_token_len np.testing.assert_allclose(base_model_seq_lens_this_time.numpy(), base_model_seq_lens_this_time_gpu.numpy()) def test_enough_cases(self): + self._test_draft_model_postprocess(100, 1024) self._test_draft_model_postprocess(1, 11) + self._test_draft_model_postprocess(1, 8192) self._test_draft_model_postprocess(2, 2048) + self._test_draft_model_postprocess(3, 1023) + self._test_draft_model_postprocess(4, 2047) + self._test_draft_model_postprocess(5, 4095) + self._test_draft_model_postprocess(10, 9191) if __name__ == "__main__":