From ec2c0043d2773dd3965ce3c3aafa49fdbc662776 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Tue, 15 Jul 2025 14:23:59 +0800
Subject: [PATCH 1/2] Fix

---
 paddle/phi/kernels/fusion/gpu/fused_layernorm_kernel.cu | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/paddle/phi/kernels/fusion/gpu/fused_layernorm_kernel.cu b/paddle/phi/kernels/fusion/gpu/fused_layernorm_kernel.cu
index ed9cbfab49d06f..142f15b77bfe4e 100644
--- a/paddle/phi/kernels/fusion/gpu/fused_layernorm_kernel.cu
+++ b/paddle/phi/kernels/fusion/gpu/fused_layernorm_kernel.cu
@@ -1044,6 +1044,13 @@ void FusedLayerNormKernel(const Context& dev_ctx,
   }
 
   using U = phi::funcs::LayerNormParamType<T>;
+  if (x.numel() == 0) {
+    dev_ctx.template Alloc<T>(out);
+    if (residual_out) dev_ctx.template Alloc<T>(residual_out);
+    if (mean) dev_ctx.template Alloc<U>(mean);
+    if (variance) dev_ctx.template Alloc<U>(variance);
+    return;
+  }
   const T* x_data = x.data<T>();
   const U* norm_weight_data =
       norm_weight ? norm_weight.get().data<U>() : nullptr;

From f9807468a28b859c867faa0713cc63c63ce06369 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Tue, 15 Jul 2025 15:51:37 +0800
Subject: [PATCH 2/2] Fix

---
 test/legacy_test/test_fused_layernorm_op.py | 25 +++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/test/legacy_test/test_fused_layernorm_op.py b/test/legacy_test/test_fused_layernorm_op.py
index b8af12ae270d85..e44efa3c39067f 100644
--- a/test/legacy_test/test_fused_layernorm_op.py
+++ b/test/legacy_test/test_fused_layernorm_op.py
@@ -1204,5 +1204,30 @@ def test_residual_bias_add_layernorm(self):
         )
 
 
+@unittest.skipIf(
+    not core.is_compiled_with_cuda() and not paddle.is_compiled_with_rocm(),
+    "core is not compiled with CUDA or ROCM",
+)
+class TestlayernormOp_ZeroSize(TestlayernormOp):
+    def setUp(self):
+        np.random.seed(20)
+        # 0-size
+        batch = 0
+        cols = 256
+
+        self.x_np = np.random.uniform(-0.05, 0.05, [batch, cols])
+        self.residual_np = np.random.uniform(-0.05, 0.05, [batch, cols])
+        self.bias_np = np.random.uniform(-0.05, 0.05, [cols])
+        self.norm_weight_np = np.random.uniform(-0.05, 0.05, [cols])
+        self.norm_bias_np = np.random.uniform(-0.05, 0.05, [cols])
+        self.epsilon = 1e-5
+        self.residual_alpha = np.random.uniform(low=0.1, high=1.1, size=[1])
+
+        self.quant_scale = 0.15
+        self.quant_round_type = 1
+        self.quant_max_bound = 127
+        self.quant_min_bound = -127
+
+
 if __name__ == "__main__":
     unittest.main()