@@ -2719,6 +2719,7 @@ class TestGPTOSS(LlmapiAccuracyTestHarness):
         (True, True),
     ])
     def test_w4_1gpu(self, moe_backend, cuda_graph, overlap_scheduler, mocker):
+        pytest.skip("https://nvbugs/5481087")
         if moe_backend == "TRITON" and not IS_TRITON_KERNELS_AVAILABLE:
             pytest.skip("Triton kernels are not available")
 
@@ -2736,7 +2737,7 @@ def test_w4_1gpu(self, moe_backend, cuda_graph, overlap_scheduler, mocker):
 
         with llm:
             model_name = "GPT-OSS/MXFP4"
-            mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN": 8192})
+            mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
             task = GSM8K(model_name)
             task.evaluate(llm,
                           extra_evaluator_kwargs=self.extra_evaluator_kwargs)
@@ -2756,6 +2757,7 @@ def test_w4_1gpu(self, moe_backend, cuda_graph, overlap_scheduler, mocker):
         ids=["tp4", "ep4", "dp4"])
     def test_w4_4gpus(self, moe_backend, tp_size, pp_size, ep_size,
                       attention_dp, cuda_graph, overlap_scheduler, mocker):
+        pytest.skip("https://nvbugs/5481087")
         if moe_backend == "TRITON":
             if not IS_TRITON_KERNELS_AVAILABLE:
                 pytest.skip("Triton kernels are not available")
@@ -2776,7 +2778,7 @@ def test_w4_4gpus(self, moe_backend, tp_size, pp_size, ep_size,
         with llm:
             model_name = "GPT-OSS/MXFP4"
             task = GSM8K(model_name)
-            mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN": 8192})
+            mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
             task.evaluate(llm,
                           extra_evaluator_kwargs=self.extra_evaluator_kwargs)
 
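For context on the `mocker.patch.object` change: pytest-mock's `mocker.patch.object` wraps `unittest.mock.patch.object`, which takes the attribute name as a string followed by the replacement value, so the old dict-style call raises a `TypeError` at runtime. Below is a minimal sketch of the corrected usage; the `GSM8K` stand-in class and its default value are assumptions for illustration, not the real evaluator.

```python
# Stand-in for the real GSM8K evaluator class; the default value here
# is a hypothetical placeholder, not the actual one.
class GSM8K:
    MAX_OUTPUT_LEN = 2048  # hypothetical default


def test_patched_max_output_len(mocker):
    # patch.object(target, attribute, new): the attribute name must be a
    # string. The old form, mocker.patch.object(GSM8K, {"MAX_OUTPUT_LEN":
    # 8192}), fails with a TypeError because a dict is not a valid
    # attribute name.
    mocker.patch.object(GSM8K, "MAX_OUTPUT_LEN", 8192)
    assert GSM8K.MAX_OUTPUT_LEN == 8192
    # pytest-mock undoes the patch automatically at test teardown, so the
    # class attribute is restored for subsequent tests.
```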