Commit d9a5ed1

NicolasHug authored and facebook-github-bot committed
[fbsync] Add opchecks for RoiAlign (#8057)
Reviewed By: vmoens
Differential Revision: D50789095
fbshipit-source-id: 0fce78ae86366f72961a11068d785002fd000b73
1 parent db14062 · commit d9a5ed1

File tree: 2 files changed, +25 −1 lines


test/optests_failures_dict.json (+12 −1)
@@ -1,5 +1,16 @@
 {
     "_description": "This is a dict containing failures for tests autogenerated by generate_opcheck_tests. For more details, please see https://docs.google.com/document/d/1Pj5HRZvdOq3xpFpbEjUZp2hBovhy7Wnxw14m6lF2154/edit",
     "_version": 1,
-    "data": {}
+    "data": {
+        "torchvision::roi_align": {
+            "TestRoIAlign.test_aot_dispatch_dynamic__test_mps_error_inputs": {
+                "comment": "RuntimeError: MPS does not support roi_align backward with float16 inputs",
+                "status": "xfail"
+            },
+            "TestRoIAlign.test_autograd_registration__test_mps_error_inputs": {
+                "comment": "NotImplementedError: autograd_registration_check: NYI devices other than CPU/CUDA, got {'mps'}",
+                "status": "xfail"
+            }
+        }
+    }
 }
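Note: the two entries above mark autogenerated opcheck variants of the existing TestRoIAlign.test_mps_error_inputs test as expected failures on MPS. A minimal, hypothetical sketch of the float16 limitation the first comment refers to is below; it needs a machine with an MPS device, and the tensor shapes and values are illustrative rather than taken from the test suite.

import torch
from torchvision.ops import roi_align

# Illustrative float16 inputs on the MPS backend (hypothetical shapes).
x = torch.randn(1, 3, 16, 16, dtype=torch.float16, device="mps", requires_grad=True)
# Each roi row is (batch_index, x1, y1, x2, y2).
rois = torch.tensor([[0.0, 0.0, 0.0, 8.0, 8.0]], dtype=torch.float16, device="mps")

# Per the failure comment above, pushing float16 inputs through roi_align and
# autograd on MPS is expected to fail with
# "RuntimeError: MPS does not support roi_align backward with float16 inputs".
out = roi_align(x, rois, output_size=(5, 5), spatial_scale=1.0, sampling_ratio=2)
out.sum().backward()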

test/test_ops.py (+13)
@@ -474,6 +474,7 @@ def test_boxes_shape(self):
     @pytest.mark.parametrize("x_dtype", (torch.float16, torch.float32, torch.float64))  # , ids=str)
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois_dtype=None):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -491,6 +492,7 @@ def test_forward(self, device, contiguous, deterministic, aligned, x_dtype, rois
     @pytest.mark.parametrize("deterministic", (True, False))
     @pytest.mark.parametrize("x_dtype", (torch.float, torch.half))
     @pytest.mark.parametrize("rois_dtype", (torch.float, torch.half))
+    @pytest.mark.opcheck_only_one()
     def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
         with torch.cuda.amp.autocast():
             self.test_forward(
@@ -506,6 +508,7 @@ def test_autocast(self, aligned, deterministic, x_dtype, rois_dtype):
     @pytest.mark.parametrize("device", cpu_and_cuda_and_mps())
     @pytest.mark.parametrize("contiguous", (True, False))
     @pytest.mark.parametrize("deterministic", (True, False))
+    @pytest.mark.opcheck_only_one()
     def test_backward(self, seed, device, contiguous, deterministic):
         if deterministic and device == "cpu":
             pytest.skip("cpu is always deterministic, don't retest")
@@ -520,6 +523,7 @@ def _make_rois(self, img_size, num_imgs, dtype, num_rois=1000):
     @pytest.mark.parametrize("aligned", (True, False))
     @pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 10), (0.1, 50)))
     @pytest.mark.parametrize("qdtype", (torch.qint8, torch.quint8, torch.qint32))
+    @pytest.mark.opcheck_only_one()
     def test_qroialign(self, aligned, scale, zero_point, qdtype):
         """Make sure quantized version of RoIAlign is close to float version"""
         pool_size = 5
@@ -589,6 +593,15 @@ def test_jit_boxes_list(self):
         self._helper_jit_boxes_list(model)
 
 
+optests.generate_opcheck_tests(
+    testcase=TestRoIAlign,
+    namespaces=["torchvision"],
+    failures_dict_path=os.path.join(os.path.dirname(__file__), "optests_failures_dict.json"),
+    additional_decorators=[],
+    test_utils=OPTESTS,
+)
+
+
 class TestPSRoIAlign(RoIOpTester):
     mps_backward_atol = 5e-2
 
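For reference, the kind of operator checks this commit enables through generate_opcheck_tests can also be run by hand against a single sample call. The sketch below is hypothetical: it assumes a recent PyTorch (2.4 or newer) where torch.library.opcheck is public API, whereas the commit itself uses optests.generate_opcheck_tests from torch.testing._internal, and the tensor shapes are illustrative only.

import torch
import torchvision  # noqa: F401 -- importing torchvision registers the torchvision::roi_align op

x = torch.randn(2, 3, 16, 16, requires_grad=True)
# Each roi row is (batch_index, x1, y1, x2, y2).
rois = torch.tensor([[0.0, 1.0, 1.0, 9.0, 9.0], [1.0, 0.0, 0.0, 8.0, 8.0]])

# Runs the schema, autograd-registration, FakeTensor, and AOT-dispatch checks
# for one call with arguments (input, rois, spatial_scale, pooled_height,
# pooled_width, sampling_ratio, aligned).
torch.library.opcheck(
    torch.ops.torchvision.roi_align,
    args=(x, rois, 1.0, 5, 5, 2, False),
)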