Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Add Large Dim Checks for linalg Operators #18816

Merged
merged 12 commits into from
Jul 31, 2020
23 changes: 23 additions & 0 deletions src/operator/tensor/la_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,14 @@ struct LaTrianParam : public dmlc::Parameter<LaTrianParam> {
}
};

// check if any dim will overflow 32-bit int
Zha0q1 marked this conversation as resolved.
Show resolved Hide resolved
inline void check_large_dim(std::vector<dim_t> dims) {
for (dim_t dim : dims) {
CHECK_LE(dim, INT_MAX)
<< "Large matrix dimensions (>= 2^31) are not supported";
}
}

// Common function for shape inference for matrix mult and matrix mac.
inline bool LaMatrixMultMacOpShape(const nnvm::NodeAttrs& attrs,
mxnet::ShapeVector* in_attrs,
Expand All @@ -181,6 +189,11 @@ inline bool LaMatrixMultMacOpShape(const nnvm::NodeAttrs& attrs,
const int ndim((*in_attrs)[0].ndim()), axis(axis_param < 0 ? ndim + axis_param : axis_param);
CHECK(axis >= 0 && axis < ndim-1)
<< "Invalid row axis (" << axis_param << ")";
// Check if input matrix dims are too large
check_large_dim({(*in_attrs)[0][axis],
(*in_attrs)[0][ndim-1],
(*in_attrs)[1][axis],
(*in_attrs)[1][ndim-1]});
std::vector<int> oshape(ndim);
for ( int i = 0; i < ndim-1; ++i ) {
if (i != axis) {
Expand Down Expand Up @@ -225,6 +238,10 @@ inline bool LaTriangMatrixMultOpShape(const nnvm::NodeAttrs& attrs,
<< "Shapes of inputs 0, 1 must be the same, except on last two dimensions";
oshape[i] = (*in_attrs)[0][i];
}
// Check if the input matrix dims are too large; it suffices to check the second
// input only because the first is square whose size is bounded by memory
check_large_dim({(*in_attrs)[1][ndim-1],
(*in_attrs)[1][ndim-2]});
if ( param.rightside ) {
// We compute B * A where A is the first and B the second input.
CHECK_EQ((*in_attrs)[0][ndim-2], (*in_attrs)[1][ndim-1])
Expand Down Expand Up @@ -341,6 +358,9 @@ inline bool LaSyrkShape(const nnvm::NodeAttrs& attrs,
bool transpose = nnvm::get<LaSyrkParam>(attrs.parsed).transpose;
const int ndim = in_attr.ndim();
if ( ndim >= 2 ) {
// Check if input matrix dims are too large
check_large_dim({in_attr[ndim-1],
in_attr[ndim-2]});
// Forward shape inference.
std::vector<int> oshape(ndim);
for ( int i = 0; i < ndim-2; ++i ) {
Expand Down Expand Up @@ -371,6 +391,9 @@ inline bool LaLQFactShape(const nnvm::NodeAttrs& attrs,
const int ndim(in_a.ndim());
CHECK_LE(in_a[ndim-2], in_a[ndim-1])
<< "Input A shape wrong: Last dimension must be >= than second to last";
// Check if the last dimension is too large; it suffices to check the last dim
// only since the second to last dim <= last dim
check_large_dim({in_a[ndim-1]});
// Q must have same shape as A
SHAPE_ASSIGN_CHECK(*out_attrs, 0, in_a);
std::vector<int> oshape_l(ndim);
Expand Down
49 changes: 47 additions & 2 deletions tests/nightly/test_large_array.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,8 @@

from mxnet.test_utils import rand_ndarray, assert_almost_equal, rand_coord_2d, default_context, check_symbolic_forward, create_2d_tensor, get_identity_mat, get_identity_mat_batch
from mxnet import gluon, nd
from common import with_seed, with_post_test_cleanup
from common import with_seed, with_post_test_cleanup, assertRaises
from mxnet.base import MXNetError
from nose.tools import with_setup
import unittest

Expand All @@ -41,7 +42,7 @@
LARGE_SIZE = LARGE_X * SMALL_Y
LARGE_TENSOR_SHAPE = 2**32
RNN_LARGE_TENSOR = 2**28

INT32_MAX = 2**31-1

def test_nn():
def check_gluon_embedding():
Expand Down Expand Up @@ -1350,6 +1351,50 @@ def run_trsm(inp):
check_batch_trsm()


def test_linalg_large_dim():
def check_gemm():
A = nd.ones(shape=(1, INT32_MAX + 1, 1))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this should go to test_large_vector since the input contains 1 dimension which is large while the rest of the dimensions are small. @access2rohit please confirm

Copy link
Contributor

@ChaiBapchya ChaiBapchya Jul 30, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Name of the file can be made better.
Basically the idea was to have 2 separate files

  1. test_large_array.py [more like test_large_size.py]
    testing input whose individual dimensions are less than 2**32 but size of the input is > 2**32

  2. test_large_vector.py [more like test_large_shape.py]
    testing input where at least 1 individual dimension is > 2**32

Copy link
Contributor Author

@Zha0q1 Zha0q1 Jul 30, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe we should make more explicit comments on what those test do? I can do so in my next commit. I still think the two cases both fall into the same category which is testing with tensors of large dimensions.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In comments I would say like 1. those tests are for overflowing total size 2. those other tests are for overflowing index calculation i.e. row dim, col dim, etc

Copy link
Contributor

@ChaiBapchya ChaiBapchya Jul 30, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

From consistency standpoint, I'd

  1. put these tests in test_large_vector.py file.
  2. Rename that file to [whatever sounds right I just gave a suggestion above]
  3. add a comment in that file.

All dimensions in test_large_array.py are less than INT32_MAX
Large dimension [>2**32] was introduced in test_large_vector.py for the same reason.

So to keep the testing approach consistent I'd do that.

Even if both files play with "Large tensors" one does it for large "size" other specifically for large "shape".

Copy link
Contributor Author

@Zha0q1 Zha0q1 Jul 30, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

well one thing to note is that they are not vectors per se. The inputs are all 3D whereas in test_large_vector they are all 1D. The dim checks happen on both row and col dim so you can see I used both (1, 1, x) and (1, x, 1).

I will add more comments in next commit

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes I know they aren't "vectors" and hence recommended "renaming the file"

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Contributor

@access2rohit access2rohit Jul 30, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Zha0q1 the vector file generally houses tests for operators with a single dimension that exceeds 2^32 range. Please address what is suggested by @ChaiBapchya and move it to vector tests. Feel free to rename the file to test_large_dimensions.py

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, that would make sense. I have moved the tests to test_large_vector.py, for which I kept the original name to avoid a naming discrepancy with master. Comments were also added to the tests.

B = nd.ones(shape=(1, INT32_MAX + 1, 1))
C = nd.ones(shape=(1, 1, 1))
assertRaises(MXNetError, nd.linalg.gemm, \
A, B, C, transpose_b=True)

def check_gemm2():
    # gemm2: both operands carry a last dimension of INT32_MAX + 1, which
    # must be rejected (MXNetError) by the large-dim shape check.
    lhs = nd.ones(shape=(1, 1, INT32_MAX + 1))
    rhs = nd.ones(shape=(1, 1, INT32_MAX + 1))
    assertRaises(MXNetError, nd.linalg.gemm2, lhs, rhs, transpose_b=True)

def check_trmm():
    # trmm: triangular operand is tiny, but the second operand's row
    # dimension exceeds INT32_MAX and must trigger MXNetError.
    tri = nd.ones(shape=(1, 1, 1))
    mat = nd.ones(shape=(1, INT32_MAX + 1, 1))
    assertRaises(MXNetError, nd.linalg.trmm, tri, mat, rightside=True)

def check_trsm():
    # trsm: second operand's last dimension exceeds INT32_MAX and must
    # trigger MXNetError during shape inference.
    tri = nd.ones(shape=(1, 1, 1))
    mat = nd.ones(shape=(1, 1, INT32_MAX + 1))
    assertRaises(MXNetError, nd.linalg.trsm, tri, mat, rightside=False)

def check_syrk():
    # syrk: a row dimension beyond INT32_MAX must be rejected both for the
    # plain and the transposed product.
    mat = nd.ones(shape=(1, INT32_MAX + 1, 1))
    assertRaises(MXNetError, nd.linalg.syrk, mat)
    assertRaises(MXNetError, nd.linalg.syrk, mat, transpose=True)

def check_gelqf():
    # gelqf (LQ factorization): a last dimension beyond INT32_MAX must be
    # rejected with MXNetError.
    mat = nd.ones(shape=(1, 1, INT32_MAX + 1))
    assertRaises(MXNetError, nd.linalg.gelqf, mat)

# All inputs are batched (3D); each helper asserts that the corresponding
# linalg operator raises MXNetError when a single matrix dimension
# exceeds INT32_MAX.
check_gemm()
check_gemm2()
check_trmm()
check_trsm()
check_syrk()
check_gelqf()


def test_basic():
def check_elementwise():
a = nd.ones(shape=(LARGE_X, SMALL_Y))
Expand Down