From 05c8ac1851fdf58c13555e349be16e6e97fa74c5 Mon Sep 17 00:00:00 2001
From: Rohit Kumar Srivastava
Date: Thu, 24 Oct 2019 02:05:15 +0000
Subject: [PATCH] Fix tensor size guards to use the 2^31-element limit

---
 python/mxnet/ndarray/ndarray.py   |  6 +++---
 src/c_api/c_api.cc                | 14 +++++++------
 src/c_api/c_api_ndarray.cc        |  6 +++---
 src/ndarray/ndarray.cc            | 17 ++++++++++------
 src/ndarray/ndarray_function.cc   |  5 +++++
 src/operator/elemwise_op_common.h |  7 -------
 src/operator/tensor/init_op.h     | 35 +++++++++++++++++++++--------------
 7 files changed, 51 insertions(+), 39 deletions(-)

diff --git a/python/mxnet/ndarray/ndarray.py b/python/mxnet/ndarray/ndarray.py
index 206190b57c5f..7b570b111d68 100644
--- a/python/mxnet/ndarray/ndarray.py
+++ b/python/mxnet/ndarray/ndarray.py
@@ -158,9 +158,9 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
     size = 1
     for idx in shape:
         size = size * idx
-    if size > 2**32:
-        raise Exception("[Python] Size of tensor you are trying to allocate is larger than 2^32 elements. " +
-                        "Please build with flag USE_INT64_TENSOR_SIZE=1")
+    if size > 2**31:
+        raise Exception("[_new_alloc_handle] Size of tensor you are trying to allocate is larger than 2^31 elements. " +
+                        "Please build with flag USE_INT64_TENSOR_SIZE=1")
     check_call(_LIB.MXNDArrayCreateEx(
         c_array_buf(mx_uint, native_array('I', shape)),
         mx_uint(len(shape)),
diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index abde87d4b8ae..5b3ed556e07b 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -199,9 +199,9 @@ void CreateNDArray(const DataType* shape,
                    NDArrayHandle* out) {
   mxnet::TShape requested_shape = mxnet::TShape(shape, shape + ndim);
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(requested_shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(requested_shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[CreateNDArray] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   *out = new NDArray(requested_shape,
                      Context::Create(static_cast<Context::DeviceType>(dev_type), dev_id),
@@ -591,6 +591,11 @@ inline void GetShape(NDArrayHandle handle, const dtype** out_pdata, int* out_dim,
                      MXAPIThreadLocalEntry<dtype>* ret) {
   NDArray* arr = static_cast<NDArray*>(handle);
   if (!arr->is_none()) {
+    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+      CHECK_LT(arr->shape().Size(), (int64_t{1} << 31) - 1) <<
+                "Size of tensor you are trying to allocate is larger than "
+                "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    }
     mxnet::TShape s = arr->shape();
     if (!Imperative::Get()->is_np_shape()) {
       common::ConvertToLegacyShape(&s);
@@ -616,9 +621,6 @@ int MXNDArrayGetShapeEx(NDArrayHandle handle,
                         const int **out_pdata) {
   MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get();
   API_BEGIN();
-  CHECK_LT(static_cast<NDArray*>(handle)->shape().Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   GetShape(handle, out_pdata, out_dim, ret);
   API_END();
 }
diff --git a/src/c_api/c_api_ndarray.cc b/src/c_api/c_api_ndarray.cc
index ae58a1c871f0..6bfb3b35743d 100644
--- a/src/c_api/c_api_ndarray.cc
+++ b/src/c_api/c_api_ndarray.cc
@@ -56,9 +56,9 @@ void SetNDInputsOutputs(const nnvm::Op* op,
   for (int i = 0; i < num_inputs; ++i) {
     NDArray* inp = reinterpret_cast<NDArray*>(inputs[i]);
     if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-      CHECK_LT(inp->shape().Size(), (int64_t{1} << 32) - 1) <<
-                "Size of tensor you are trying to allocate is larger than "
-                "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+      CHECK_LT(inp->shape().Size(), (int64_t{1} << 31) - 1) <<
+                "[SetNDInputsOutputs] Size of tensor you are trying to allocate is larger than "
+                "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
     }
     ndinputs->emplace_back(inp);
   }
diff --git a/src/ndarray/ndarray.cc b/src/ndarray/ndarray.cc
index 8e11b20e5fa4..14311a8795dc 100644
--- a/src/ndarray/ndarray.cc
+++ b/src/ndarray/ndarray.cc
@@ -142,6 +142,11 @@ void NDArray::Chunk::CheckAndAllocData(const mxnet::TShape &shape, int dtype) {
   CHECK_NE(aux_shapes.size(), 0)
       << "data is expected to be allocated after aux_data";
   auto dbytes = shape.Size() * mshadow::mshadow_sizeof(dtype);
+  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[CheckAndAllocData] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+  }
   if (shandle.size < dbytes) {
     // free storage
     Storage::Get()->Free(shandle);
@@ -1883,9 +1888,9 @@ NDArray NDArray::Copy(Context ctx) const {
 void NDArray::SyncCopyFromCPU(const void *data, size_t size) const {
   mxnet::TShape dshape = this->shape();
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(size, (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(size, (int64_t{1} << 31) - 1) <<
+              "[SyncCopyFromCPU] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   CHECK_EQ(dshape.Size(), size)
       << "Memory size do not match";
@@ -2023,9 +2028,9 @@ void NDArray::SyncCopyFromNDArray(const NDArray& src, int i, int j) {
 void NDArray::SyncCopyToCPU(void *data, size_t size) const {
   mxnet::TShape dshape = this->shape();
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(size, (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(size, (int64_t{1} << 31) - 1) <<
+              "[SyncCopyToCPU] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   CHECK_EQ(dshape.Size(), size)
       << "Memory size do not match";
diff --git a/src/ndarray/ndarray_function.cc b/src/ndarray/ndarray_function.cc
index 34429446bd62..ed121899436a 100644
--- a/src/ndarray/ndarray_function.cc
+++ b/src/ndarray/ndarray_function.cc
@@ -38,6 +38,11 @@ void Copy<cpu, cpu>(const TBlob &from, TBlob *to,
                     RunContext ctx) {
   MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, {
     if (to->type_flag_ == from.type_flag_) {
+      if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+        CHECK_LT(from.Size(), (int64_t{1} << 31) - 1) <<
+                  "Size of tensor you are trying to allocate is larger than "
+                  "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+      }
       const index_t size = static_cast<index_t>(from.Size());
       CHECK_EQ(size, to->Size()) << "copying size mismatch, from: " << size * sizeof(DType)
                                  << " bytes, to: " << to->Size() * sizeof(DType) << " bytes.";
diff --git a/src/operator/elemwise_op_common.h b/src/operator/elemwise_op_common.h
index 7094e657d066..dc83a4b1f87f 100644
--- a/src/operator/elemwise_op_common.h
+++ b/src/operator/elemwise_op_common.h
@@ -164,13 +164,6 @@ inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *out_attrs) {
   if (n_in != -1) {
     CHECK_EQ(in_attrs->size(), static_cast<size_t>(n_in)) << " in operator " << attrs.name;
-    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-      for(mxnet::TShape shape : *in_attrs){
-        CHECK_LT(shape.Size(), (int64_t{1} << 32) - 1) <<
-                  "Size of tensor you are trying to allocate is larger than "
-                  "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-      }
-    }
   }
   if (n_out != -1) {
     CHECK_EQ(out_attrs->size(), static_cast<size_t>(n_out)) << " in operator " << attrs.name;
diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h
index 4ea3e2f7a0d7..ac7be8366155 100644
--- a/src/operator/tensor/init_op.h
+++ b/src/operator/tensor/init_op.h
@@ -272,15 +272,22 @@ inline bool InitShape(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(in_attrs->size(), 0U);
   CHECK_EQ(out_attrs->size(), 1U);
   mxnet::TShape param_shape = param.shape;
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(param_shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+  if (shape_is_known(param_shape) && !features::is_enabled(features::INT64_TENSOR_SIZE)) {
+    CHECK_LT(param_shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[InitShape-input] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   if (!Imperative::Get()->is_np_shape()) {
     common::ConvertToNumpyShape(&param_shape);
   }
-  if (shape_is_known((*out_attrs)[0]) && !shape_is_known(param_shape)) return true;
+  if (shape_is_known((*out_attrs)[0]) && !shape_is_known(param_shape)) {
+    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+      CHECK_LT(out_attrs->at(0).Size(), (int64_t{1} << 31) - 1) <<
+                "[InitShape-output] Size of tensor you are trying to allocate is larger than "
+                "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    }
+    return true;
+  }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, param_shape);
   return shape_is_known(out_attrs->at(0));
 }
@@ -342,9 +349,9 @@ template<typename xpu, typename ValueType>
 void Fill(mshadow::Stream<xpu> *s, const TBlob& b, const OpReqType req, ValueType val) {
   // If b is a zero-size tensor, do nothing.
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(b.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(b.Size(), (int64_t{1} << 31) - 1) <<
+              "[Fill] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   if (b.Size() == 0) return;
   if (req != kNullOp) {
@@ -592,9 +599,9 @@ inline bool RangeShape(const nnvm::NodeAttrs& attrs,
                           * param.repeat;
   mxnet::TShape output_shape = mxnet::TShape({static_cast<nnvm::dim_t>(out_size)});
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(output_shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(output_shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[RangeShape] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, output_shape);
   return true;
@@ -640,9 +647,9 @@ inline bool LinspaceShape(const nnvm::NodeAttrs& attrs,
     << "Number of sequence should be non-negative, received " << param.num;
   mxnet::TShape shape = mxnet::TShape({static_cast<nnvm::dim_t>(param.num)});
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[LinspaceShape] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
   return true;
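
Why the bound moves from 2^32 to 2^31: without USE_INT64_TENSOR_SIZE, MXNet's default tensor index type is a signed 32-bit integer, so the largest representable element count is 2^31 - 1. The old guard therefore accepted counts in the range [2^31, 2^32) that wrap to negative values once narrowed to 32 bits. The following standalone C++ sketch illustrates that failure mode; it is not code from the patch, fits_in_default_index is a hypothetical helper mirroring the CHECK_LT guard, and index_t here is a local alias rather than MXNet's own type.

#include <cstdint>
#include <iostream>

// Stand-in for MXNet's default (non-int64) tensor index type.
using index_t = int32_t;

// Mirrors the guard the patch installs at each call site:
// element counts must stay strictly below 2^31 - 1.
bool fits_in_default_index(int64_t num_elements) {
  return num_elements < (int64_t{1} << 31) - 1;
}

int main() {
  // 3 * 2^30 elements: accepted by the old 2^32 bound, but too
  // large for a signed 32-bit index.
  const int64_t num_elements = int64_t{3} << 30;

  std::cout << "old 2^32 guard passes: "
            << (num_elements < (int64_t{1} << 32) - 1) << "\n";  // prints 1
  std::cout << "new 2^31 guard passes: "
            << fits_in_default_index(num_elements) << "\n";      // prints 0

  // What the old guard let through: on two's-complement targets the
  // count wraps to a negative value when narrowed to 32 bits.
  std::cout << "narrowed to index_t: "
            << static_cast<index_t>(num_elements) << "\n";       // negative
  return 0;
}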