25 #ifndef MXNET_NDARRAY_H_ 26 #define MXNET_NDARRAY_H_ 28 #include <dmlc/base.h> 29 #include <dmlc/logging.h> 31 #include <dmlc/type_traits.h> 32 #include <dmlc/registry.h> 33 #include <nnvm/node.h> 40 #if MXNET_USE_MKLDNN == 1 47 #if DMLC_USE_CXX11 == 0 48 #error "cxx11 was required for ndarray module" 95 bool delay_alloc =
false,
int dtype = mshadow::default_type_flag)
96 : ptr_(std::make_shared<Chunk>(shape, ctx, delay_alloc, dtype)),
98 entry_({
nullptr, 0, 0}) {
103 bool delay_alloc =
true,
int dtype = mshadow::default_type_flag,
104 std::vector<int> aux_types = {}, std::vector<TShape> aux_shapes = {},
113 ptr_ = std::make_shared<Chunk>(
TShape(mshadow::Shape1(0)), ctx,
true, dtype);
116 entry_ = {
nullptr, 0, 0};
126 : ptr_(std::make_shared<Chunk>(data, dev_id)), shape_(data.shape_),
128 entry_({
nullptr, 0, 0}) {
139 NDArray(
const TBlob &data,
int dev_id,
const std::function<
void()>& deleter)
140 : ptr_(new Chunk(data, dev_id),
141 [deleter](Chunk *p) {
147 entry_({
nullptr, 0, 0}) {
152 : ptr_(std::make_shared<Chunk>(shared_pid, shared_id, shape, dtype)), shape_(shape),
153 dtype_(dtype), storage_type_(
kDefaultStorage), entry_({
nullptr, 0, 0}) {
167 const TBlob &data,
const std::vector<TBlob> &aux_data,
int dev_id)
168 : ptr_(std::make_shared<Chunk>(stype, data, aux_data, dev_id)), shape_(shape),
169 dtype_(data.type_flag_), storage_type_(stype), entry_({
nullptr, 0, 0}) {
176 ptr_->Init(shape, this->dtype_);
177 this->shape_ = shape;
183 shape_ = ptr_->storage_shape;
199 return byte_offset_ > 0 || shape() != ptr_->storage_shape;
204 return ptr_ == other.ptr_ &&
205 shape_ == other.shape_ &&
206 byte_offset_ == other.byte_offset_ &&
207 dtype_ == other.dtype_;
222 CHECK(ptr_ !=
nullptr);
224 <<
"storage_shape() is not intended for kDefaultStorage.";
225 return ptr_->storage_shape;
235 <<
"aux_shape() is not intended for kDefaultStorage.";
236 return ptr_->aux_shapes[index];
242 <<
"aux_shapes() is not intended for kDefaultStorage.";
243 return ptr_->aux_shapes;
249 <<
"aux_types() is not intended for kDefaultStorage.";
250 return ptr_->aux_types;
262 <<
"set_aux_shape() is not intended for kDefaultStorage.";
263 ptr_->set_aux_shape(index, shape);
283 auto stype = storage_type();
285 auto shape = aux_shape(i);
286 auto type = aux_type(i);
287 MSHADOW_TYPE_SWITCH(type, DType, {
288 auto dptr =
static_cast<DType*
>(ptr_->aux_handles[i].dptr);
290 <<
"Unexpected storage type: " << stype;
291 res =
TBlob(dptr, shape, ptr_->aux_handles[i].ctx.dev_mask(), type);
300 return ptr_->shandle.ctx;
310 return ptr_->aux_types[i];
314 return storage_type_;
318 return ptr_.get() ==
nullptr;
321 bool fresh_out_grad()
const;
323 void set_fresh_out_grad(
bool state)
const;
329 if (is_none())
return false;
330 auto stype = storage_type();
332 <<
"storage_initialized() is not intended for kDefaultStorage.";
335 <<
"inconsistent storage shape " << storage_shape()
339 CHECK_EQ(aux_shape(
csr::kIdx)[0], storage_shape()[0])
340 <<
"inconsistent storage shape " << storage_shape()
341 <<
" vs. aux shape " << aux_shape(
csr::kIdx);
344 LOG(FATAL) <<
"Unknown storage type";
353 return ptr_->shandle;
360 if (is_none())
return;
368 if (is_none())
return;
376 },
Context{}, {}, {ptr_->var});
389 return var()->version();
395 void Save(dmlc::Stream *strm)
const;
401 bool LegacyLoad(dmlc::Stream *strm,
const uint32_t magic);
407 bool Load(dmlc::Stream *strm);
486 void SyncCopyFromCPU(
const void *data,
size_t size)
const;
491 void SyncCopyFromNDArray(
const NDArray &src,
int i = -1,
int j = -1);
503 void SyncCopyToCPU(
void *data,
size_t size)
const;
509 void SyncCheckFormat(
const bool full_check)
const;
540 NDArray aux_ndarray(
size_t i)
const;
557 <<
"AsArray is intended only for kDefaultStorage.";
558 CHECK_GE(ptr_->shandle.size,
559 shape.Size() * mshadow::mshadow_sizeof(dtype))
560 <<
"NDArray.AsArray: target memory size is bigger";
575 DLManagedTensor* ToDLPack()
const;
588 static NDArray FromDLPack(
const DLManagedTensor* tensor);
598 CHECK(shape_ == arr.shape_) <<
"ndarray shape is different from the target";
599 CHECK(dtype_ == arr.dtype_) <<
"ndarray dtype is different from the target";
602 <<
"Only to be used with CSR and RSP storage types";
605 arr.ptr_->shandle = ptr_->shandle;
606 ptr_->shandle = shandle_dst;
608 ptr_->storage_shape = arr.ptr_->storage_shape;
609 ptr_->storage_type = arr.ptr_->storage_type;
610 ptr_->ctx = arr.ptr_->ctx;
614 CHECK(ptr_->aux_handles.size() == arr.ptr_->aux_handles.size())
615 <<
"ndarray number of aux_handles is different from target";
616 for (
auto &aux_handle : arr.ptr_->aux_handles) {
618 ptr_->aux_handles[aux_idx] = aux_handle;
619 aux_handle = aux_dst;
622 ptr_->aux_types = arr.ptr_->aux_types;
623 ptr_->aux_shapes = arr.ptr_->aux_shapes;
643 ret.entry_ = nnvm::NodeEntry{
nullptr, 0, 0};
647 nnvm::Symbol get_autograd_symbol()
const;
654 ptr_->CheckAndAlloc();
670 ptr_->CheckAndAlloc(shape.Size() * mshadow::mshadow_sizeof(dtype_));
679 <<
"CheckAndAlloc(aux_shapes) is not intended for kDefaultStorage";
680 ptr_->CheckAndAlloc(shape_, aux_shapes, dtype_);
684 <<
"CheckAndAllocData is not intended for kDefaultStorage";
685 ptr_->CheckAndAllocData(storage_shape, dtype_);
689 <<
"CheckAndAllocAuxData is not intended for kDefaultStorage";
690 ptr_->CheckAndAllocAuxData(i, aux_shape);
693 #if MXNET_USE_MKLDNN == 1 698 explicit NDArray(
const std::shared_ptr<mkldnn::memory> &mkldnn_mem);
703 explicit NDArray(mkldnn::memory::primitive_desc mem_pd);
707 bool IsMKLDNNData()
const {
708 return ptr_->IsMKLDNN();
713 bool IsDefaultData()
const {
714 return ptr_->IsDefault();
726 const mkldnn::memory *GetMKLDNNData()
const;
731 const mkldnn::memory *GetMKLDNNData(
732 const mkldnn::memory::primitive_desc &desc)
const;
738 const mkldnn::memory *GetMKLDNNDataReorder(
739 const mkldnn::memory::primitive_desc &desc)
const;
744 void CopyFrom(
const mkldnn::memory &mem);
749 mkldnn::memory *CreateMKLDNNData(
750 const mkldnn::memory::primitive_desc &desc);
757 void Reorder2DefaultAsync();
758 void MKLDNNDataReorderAsync(
const mkldnn::memory::primitive_desc &desc);
764 NDArray Reorder2Default()
const;
766 void InvalidateMKLDNNData();
783 void UpdateMKLDNNMemDesc(mkldnn::memory::format format);
792 static void Save(dmlc::Stream* fo,
793 const std::vector<NDArray>& data,
794 const std::vector<std::string>& names);
801 static void Load(dmlc::Stream* fi,
802 std::vector<NDArray>* data,
803 std::vector<std::string>* keys);
819 std::vector<Storage::Handle> aux_handles;
821 #if MXNET_USE_MKLDNN == 1 824 std::shared_ptr<MKLDNNMemory> mkl_mem_;
841 std::vector<int> aux_types;
850 std::vector<TShape> aux_shapes;
853 Chunk() : static_data(true), delay_alloc(false) {}
856 Chunk(
TShape shape,
Context ctx_,
bool delay_alloc_,
int dtype)
857 : static_data(false), delay_alloc(true), ctx(ctx_) {
858 auto size = shape.Size();
859 storage_shape = shape;
861 shandle.
size = size * mshadow::mshadow_sizeof(dtype);
863 if (!delay_alloc_) this->CheckAndAlloc();
866 Chunk(
const TBlob &data,
int dev_id)
867 : static_data(true), delay_alloc(false) {
870 if (data.
dev_mask() == cpu::kDevMask) {
873 CHECK_EQ(data.
dev_mask(), gpu::kDevMask);
880 storage_shape = data.
shape_;
883 Chunk(
int shared_pid,
int shared_id,
const TShape& shape,
int dtype)
884 : static_data(false), delay_alloc(false) {
887 shandle.
size = shape.Size() * mshadow::mshadow_sizeof(dtype);
892 storage_shape = shape;
896 bool delay_alloc_,
int dtype,
const std::vector<int> &aux_types_,
897 const std::vector<TShape> &aux_shapes_)
898 : static_data(false), delay_alloc(delay_alloc_), storage_type(storage_type_),
899 aux_types(aux_types_), ctx(ctx_), storage_shape(storage_shape_),
900 aux_shapes(aux_shapes_) {
904 for (
size_t i = 0; i < aux_shapes.size(); i++) {
905 CheckAndAllocAuxData(i, aux_shapes[i]);
908 aux_handles[i].ctx = ctx;
911 CheckAndAllocData(storage_shape, dtype);
916 const std::vector<TBlob> &aux_data,
int dev_id)
917 : static_data(true), delay_alloc(false), storage_type(storage_type_) {
918 using namespace mshadow;
923 if (data.
dev_mask() == cpu::kDevMask) {
926 CHECK_EQ(data.
dev_mask(), gpu::kDevMask);
933 storage_shape = data.
shape_;
935 for (
const auto &aux : aux_data) {
937 aux_handle.
ctx = ctx;
938 aux_handle.
dptr = aux.dptr_;
939 aux_handle.
size = aux.shape_.Size() * mshadow_sizeof(aux.type_flag_);
940 aux_handles.push_back(aux_handle);
941 aux_types.emplace_back(aux.type_flag_);
942 aux_shapes.emplace_back(aux.shape_);
947 inline void set_aux_shape(
const size_t i,
const TShape& shape) {
948 aux_shapes[i] = shape;
949 if (storage_shape.ndim() > 0) {
951 storage_shape[0] = shape[0];
953 storage_shape[0] = shape[0];
959 inline void CheckAndAlloc(
void) {
962 #if MXNET_USE_MKLDNN == 1 971 void CheckAndAlloc(uint64_t dbytes) {
973 <<
"CheckAndAlloc(dbytes) is only intended for kDefaultStorage";
974 dbytes =
std::max(dbytes, static_cast<uint64_t>(shandle.
size));
977 #if MXNET_USE_MKLDNN == 1 981 }
else if (shandle.
size < dbytes) {
986 #if MXNET_USE_MKLDNN == 1 992 void Init(
const TShape &shape,
int dtype) {
993 auto size = shape.Size();
994 storage_shape = shape;
995 shandle.
size = size * mshadow::mshadow_sizeof(dtype);
996 this->CheckAndAlloc();
998 inline void CheckAndAlloc(
const TShape &shape,
const std::vector<TShape> &aux_shapes,
1005 TShape storage_shape(shape);
1006 storage_shape[0] = aux_shape[0];
1007 CheckAndAllocData(storage_shape, dtype);
1011 CheckAndAllocData(aux_shapes[
csr::kIdx], dtype);
1013 LOG(FATAL) <<
"Storage type " << storage_type <<
" not implemented for CheckAndAlloc";
1020 void CheckAndAllocData(
const TShape &shape,
int dtype);
1022 #if MXNET_USE_MKLDNN == 1 1025 void SetMKLMem(
const TShape &shape,
int dtype);
1028 void Reorder2Default();
1030 void MKLDNNDataReorder(
const mkldnn::memory::primitive_desc &desc);
1031 bool IsMKLDNN()
const;
1032 bool IsDefault()
const;
1040 inline void CheckAndAllocAuxData(
size_t i,
const TShape &shape) {
1041 CHECK_EQ(shape.ndim(), 1) <<
"shape must be 1D in CheckAndAllocAuxData";
1043 <<
"storage type cannot be kUndefinedStorage in CheckAndAllocAuxData";
1045 <<
"storage type cannot be kDefaultStorage in CheckAndAllocAuxData";
1046 if (aux_handles.size() <= i) {
1047 aux_handles.resize(i + 1);
1049 size_t aux_bytes = shape.Size() * mshadow::mshadow_sizeof(aux_types[i]);
1050 if (aux_handles[i].size < aux_bytes) {
1057 set_aux_shape(i, shape);
1063 void SetTBlob()
const;
1066 std::shared_ptr<Chunk> ptr_{
nullptr};
1070 size_t byte_offset_ = 0;
1074 bool reuse_ =
false;
1078 nnvm::NodeEntry entry_;
1086 mutable TBlob tblob_;
1251 typedef std::function<void (
NDArray **used_vars,
1275 :
public dmlc::FunctionRegEntryBase<NDArrayFunctionReg,
1276 NDArrayAPIFunction> {
1302 int num_params,
char **param_keys,
char **param_vals) {
1303 (*fsetvalue)(s[0], mutate_vars[0]);
1305 num_mutate_vars = 1; num_scalars = 1;
1306 this->add_argument(
"src",
"real_t",
"Source input to the function.");
1319 body = [fternary](
NDArray **used_vars,
1321 int num_params,
char **param_keys,
char **param_vals) {
1322 (*fternary)(*used_vars[0], *used_vars[1], *used_vars[2], mutate_vars[0]);
1324 num_use_vars = 3; num_mutate_vars = 1;
1326 this->add_argument(
"lhs",
"NDArray",
"Left operand to the function.");
1327 this->add_argument(
"mhs",
"NDArray",
"Middle operand to the function.");
1328 this->add_argument(
"rhs",
"NDArray",
"Right operand to the function.");
1341 int num_params,
char **param_keys,
char **param_vals) {
1342 (*fbinary)(*used_vars[0], *used_vars[1], mutate_vars[0]);
1344 num_use_vars = 2; num_mutate_vars = 1;
1346 this->add_argument(
"lhs",
"NDArray",
"Left operand to the function.");
1347 this->add_argument(
"rhs",
"NDArray",
"Right operand to the function.");
1360 int num_params,
char **param_keys,
char **param_vals) {
1361 (*fscalar)(*used_vars[0], s[0], mutate_vars[0]);
1363 num_use_vars = 1; num_mutate_vars = 1; num_scalars = 1;
1365 this->add_argument(
"lhs",
"NDArray",
"Left operand to the function.");
1366 this->add_argument(
"rhs",
"real_t",
"Right operand to the function.");
1378 int num_params,
char **param_keys,
char **param_vals) {
1379 (*funary)(*used_vars[0], mutate_vars[0]);
1381 num_use_vars = 1; num_mutate_vars = 1;
1383 this->add_argument(
"src",
"NDArray",
"Source input to the function.");
1393 void (*fgeneric)(
NDArray **used_vars,
1396 const std::map<std::string, std::string>& param)) {
1398 int num_params,
char **param_keys,
char **param_vals) {
1399 std::map<std::string, std::string> param;
1400 for (
int i = 0; i < num_params; ++i) {
1401 param[param_keys[i]] = param_vals[i];
1403 fgeneric(used_vars, s, mutate_vars, param);
1413 num_use_vars = n;
return *
this;
1421 num_mutate_vars = n;
return *
this;
1429 num_scalars = n;
return *
this;
1437 type_mask = tmask;
return *
this;
1452 #define MXNET_REGISTER_NDARRAY_FUN(name) \ 1453 DMLC_REGISTRY_REGISTER(::mxnet::NDArrayFunctionReg, NDArrayFunctionReg, name) 1461 #endif // MXNET_NDARRAY_H_
NDArrayStorageType
Definition: ndarray.h:61
NDArrayFunctionReg & set_num_mutate_vars(unsigned n)
set the number of mutate variables
Definition: ndarray.h:1420
NDArrayFormatErr
Definition: ndarray.h:68
Engine::VarHandle var() const
Definition: ndarray.h:380
void RandomSeed(uint32_t seed)
Seed all random number generator in mxnet.
NDArrayStorageType storage_type() const
Definition: ndarray.h:313
Engine that schedules all the operations according to dependency.
TShape shape_
shape of the tensor
Definition: tensor_blob.h:72
const TShape & storage_shape() const
Definition: ndarray.h:221
NDArrayFunctionReg()
constructor
Definition: ndarray.h:1288
namespace of mxnet
Definition: base.h:118
void ReshapeAndAlloc(const TShape &shape)
Allocate the space if the allocation has been delayed or the requested size is bigger than the availa...
Definition: ndarray.h:666
NDArray operator*(const NDArray &lhs, const NDArray &rhs)
elementwise multiplication
virtual void Free(Handle handle)=0
Free storage.
NDArrayFunctionReg & set_num_use_vars(unsigned n)
set the number of use variables
Definition: ndarray.h:1412
DMLC_DECLARE_TRAITS(has_saveload, mxnet::NDArray, true)
traits
mshadow::default_real_t real_t
data type that will be used to store ndarray
Definition: base.h:126
static Context GPU(int32_t dev_id=-1)
int type_mask
information on how function should be called from API
Definition: ndarray.h:1284
NDArrayFunctionReg & set_function(void(*funary)(const NDArray &src, NDArray *out))
set the function body to a unary NDArray function this will also auto set the parameters correctly ...
Definition: ndarray.h:1375
NDArray Detach() const
Return a copy of this NDArray without autograd history.
Definition: ndarray.h:641
int type_flag_
type flag of the tensor blob
Definition: tensor_blob.h:74
NDArrayFunctionReg & set_num_scalars(unsigned n)
set the number of scalar arguments
Definition: ndarray.h:1428
nnvm::TShape TShape
Shape data structure used to record shape information.
Definition: base.h:128
unsigned num_mutate_vars
number of variable mutated by this function
Definition: ndarray.h:1280
execution time context. The information needed in runtime for actual execution.
Definition: base.h:257
void * dptr
Pointer to the data.
Definition: storage.h:45
NDArrayFunctionReg & set_function(void(*fscalar)(const NDArray &lhs, const real_t &rhs, NDArray *out))
set the function body to a binary NDArray function this will also auto set the parameters correctly ...
Definition: ndarray.h:1356
base class of engine variables.
Definition: engine.h:44
Context ctx
Context information about device and ID.
Definition: storage.h:53
Storage::Handle storage_handle() const
get storage handle
Definition: ndarray.h:349
NDArray()
default constructor
Definition: ndarray.h:85
unsigned num_use_vars
number of variable used by this function
Definition: ndarray.h:1278
int shared_id
Definition: storage.h:58
NDArrayFunctionReg & set_function(void(*fternary)(const NDArray &lhs, const NDArray &mhs, const NDArray &rhs, NDArray *out))
set the function body to a ternary NDArray function this will also auto set the parameters correctly ...
Definition: ndarray.h:1315
Symbol max(const std::string &symbol_name, Symbol data, dmlc::optional< Shape > axis=dmlc::optional< Shape >(), bool keepdims=false, bool exclude=false)
Definition: op.h:2756
RowSparseAuxType
Definition: ndarray.h:58
bool is_none() const
Definition: ndarray.h:317
all the scalars should go before use_vars
Definition: ndarray.h:1262
void SampleExponential(real_t lambda, NDArray *out)
Sample exponential distribution for each elements of out.
void SparseUpdateChunk(const NDArray &arr) const
Update ndarray chunk storage handles using existing ndarray storage handles Also update the aux_handl...
Definition: ndarray.h:597
void * dptr_
pointer to the data
Definition: tensor_blob.h:70
virtual VarHandle NewVariable()=0
Allocate a new variable, the variable can then be used to schedule the operation concurrently via dep...
whether this function allows the handles in the target to be empty NDArray that are not yet initializ...
Definition: ndarray.h:1271
const TShape & shape() const
Definition: ndarray.h:213
Definition: ndarray.h:1457
virtual void WaitForVar(VarHandle var)=0
Wait for a variable.
const std::vector< TShape > & aux_shapes() const
Definition: ndarray.h:240
bool IsView() const
Definition: ndarray.h:191
Context ctx() const
Definition: ndarray.h:298
void CopyFromTo(const NDArray &from, const NDArray *to, int priority=0)
issue a copy operation from one NDArray to another; the two ndarrays can sit on different devices; this...
CSRAuxType
Definition: ndarray.h:54
void SampleGaussian(real_t mu, real_t sigma, NDArray *out)
Sample gaussian distribution for each elements of out.
Storage manager across multiple devices.
void WaitToRead() const
Block until all the pending write operations with respect to current NDArray are finished, and read can be performed.
Definition: ndarray.h:359
virtual void PushAsync(AsyncFn exec_fun, Context exec_ctx, std::vector< VarHandle > const &const_vars, std::vector< VarHandle > const &mutable_vars, FnProperty prop=FnProperty::kNormal, int priority=0, const char *opr_name=nullptr, bool wait=false)=0
Push an asynchronous operation to the engine.
int dtype() const
Definition: ndarray.h:305
bool storage_initialized() const
Returns true if a sparse ndarray's aux_data and storage are initialized Throws an exception if the in...
Definition: ndarray.h:328
Storage handle.
Definition: storage.h:41
static Context CPUShared(int32_t dev_id=0)
void set_aux_shape(size_t index, const TShape &shape) const
For a sparse operation on a csr matrix for example, the size of the column index array is an estimate...
Definition: ndarray.h:260
void CheckAndAllocData(const TShape &storage_shape) const
Definition: ndarray.h:682
size_t num_aux_data(NDArrayStorageType stype)
NDArrayFunctionReg & set_type_mask(int tmask)
set type mask
Definition: ndarray.h:1436
void WaitToWrite() const
Block until all the pending read/write operations with respect to current NDArray are finished...
Definition: ndarray.h:367
NDArray(const TBlob &data, int dev_id, const std::function< void()> &deleter)
constructing a static NDArray that shares data with TBlob which is with deleter Use with caution: all...
Definition: ndarray.h:139
Handle Alloc(size_t size, Context ctx)
Allocate a new contiguous memory for a given size.
Definition: storage.h:66
NDArray operator-(const NDArray &lhs, const NDArray &rhs)
elementwise subtraction
NDArrayFunctionReg & set_function(void(*fsetvalue)(const real_t &rhs, NDArray *out))
set the function body to a NDArray setvalue function this will also auto set the parameters correctly...
Definition: ndarray.h:1299
void Init(const TShape &shape)
initialize the NDArray, assuming it is not assigned a meaningful shape before
Definition: ndarray.h:175
NDArray(int shared_pid, int shared_id, const TShape &shape, int dtype)
create ndarray from shared memory
Definition: ndarray.h:151
NDArray operator+(const NDArray &lhs, const NDArray &rhs)
elementwise add
size_t byte_offset() const
Definition: ndarray.h:384
void SampleUniform(real_t begin, real_t end, NDArray *out)
Sample uniform distribution for each elements of out.
Registry entry for NDArrayFunction.
Definition: ndarray.h:1274
NDArrayFunctionReg & set_function(void(*fbinary)(const NDArray &lhs, const NDArray &rhs, NDArray *out))
set the function body to a binary NDArray function this will also auto set the parameters correctly ...
Definition: ndarray.h:1337
static Context CPU(int32_t dev_id=0)
runtime functions for NDArray
Definition: imperative.h:39
int aux_type(size_t i) const
Definition: ndarray.h:308
OnComplete Callback to the engine, called by AsyncFn when action completes.
Definition: engine.h:73
all the use_vars should go before scalars
Definition: ndarray.h:1260
NDArray AsArray(const TShape &shape, int dtype) const
Create a NDArray that shares memory with current one The new array must have smaller memory size than...
Definition: ndarray.h:555
size_t version() const
return var version of the NDArray
Definition: ndarray.h:388
void CheckAndAlloc(const std::vector< TShape > &aux_shapes) const
Definition: ndarray.h:677
unsigned num_scalars
number of scalars used by this function
Definition: ndarray.h:1282
const TBlob & data() const
Definition: ndarray.h:269
void CheckAndAllocAuxData(size_t i, const TShape &aux_shape) const
Definition: ndarray.h:687
NDArray(const NDArrayStorageType stype, const TShape &shape, const TBlob &data, const std::vector< TBlob > &aux_data, int dev_id)
constructing a static NDArray of non-default storage that shares data with TBlob Use with caution: al...
Definition: ndarray.h:166
void CheckAndAlloc() const
Allocate the space if it is delayed allocated. This is an internal function used by system that norma...
Definition: ndarray.h:652
mshadow::index_t index_t
index type usually use unsigned
Definition: base.h:124
size_t size
Size of the storage.
Definition: storage.h:49
void SetShapeFromChunk()
set the correct shape of NDArray directly from the storage_shape of its own chunk.
Definition: ndarray.h:182
TBlob aux_data(size_t i) const
Definition: ndarray.h:282
void SampleGenNegBinomial(real_t mu, real_t alpha, NDArray *out)
Sample generalized negative binomial distribution for each elements of out.
Context information about the execution environment.
Definition: base.h:133
void SamplePoisson(real_t lambda, NDArray *out)
Sample Poisson distribution for each elements of out.
const TShape & aux_shape(size_t index) const
get the shape of aux_data(index)
Definition: ndarray.h:233
ndarray interface
Definition: ndarray.h:82
NDArray(Context ctx, int dtype=mshadow::default_type_flag)
constructs a new dynamic NDArray whose shape is unknown, hence the NDArray is inherently lazily creat...
Definition: ndarray.h:112
NDArray(const TBlob &data, int dev_id)
constructing a static NDArray that shares data with TBlob Use with caution: allocate ONLY ONE NDArray...
Definition: ndarray.h:125
int dev_mask() const
device mask of the corresponding device
Definition: tensor_blob.h:242
Symbol Reshape(const std::string &symbol_name, Symbol data, Shape shape=Shape(), bool reverse=false, Shape target_shape=Shape(), bool keep_highest=false)
Definition: op.h:302
void ElementwiseSum(const std::vector< NDArray > &source, NDArray *out, int priority=0)
Perform elementwise sum over each data from source, store result into out.
std::function< void(NDArray **used_vars, real_t *scalars, NDArray **mutate_vars, int num_params, char **param_keys, char **param_vals)> NDArrayAPIFunction
definition of NDArray function
Definition: ndarray.h:1256
void SampleNegBinomial(int32_t k, real_t p, NDArray *out)
Sample negative binomial distribution for each elements of out.
NDArrayFunctionReg & set_function(void(*fgeneric)(NDArray **used_vars, real_t *s, NDArray **mutate_vars, const std::map< std::string, std::string > ¶m))
set the function body to a unary NDArray function this will also auto set the parameters correctly ...
Definition: ndarray.h:1392
bool IsSame(const NDArray &other) const
Definition: ndarray.h:203
int shared_pid
Id for IPC shared memory.
Definition: storage.h:57
tensor blob class that can be used to hold tensor of any dimension, any device and any data type...
Definition: tensor_blob.h:66
const std::vector< int > & aux_types() const
Definition: ndarray.h:247
void SampleGamma(real_t alpha, real_t beta, NDArray *out)
Sample gamma distribution for each elements of out.
NDArray(const TShape &shape, Context ctx, bool delay_alloc=false, int dtype=mshadow::default_type_flag)
constructs a new dynamic NDArray
Definition: ndarray.h:94
NDArray operator/(const NDArray &lhs, const NDArray &rhs)
elementwise division
NDArrayFunctionTypeMask
mask information on how functions can be exposed
Definition: ndarray.h:1258