#ifndef MXNET_NDARRAY_H_
#define MXNET_NDARRAY_H_

#include <dmlc/base.h>
#include <dmlc/logging.h>
#include <dmlc/type_traits.h>
#include <dmlc/registry.h>
#include <nnvm/node.h>
// ...
#if MKL_EXPERIMENTAL == 1
#include <mkl_memory.h>
#endif

#if DMLC_USE_CXX11 == 0
#error "cxx11 was required for ndarray module"
#endif

namespace mxnet {

/*! \brief enumeration of aux data for csr storage */
namespace csr {
enum CSRAuxType {kIndPtr, kIdx};
}

/*! \brief enumeration of aux data for row-sparse storage */
namespace rowsparse {
enum RowSparseAuxType {kIdx};
}

enum NDArrayStorageType {
  kUndefinedStorage = -1,  //!< undefined storage
  kDefaultStorage,         //!< dense
  kRowSparseStorage,       //!< row sparse
  kCSRStorage,             //!< csr
};

// ...

/*!
 * \brief ndarray interface
 */
class NDArray {
 public:
  /*! \brief default constructor */
  NDArray() {
#if MKL_EXPERIMENTAL == 1
    Mkl_mem_ = MKLMemHolder::create();
#endif
  }
  /*!
   * \brief constructs a new dynamic NDArray
   * \param shape the shape of the array
   * \param ctx the context of the NDArray
   * \param delay_alloc whether to delay the allocation
   * \param dtype the data type of this ndarray
   */
  NDArray(const TShape &shape, Context ctx,
          bool delay_alloc = false, int dtype = mshadow::default_type_flag)
      : ptr_(std::make_shared<Chunk>(shape, ctx, delay_alloc, dtype)),
        shape_(shape), dtype_(dtype), storage_type_(kDefaultStorage),
        entry_({nullptr, 0, 0}) {
#if MKL_EXPERIMENTAL == 1
    Mkl_mem_ = std::make_shared<MKLMemHolder>();
#endif
  }
  /*! \brief constructor for NDArray with non-default storage type */
  NDArray(const NDArrayStorageType stype, const TShape &shape, Context ctx,
          bool delay_alloc = true, int dtype = mshadow::default_type_flag,
          std::vector<int> aux_types = {}, std::vector<TShape> aux_shapes = {},
          TShape storage_shape = TShape(mshadow::Shape1(0)))
      : shape_(shape), dtype_(dtype), storage_type_(stype),
        entry_({nullptr, 0, 0}) {
    // assign default aux types if not given
    if (aux_types.size() == 0) {
      if (stype == kRowSparseStorage) {
        aux_types = {mshadow::kInt64};
      } else if (stype == kCSRStorage) {
        aux_types = {mshadow::kInt64, mshadow::kInt64};
      } else {
        LOG(FATAL) << "Unknown storage type " << stype;
      }
    }
    // assign default aux shapes if not given
    if (aux_shapes.size() == 0) {
      if (stype == kRowSparseStorage) {
        aux_shapes = {TShape(mshadow::Shape1(0))};
      } else if (stype == kCSRStorage) {
        // aux shapes for indptr and indices
        aux_shapes = {TShape(mshadow::Shape1(0)), TShape(mshadow::Shape1(0))};
      } else {
        LOG(FATAL) << "Unknown storage type " << stype;
      }
    }
    if (storage_shape.Size() == 0) {
      if (stype == kRowSparseStorage) {
        storage_shape = shape;
        storage_shape[0] = aux_shapes[rowsparse::kIdx][0];
      } else if (stype == kCSRStorage) {
        storage_shape = aux_shapes[csr::kIdx];
      } else {
        LOG(FATAL) << "Unknown storage type " << stype;
      }
    }
    ptr_ = std::make_shared<Chunk>(stype, storage_shape, ctx, delay_alloc,
                                   dtype, aux_types, aux_shapes);
#if MKL_EXPERIMENTAL == 1
    Mkl_mem_ = std::make_shared<MKLMemHolder>();
#endif
  }
  /*!
   * \brief constructing a static NDArray that shares data with a TBlob.
   *  Use with caution: allocate ONLY ONE NDArray for each TBlob, and make sure
   *  the memory region stays available throughout the life of the NDArray.
   * \param data the memory content of the static data
   * \param dev_id the device id this tensor sits at
   */
  NDArray(const TBlob &data, int dev_id)
      : ptr_(std::make_shared<Chunk>(data, dev_id)), shape_(data.shape_),
        dtype_(data.type_flag_), storage_type_(kDefaultStorage),
        entry_({nullptr, 0, 0}) {
#if MKL_EXPERIMENTAL == 1
    Mkl_mem_ = std::make_shared<MKLMemHolder>();
#endif
  }
  /*! \brief create ndarray from shared memory */
  NDArray(int shared_pid, int shared_id, const TShape& shape, int dtype)
      : ptr_(std::make_shared<Chunk>(shared_pid, shared_id, shape, dtype)), shape_(shape),
        dtype_(dtype), storage_type_(kDefaultStorage), entry_({nullptr, 0, 0}) {
#if MKL_EXPERIMENTAL == 1
    Mkl_mem_ = std::make_shared<MKLMemHolder>();
#endif
  }
  /*!
   * \brief constructing a static NDArray of non-default storage that shares
   *  data with TBlobs. Use with caution: allocate ONLY ONE NDArray for each
   *  group of TBlobs, and make sure the memory regions stay available
   *  throughout the life of the NDArray.
   */
  NDArray(const NDArrayStorageType stype, const TShape &shape,
          const TBlob &data, const std::vector<TBlob> &aux_data, int dev_id)
      : ptr_(std::make_shared<Chunk>(stype, data, aux_data, dev_id)), shape_(shape),
        dtype_(data.type_flag_), storage_type_(stype), entry_({nullptr, 0, 0}) {
#if MKL_EXPERIMENTAL == 1
    Mkl_mem_ = std::make_shared<MKLMemHolder>();
#endif
  }
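  // Usage sketch (illustrative, not part of the original header; shapes and
  // contexts are arbitrary):
  //
  //   // dense 3x4 array on CPU, allocated eagerly
  //   NDArray dense(TShape(mshadow::Shape2(3, 4)), Context::CPU());
  //   // CSR array with the same logical shape; data and aux storage stay
  //   // delay-allocated until the number of non-zeros is known
  //   NDArray csr_arr(kCSRStorage, TShape(mshadow::Shape2(3, 4)), Context::CPU());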
  /*! \brief the shape of the current NDArray */
  inline const TShape& shape() const {
    return shape_;
  }
  /*!
   * \brief the shape of the underlying chunk which stores the NDArray values.
   *  Only intended for non-default storage; for row-sparse storage it is the
   *  shape of the tensor which stores the non-zero values.
   */
  inline const TShape &storage_shape() const {
    CHECK(ptr_ != nullptr);
    CHECK_NE(storage_type(), kDefaultStorage)
             << "storage_shape() is not intended for kDefaultStorage.";
    return ptr_->storage_shape;
  }
  /*!
   * \brief get the shape of aux_data(index)
   * \param index the index of the aux data
   */
  inline const TShape& aux_shape(size_t index) const {
    CHECK_NE(storage_type(), kDefaultStorage)
             << "aux_shape() is not intended for kDefaultStorage.";
    return ptr_->aux_shapes[index];
  }
  /*! \brief get the shapes of all aux data */
  const std::vector<TShape>& aux_shapes() const {
    CHECK_NE(storage_type(), kDefaultStorage)
             << "aux_shapes() is not intended for kDefaultStorage.";
    return ptr_->aux_shapes;
  }
  /*! \brief get the dtypes of all aux data */
  const std::vector<int>& aux_types() const {
    CHECK_NE(storage_type(), kDefaultStorage)
             << "aux_types() is not intended for kDefaultStorage.";
    return ptr_->aux_types;
  }
  /*!
   * \brief For a sparse operation on a csr matrix, for example, the size of
   *  the column index array is an estimate at first, allocated with enough
   *  capacity for the final result. Once the operation is done and the exact
   *  size is known, reset the shape with this function.
   */
  inline void set_aux_shape(size_t index, const TShape &shape) const {
    ptr_->set_aux_shape(index, shape);
  }
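  // Usage sketch (illustrative): after a sparse operator finishes, shrink the
  // CSR column-index shape to the exact number of non-zeros; `out` and `nnz`
  // are hypothetical.
  //
  //   out.set_aux_shape(csr::kIdx, TShape(mshadow::Shape1(nnz)));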
  /*!
   * \brief get the TBlob of the i-th aux data
   * \param i the index of the aux data
   */
  inline TBlob aux_data(size_t i) const {
    auto stype = storage_type();
    TBlob res;
    auto shape = aux_shape(i);
    auto type = aux_type(i);
    MSHADOW_TYPE_SWITCH(type, DType, {
      auto dptr = static_cast<DType*>(ptr_->aux_handles[i].dptr);
      CHECK(stype == kRowSparseStorage || stype == kCSRStorage)
            << "Unexpected storage type: " << stype;
      res = TBlob(dptr, shape, ptr_->aux_handles[i].ctx.dev_mask(), type);
    });
#if MKL_EXPERIMENTAL == 1
    res.Mkl_mem_ = Mkl_mem_;
#endif
    return res;
  }
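  // Usage sketch (illustrative): reading the three pieces of a CSR array;
  // `csr_arr` is hypothetical, and NDArray::data() (declared elsewhere in
  // this class) returns the value TBlob.
  //
  //   if (csr_arr.storage_initialized()) {
  //     TBlob values  = csr_arr.data();
  //     TBlob indptr  = csr_arr.aux_data(csr::kIndPtr);
  //     TBlob indices = csr_arr.aux_data(csr::kIdx);
  //   }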
  /*! \brief get the context of the current NDArray */
  inline Context ctx() const {
    return ptr_->shandle.ctx;
  }
  /*! \brief the data type of the current NDArray */
  inline int dtype() const {
    return dtype_;
  }
  /*! \brief the data type of the i-th aux data */
  inline int aux_type(size_t i) const {
    CHECK(!is_none());
    return ptr_->aux_types[i];
  }
  /*! \brief the storage type of the current NDArray */
  inline NDArrayStorageType storage_type() const {
    return storage_type_;
  }
  /*! \return whether this ndarray is not initialized */
  inline bool is_none() const {
    return ptr_.get() == nullptr;
  }
  /*! \return the updated grad state in entry_ */
  bool fresh_out_grad() const;
  /*! \brief set the updated grad state in entry_ */
  void set_fresh_out_grad(bool state) const;
  /*!
   * \brief Returns true if a sparse ndarray's aux_data and storage are
   *  initialized. Throws an exception if the indices array shape is
   *  inconsistent. Returns false if the indices array is empty (nnz = 0)
   *  for csr/row_sparse.
   */
  inline bool storage_initialized() const {
    if (is_none()) return false;
    auto stype = storage_type();
    CHECK_NE(stype, kDefaultStorage)
             << "storage_initialized() is not intended for kDefaultStorage.";
    if (stype == kRowSparseStorage) {
      CHECK_EQ(aux_shape(rowsparse::kIdx)[0], storage_shape()[0])
               << "inconsistent storage shape " << storage_shape()
               << " vs. aux shape " << aux_shape(rowsparse::kIdx);
      return aux_shape(0).Size() != 0;
    } else if (stype == kCSRStorage) {
      CHECK_EQ(aux_shape(csr::kIdx)[0], storage_shape()[0])
               << "inconsistent storage shape " << storage_shape()
               << " vs. aux shape " << aux_shape(csr::kIdx);
      return aux_shape(0).Size() != 0;
    } else {
      LOG(FATAL) << "Unknown storage type";
    }
    return true;
  }
  /*! \brief get storage handle */
  inline Storage::Handle storage_handle() const {
    CHECK(!is_none());
    CHECK_EQ(storage_type(), kDefaultStorage);
    CheckAndAlloc();
    return ptr_->shandle;
  }
  /*!
   * \brief Block until all the pending write operations with respect to the
   *  current NDArray are finished, so that a read can be performed.
   */
  inline void WaitToRead() const {
    if (is_none()) return;
    Engine::Get()->WaitForVar(ptr_->var);
  }
  /*!
   * \brief Block until all the pending read/write operations with respect to
   *  the current NDArray are finished, so that a write can be performed.
   */
  inline void WaitToWrite() const {
    if (is_none()) return;
    // push an empty mutable function to flush all preceding reads to the variable
    Engine::Get()->PushAsync(
      [](RunContext, Engine::CallbackOnComplete on_complete) {
        on_complete();
      }, Context{}, {}, {ptr_->var});
    Engine::Get()->WaitForVar(ptr_->var);
  }
  /*! \return the associated variable of the ndarray */
  inline Engine::VarHandle var() const {
    return ptr_->var;
  }
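  // Usage sketch (illustrative): operations are scheduled asynchronously on
  // the dependency engine, so host-side reads and writes must be fenced.
  //
  //   arr.WaitToRead();    // block until pending writes to `arr` finish
  //   // ... read arr.data() on the host ...
  //   arr.WaitToWrite();   // block until pending reads/writes finish
  //   // ... overwrite arr's memory on the host ...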
  /*!
   * \brief save the content into a binary stream
   * \param strm the output stream
   */
  void Save(dmlc::Stream *strm) const;
  /*!
   * \brief load ndarrays saved before sparse ndarrays were supported
   * \param strm the input stream
   * \param magic the magic number used for version control
   */
  bool LegacyLoad(dmlc::Stream *strm, const uint32_t magic);
  /*!
   * \brief load the content from a binary stream
   * \param strm the input stream
   * \return whether the load was successful
   */
  bool Load(dmlc::Stream *strm);
  // ...
  /*!
   * \brief Copy data from CPU memory into this NDArray, blocking until the
   *  copy completes.
   * \param data the data source to copy from
   * \param size the size of the source buffer, as a count of elements
   */
  void SyncCopyFromCPU(const void *data, size_t size) const;
  /*!
   * \brief Copy from src.data()/aux_data(i) to this->data()/aux_data(j)
   */
  void SyncCopyFromNDArray(const NDArray &src, int i = -1, int j = -1);
  /*!
   * \brief Copy data from this NDArray into CPU memory, blocking until the
   *  copy completes.
   * \param data the destination buffer
   * \param size the size of the destination buffer, as a count of elements
   */
  void SyncCopyToCPU(void *data, size_t size) const;
  /*!
   * \brief check whether the NDArray format is valid
   * \param full_check if true, perform a rigorous O(N) check;
   *  otherwise a basic O(1) check
   */
  void SyncCheckFormat(const bool full_check) const;
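  // Usage sketch (illustrative): round-trip host data through a dense array.
  // Note that `size` is a count of elements, not bytes.
  //
  //   std::vector<float> host(6, 1.0f);
  //   NDArray arr(TShape(mshadow::Shape2(2, 3)), Context::CPU());
  //   arr.SyncCopyFromCPU(host.data(), host.size());  // host -> NDArray
  //   arr.SyncCopyToCPU(host.data(), host.size());    // NDArray -> host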
  /*!
   * \brief Generate a deep copy of the i-th aux data as a
   *  default-storage NDArray.
   */
  NDArray aux_ndarray(size_t i) const;
  // ...
  /*!
   * \brief Create an NDArray that shares memory with the current one.
   *  The new array must not require more memory than the current one holds.
   * \param shape the new shape
   * \param dtype the data type of the new array
   */
  inline NDArray AsArray(const TShape &shape, int dtype) const {
    CHECK_EQ(storage_type(), kDefaultStorage)
             << "AsArray is intended only for kDefaultStorage.";
    CHECK_GE(ptr_->shandle.size,
             shape.Size() * mshadow::mshadow_sizeof(dtype))
        << "NDArray.AsArray: target memory size is bigger than the source";
#if MKL_EXPERIMENTAL == 1
    if (Mkl_mem_ != nullptr) {
      // convert prv to cpu
      Mkl_mem_->check_and_prv_to_cpu(ptr_->shandle.dptr);
    }
#endif
    NDArray ret = *this;
    ret.shape_ = shape;
    ret.dtype_ = dtype;
    return ret;
  }
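  // Usage sketch (illustrative): reinterpret the same 12-element buffer under
  // a different shape without copying.
  //
  //   NDArray arr(TShape(mshadow::Shape2(3, 4)), Context::CPU());
  //   NDArray flat = arr.AsArray(TShape(mshadow::Shape1(12)), arr.dtype());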
  /*! \brief Return a copy of this NDArray without autograd history */
  NDArray Detach() const {
    NDArray ret(*this);
    ret.entry_ = nnvm::NodeEntry{nullptr, 0, 0};
    return ret;
  }

  nnvm::Symbol get_autograd_symbol() const;
  /*!
   * \brief Allocate the space if it was delay-allocated.
   *  This is an internal function used by the system; normal users
   *  should not call it.
   */
  inline void CheckAndAlloc() const {
    CHECK_EQ(storage_type(), kDefaultStorage);
    ptr_->CheckAndAlloc();
  }
  /*!
   * \brief Allocate the space if the allocation has been delayed or the
   *  requested size is bigger than the available one. Only callable on
   *  ndarrays of default storage type; effectively changes the ndarray's shape_.
   */
  void ReshapeAndAlloc(const TShape &shape) {
    CHECK_EQ(storage_type(), kDefaultStorage);
    CHECK(!is_none());
    shape_ = shape;
    ptr_->CheckAndAlloc(shape.Size() * mshadow::mshadow_sizeof(dtype_));
  }
  /*!
   * \brief Allocate memory for a non-default storage ndarray;
   *  aux_shapes are only known at run time.
   */
  inline void CheckAndAlloc(const std::vector<TShape> &aux_shapes) const {
    CHECK_NE(storage_type(), kDefaultStorage)
             << "CheckAndAlloc(aux_shapes) is not intended for kDefaultStorage";
    ptr_->CheckAndAlloc(shape_, aux_shapes, dtype_);
  }
  inline void CheckAndAllocData(const TShape &storage_shape) const {
    CHECK_NE(storage_type(), kDefaultStorage)
             << "CheckAndAllocData is not intended for kDefaultStorage";
    ptr_->CheckAndAllocData(storage_shape, dtype_);
  }
  inline void CheckAndAllocAuxData(size_t i, const TShape &aux_shape) const {
    CHECK_NE(storage_type(), kDefaultStorage)
             << "CheckAndAllocAuxData is not intended for kDefaultStorage";
    ptr_->CheckAndAllocAuxData(i, aux_shape);
  }
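  // Usage sketch (illustrative): allocate a row-sparse array once the number
  // of non-zero rows is known; `nnz_rows` is hypothetical.
  //
  //   NDArray rsp(kRowSparseStorage, TShape(mshadow::Shape2(10, 4)), Context::CPU());
  //   rsp.CheckAndAlloc({TShape(mshadow::Shape1(nnz_rows))});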
  /*!
   * \brief Save a list of ndarrays into the stream.
   * \param fo the output stream
   * \param data the NDArrays to be saved
   * \param names the names of the NDArrays; optional, can be zero length
   */
  static void Save(dmlc::Stream* fo,
                   const std::vector<NDArray>& data,
                   const std::vector<std::string>& names);
  /*!
   * \brief Load a list of ndarrays from the stream.
   * \param fi the input stream
   * \param data the NDArrays to be loaded
   * \param keys the names of the NDArrays, if saved in the file
   */
  static void Load(dmlc::Stream* fi,
                   std::vector<NDArray>* data,
                   std::vector<std::string>* keys);
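  // Usage sketch (illustrative; `weights` is a hypothetical NDArray, and
  // dmlc::Stream::Create comes from dmlc-core):
  //
  //   std::unique_ptr<dmlc::Stream> fo(dmlc::Stream::Create("params.bin", "w"));
  //   NDArray::Save(fo.get(), {weights}, {"fc1_weight"});
  //
  //   std::vector<NDArray> arrays;
  //   std::vector<std::string> names;
  //   std::unique_ptr<dmlc::Stream> fi(dmlc::Stream::Create("params.bin", "r"));
  //   NDArray::Load(fi.get(), &arrays, &names);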
 private:
  friend class Imperative;
  /*!
   * \brief the real data chunk that backs the NDArray.
   *  shandle stores the actual values; aux_handles store auxiliary data
   *  (such as indices) needed by non-default storage.
   */
  struct Chunk {
    /*! \brief storage handle from the storage engine;
     *  for non-default storage it holds the data (value) array
     */
    Storage::Handle shandle;
    /*! \brief storage handles for aux data (e.g. indices):
     *  for row_sparse, aux_handles[0] = indices;
     *  for csr, aux_handles[0] = indptr, aux_handles[1] = indices
     */
    std::vector<Storage::Handle> aux_handles;
    /*! \brief variable from the dependency engine */
    Engine::VarHandle var;
    /*! \brief if true, the data does not come from Storage and need not be freed */
    bool static_data;
    /*! \brief whether the data allocation is delayed */
    bool delay_alloc;
    /*! \brief the type of the storage */
    NDArrayStorageType storage_type = kDefaultStorage;
    /*! \brief the types of the aux data */
    std::vector<int> aux_types;
    /*! \brief the context of the data */
    Context ctx;
    /*! \brief the shape of the chunk data */
    TShape storage_shape;
    /*! \brief the shapes of the aux data */
    std::vector<TShape> aux_shapes;

    /*! \brief default constructor */
    Chunk() : static_data(true), delay_alloc(false) {}

    /*! \brief construct a chunk for a dense ndarray */
    Chunk(TShape shape, Context ctx_, bool delay_alloc_, int dtype)
        : static_data(false), delay_alloc(true), ctx(ctx_) {
      auto size = shape.Size();
      storage_shape = shape;
      var = Engine::Get()->NewVariable();
      shandle.size = size * mshadow::mshadow_sizeof(dtype);
      shandle.ctx = ctx_;
      if (!delay_alloc_) this->CheckAndAlloc();
    }
    /*! \brief construct a chunk that shares data with a TBlob */
    Chunk(const TBlob &data, int dev_id)
        : static_data(true), delay_alloc(false) {
      CHECK(storage_type == kDefaultStorage);
      var = Engine::Get()->NewVariable();
      if (data.dev_mask() == cpu::kDevMask) {
        ctx = Context::CPU();
      } else {
        CHECK_EQ(data.dev_mask(), gpu::kDevMask);
        ctx = Context::GPU(dev_id);
      }
      // init shandle
      shandle.ctx = ctx;
      shandle.dptr = data.dptr_;
      shandle.size = data.shape_.Size() * mshadow::mshadow_sizeof(data.type_flag_);
      storage_shape = data.shape_;
    }
    /*! \brief construct a chunk backed by IPC shared memory */
    Chunk(int shared_pid, int shared_id, const TShape& shape, int dtype)
        : static_data(false), delay_alloc(false) {
      var = Engine::Get()->NewVariable();
      ctx = Context::CPUShared(0);
      shandle.size = shape.Size() * mshadow::mshadow_sizeof(dtype);
      shandle.ctx = ctx;
      shandle.shared_pid = shared_pid;
      shandle.shared_id = shared_id;
      Storage::Get()->Alloc(&shandle);
      storage_shape = shape;
    }
    /*! \brief construct a chunk for a non-default storage ndarray */
    Chunk(NDArrayStorageType storage_type_, const TShape &storage_shape_, Context ctx_,
          bool delay_alloc_, int dtype, const std::vector<int> &aux_types_,
          const std::vector<TShape> &aux_shapes_)
        : static_data(false), delay_alloc(delay_alloc_), storage_type(storage_type_),
          aux_types(aux_types_), ctx(ctx_), storage_shape(storage_shape_),
          aux_shapes(aux_shapes_) {
      shandle.ctx = ctx;
      var = Engine::Get()->NewVariable();
      // aux_handles always reflect the correct number of aux data
      for (size_t i = 0; i < aux_shapes.size(); i++) {
        CheckAndAllocAuxData(i, aux_shapes[i]);
        // needed in case aux_shapes[i].Size() == 0, where aux_handles[i]
        // would otherwise keep its default value
        aux_handles[i].ctx = ctx;
      }
      if (!delay_alloc) {
        CheckAndAllocData(storage_shape, dtype);
      }
    }
    /*! \brief construct a static chunk of non-default storage that shares data with TBlobs */
    Chunk(const NDArrayStorageType storage_type_, const TBlob &data,
          const std::vector<TBlob> &aux_data, int dev_id)
        : static_data(true), delay_alloc(false), storage_type(storage_type_) {
      using namespace mshadow;
      CHECK_NE(storage_type, kDefaultStorage);
      var = Engine::Get()->NewVariable();
      // init ctx
      if (data.dev_mask() == cpu::kDevMask) {
        ctx = Context::CPU();
      } else {
        CHECK_EQ(data.dev_mask(), gpu::kDevMask);
        ctx = Context::GPU(dev_id);
      }
      // init shandle
      shandle.ctx = ctx;
      shandle.dptr = data.dptr_;
      shandle.size = data.shape_.Size() * mshadow_sizeof(data.type_flag_);
      storage_shape = data.shape_;
      // init aux handles
      for (const auto &aux : aux_data) {
        Storage::Handle aux_handle;
        aux_handle.ctx = ctx;
        aux_handle.dptr = aux.dptr_;
        aux_handle.size = aux.shape_.Size() * mshadow_sizeof(aux.type_flag_);
        aux_handles.push_back(aux_handle);
        aux_types.emplace_back(aux.type_flag_);
        aux_shapes.emplace_back(aux.shape_);
      }
    }
    /*! \brief set the shape of the i-th aux data, updating the storage shape if needed */
    inline void set_aux_shape(const size_t i, const TShape& shape) {
      aux_shapes[i] = shape;
      if (storage_shape.ndim() > 0) {
        if (storage_type == kRowSparseStorage && i == rowsparse::kIdx) {
          storage_shape[0] = shape[0];
        } else if (storage_type == kCSRStorage && i == csr::kIdx) {
          storage_shape[0] = shape[0];
        }
      }
    }
    /*! \brief check if delay_alloc is on; perform the allocation if it has not been done */
    inline void CheckAndAlloc(void) {
      if (delay_alloc) {
        shandle = Storage::Get()->Alloc(shandle.size, shandle.ctx);
        delay_alloc = false;
      }
    }

    /*! \brief allocate at least dbytes for a dense ndarray */
    void CheckAndAlloc(uint64_t dbytes) {
      CHECK_EQ(kDefaultStorage, storage_type)
              << "CheckAndAlloc(dbytes) is only intended for kDefaultStorage";
      if (delay_alloc) {
        shandle = Storage::Get()->Alloc(dbytes, shandle.ctx);
        delay_alloc = false;
      } else if (shandle.size < dbytes) {
        // free the existing storage and allocate a bigger buffer
        if (shandle.size > 0) Storage::Get()->Free(shandle);
        shandle = Storage::Get()->Alloc(dbytes, shandle.ctx);
      }
    }
    /*! \brief allocate data and aux data given the final aux shapes */
    inline void CheckAndAlloc(const TShape &shape, const std::vector<TShape> &aux_shapes,
                              int dtype) {
      // calculate size, perform allocation
      if (kRowSparseStorage == storage_type) {
        // for row sparse, aux_shape indicates the number of rows to allocate
        auto aux_shape = aux_shapes[rowsparse::kIdx];
        CheckAndAllocAuxData(rowsparse::kIdx, aux_shape);
        TShape storage_shape(shape);
        storage_shape[0] = aux_shape[0];
        CheckAndAllocData(storage_shape, dtype);
      } else if (kCSRStorage == storage_type) {
        CheckAndAllocAuxData(csr::kIndPtr, aux_shapes[csr::kIndPtr]);
        CheckAndAllocAuxData(csr::kIdx, aux_shapes[csr::kIdx]);
        CheckAndAllocData(aux_shapes[csr::kIdx], dtype);
      } else {
        LOG(FATAL) << "Storage type " << storage_type << " not implemented for CheckAndAlloc";
      }
    }
    /*! \brief create the storage handle for the data, assuming ctx is set;
     *  reuses the existing storage if it is large enough, otherwise frees it
     *  and allocates anew; also updates storage_shape
     */
    inline void CheckAndAllocData(const TShape &shape, int dtype) {
      CHECK_NE(aux_shapes.size(), 0) << "data is expected to be allocated after aux_data";
      auto dbytes = shape.Size() * mshadow::mshadow_sizeof(dtype);
      if (shandle.size < dbytes) {
        // free the existing storage, if any, and allocate a bigger buffer
        if (shandle.size > 0) Storage::Get()->Free(shandle);
        shandle = Storage::Get()->Alloc(dbytes, ctx);
      }
      // init shape
      storage_shape = shape;
      // delay_alloc is only set when the data storage handle is present
      delay_alloc = false;
    }
    /*! \brief create the storage handle for the i-th aux data, assuming ctx,
     *  aux shapes, and aux types are set; reuses the existing storage if it is
     *  large enough, otherwise frees it and allocates anew; also updates aux_shapes
     */
    inline void CheckAndAllocAuxData(size_t i, const TShape &shape) {
      CHECK_EQ(shape.ndim(), 1) << "shape must be 1D in CheckAndAllocAuxData";
      CHECK_NE(storage_type, kUndefinedStorage)
        << "storage type cannot be kUndefinedStorage in CheckAndAllocAuxData";
      CHECK_NE(storage_type, kDefaultStorage)
        << "storage type cannot be kDefaultStorage in CheckAndAllocAuxData";
      if (aux_handles.size() <= i) {
        aux_handles.resize(i + 1);
      }
      size_t aux_bytes = shape.Size() * mshadow::mshadow_sizeof(aux_types[i]);
      if (aux_handles[i].size < aux_bytes) {
        // free the existing storage, if any, and allocate a bigger buffer
        if (aux_handles[i].size > 0) Storage::Get()->Free(aux_handles[i]);
        aux_handles[i] = Storage::Get()->Alloc(aux_bytes, ctx);
      }
      // init shape
      set_aux_shape(i, shape);
    }
    /*! \brief destructor: schedule the memory release on the engine */
    ~Chunk() {
      bool skip_free = static_data || delay_alloc;
      Storage::Handle h = this->shandle;
      std::vector<Storage::Handle> aux_h = this->aux_handles;
      Engine::Get()->DeleteVariable([h, aux_h, skip_free](RunContext s) {
        if (skip_free == false) {
          if (h.size > 0) Storage::Get()->Free(h);
          for (size_t i = 0; i < aux_h.size(); i++) {
            if (aux_h[i].size > 0) Storage::Get()->Free(aux_h[i]);
          }
        }
      }, shandle.ctx, var);
    }
  };  // struct Chunk
  void SetTBlob() const {
    CHECK(ptr_ != nullptr);
    TShape shape = shape_;
    char *dptr = static_cast<char*>(ptr_->shandle.dptr);
    auto stype = storage_type();
    if (stype == kDefaultStorage) {
      dptr += byte_offset_;
    } else if (stype == kCSRStorage || stype == kRowSparseStorage) {
      shape = storage_shape();
    } else {
      LOG(FATAL) << "unknown storage type " << stype;
    }
    tblob_.dptr_ = dptr;
    tblob_.shape_ = shape;
    tblob_.type_flag_ = dtype_;
    tblob_.SetDLTensor(ptr_->shandle.ctx.dev_mask(), ptr_->shandle.ctx.dev_id);
#if MKL_EXPERIMENTAL == 1
    tblob_.Mkl_mem_ = Mkl_mem_;
#endif
  }
#if MKL_EXPERIMENTAL == 1
  std::shared_ptr<MKLMemHolder> Mkl_mem_;
#endif
  /*! \brief internal data of NDArray */
  std::shared_ptr<Chunk> ptr_{nullptr};
  /*! \brief shape of the current NDArray */
  TShape shape_;
  /*! \brief byte offset in the chunk */
  size_t byte_offset_ = 0;
  /*! \brief type of the data */
  int dtype_ = -1;
  /*! \brief storage type of the data */
  NDArrayStorageType storage_type_ = kUndefinedStorage;
  /*! \brief node entry for autograd */
  nnvm::NodeEntry entry_;
  /*! \brief internal TBlob view of the data; mutable because const accessors
   *  such as data() may still need to refresh it after a delayed allocation
   */
  mutable TBlob tblob_;
};  // class NDArray
// ...

/*! \brief definition of NDArray function */
typedef std::function<void (NDArray **used_vars,
                            real_t *scalars,
                            NDArray **mutate_vars,
                            int num_params,
                            char **param_keys,
                            char **param_vals)> NDArrayAPIFunction;
/*! \brief mask information on how functions can be exposed */
enum NDArrayFunctionTypeMask {
  /*! \brief all the use_vars should go before scalar */
  kNDArrayArgBeforeScalar = 1,
  /*! \brief all the scalar should go before use_vars */
  kScalarArgBeforeNDArray = 1 << 1,
  /*!
   * \brief whether this function allows the handles in the target to be empty
   *  NDArrays that are not yet initialized, initializing them when invoked
   */
  kAcceptEmptyMutateTarget = 1 << 2
};
/*! \brief Registry entry for NDArrayFunction */
struct NDArrayFunctionReg
    : public dmlc::FunctionRegEntryBase<NDArrayFunctionReg,
                                        NDArrayAPIFunction> {
  /*! \brief number of variables used by this function */
  unsigned num_use_vars;
  /*! \brief number of variables mutated by this function */
  unsigned num_mutate_vars;
  /*! \brief number of scalars used by this function */
  unsigned num_scalars;
  /*! \brief information on how the function should be called from the API */
  int type_mask;
  /*! \brief constructor */
  NDArrayFunctionReg()
      : num_use_vars(0),
        num_mutate_vars(0),
        num_scalars(0),
        type_mask(0) {}
  /*!
   * \brief set the function body to an NDArray setvalue function;
   *  this also auto-sets the parameters correctly
   */
  NDArrayFunctionReg &set_function(void (*fsetvalue)(const real_t &rhs,
                                                     NDArray *out)) {
    body = [fsetvalue](NDArray **used_vars, real_t *s, NDArray **mutate_vars,
                       int num_params, char **param_keys, char **param_vals) {
      (*fsetvalue)(s[0], mutate_vars[0]);
    };
    num_mutate_vars = 1; num_scalars = 1;
    this->add_argument("src", "real_t", "Source input to the function.");
    return *this;
  }
  /*!
   * \brief set the function body to a ternary NDArray function;
   *  this also auto-sets the parameters correctly
   */
  NDArrayFunctionReg &set_function(void (*fternary)(const NDArray &lhs,
                                                    const NDArray &mhs,
                                                    const NDArray &rhs,
                                                    NDArray *out)) {
    body = [fternary](NDArray **used_vars,
                      real_t *s, NDArray **mutate_vars,
                      int num_params, char **param_keys, char **param_vals) {
      (*fternary)(*used_vars[0], *used_vars[1], *used_vars[2], mutate_vars[0]);
    };
    num_use_vars = 3; num_mutate_vars = 1;
    type_mask = kNDArrayArgBeforeScalar | kAcceptEmptyMutateTarget;
    this->add_argument("lhs", "NDArray", "Left operand to the function.");
    this->add_argument("mhs", "NDArray", "Middle operand to the function.");
    this->add_argument("rhs", "NDArray", "Right operand to the function.");
    return *this;
  }
  /*!
   * \brief set the function body to a binary NDArray function;
   *  this also auto-sets the parameters correctly
   */
  NDArrayFunctionReg &set_function(void (*fbinary)(const NDArray &lhs,
                                                   const NDArray &rhs,
                                                   NDArray *out)) {
    body = [fbinary](NDArray **used_vars, real_t *s, NDArray **mutate_vars,
                     int num_params, char **param_keys, char **param_vals) {
      (*fbinary)(*used_vars[0], *used_vars[1], mutate_vars[0]);
    };
    num_use_vars = 2; num_mutate_vars = 1;
    type_mask = kNDArrayArgBeforeScalar | kAcceptEmptyMutateTarget;
    this->add_argument("lhs", "NDArray", "Left operand to the function.");
    this->add_argument("rhs", "NDArray", "Right operand to the function.");
    return *this;
  }
  /*!
   * \brief set the function body to an NDArray-scalar function;
   *  this also auto-sets the parameters correctly
   */
  NDArrayFunctionReg &set_function(void (*fscalar)(const NDArray &lhs,
                                                   const real_t &rhs,
                                                   NDArray *out)) {
    body = [fscalar](NDArray **used_vars, real_t *s, NDArray **mutate_vars,
                     int num_params, char **param_keys, char **param_vals) {
      (*fscalar)(*used_vars[0], s[0], mutate_vars[0]);
    };
    num_use_vars = 1; num_mutate_vars = 1; num_scalars = 1;
    type_mask = kNDArrayArgBeforeScalar | kAcceptEmptyMutateTarget;
    this->add_argument("lhs", "NDArray", "Left operand to the function.");
    this->add_argument("rhs", "real_t", "Right operand to the function.");
    return *this;
  }
  /*!
   * \brief set the function body to a unary NDArray function;
   *  this also auto-sets the parameters correctly
   */
  NDArrayFunctionReg &set_function(void (*funary)(const NDArray &src,
                                                  NDArray *out)) {
    body = [funary](NDArray **used_vars, real_t *s, NDArray **mutate_vars,
                    int num_params, char **param_keys, char **param_vals) {
      (*funary)(*used_vars[0], mutate_vars[0]);
    };
    num_use_vars = 1; num_mutate_vars = 1;
    type_mask = kNDArrayArgBeforeScalar | kAcceptEmptyMutateTarget;
    this->add_argument("src", "NDArray", "Source input to the function.");
    return *this;
  }
  /*!
   * \brief set the function body to a generic NDArray function;
   *  this also auto-sets the parameters correctly
   */
  NDArrayFunctionReg &set_function(
    void (*fgeneric)(NDArray **used_vars,
                     real_t *s,
                     NDArray **mutate_vars,
                     const std::map<std::string, std::string>& param)) {
    body = [fgeneric](NDArray **used_vars, real_t *s, NDArray **mutate_vars,
                      int num_params, char **param_keys, char **param_vals) {
      std::map<std::string, std::string> param;
      for (int i = 0; i < num_params; ++i) {
        param[param_keys[i]] = param_vals[i];
      }
      fgeneric(used_vars, s, mutate_vars, param);
    };
    return *this;
  }
  /*! \brief set the number of use variables */
  NDArrayFunctionReg &set_num_use_vars(unsigned n) {
    num_use_vars = n; return *this;
  }
  /*! \brief set the number of mutate variables */
  NDArrayFunctionReg &set_num_mutate_vars(unsigned n) {
    num_mutate_vars = n; return *this;
  }
  /*! \brief set the number of scalar arguments */
  NDArrayFunctionReg &set_num_scalars(unsigned n) {
    num_scalars = n; return *this;
  }
  /*! \brief set the type mask */
  NDArrayFunctionReg &set_type_mask(int tmask) {
    type_mask = tmask; return *this;
  }
};  // NDArrayFunctionReg
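// Usage sketch (illustrative) for the registration macro defined below;
// `MyPlus` is a hypothetical binary function:
//
//   void MyPlus(const NDArray &lhs, const NDArray &rhs, NDArray *out);
//
//   MXNET_REGISTER_NDARRAY_FUN(my_plus)
//   .set_function(MyPlus)
//   .describe("Elementwise add of two NDArrays.");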
/*! \brief Macro to register an NDArray function */
#define MXNET_REGISTER_NDARRAY_FUN(name)                                 \
  DMLC_REGISTRY_REGISTER(::mxnet::NDArrayFunctionReg, NDArrayFunctionReg, name)

}  // namespace mxnet

namespace dmlc {
/*! \brief traits */
DMLC_DECLARE_TRAITS(has_saveload, mxnet::NDArray, true);
}  // namespace dmlc

#endif  // MXNET_NDARRAY_H_