mxnet
tensor_inspector.h
Go to the documentation of this file.
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
26 #ifndef MXNET_COMMON_TENSOR_INSPECTOR_H_
27 #define MXNET_COMMON_TENSOR_INSPECTOR_H_
28 
29 #include <algorithm>
30 #include <cmath>
31 #include <string>
32 #include <vector>
33 #include <fstream>
34 #include "../../3rdparty/mshadow/mshadow/base.h"
35 
36 namespace mxnet {
37 
 // NOTE(review): Doxygen dump — the enclosing "struct InspectorManager {"
 // declaration (original lines 37-42) is missing from this view. This is the
 // process-wide singleton that mediates TensorInspector instances: it owns the
 // prompt mutex, the global "skip all" flags, and per-tag visit counters.
 // Lazily-constructed singleton accessor.
 // NOTE(review): the first `if (!im)` read happens outside the mutex
 // (double-checked locking on a non-atomic unique_ptr) — strictly a data
 // race; confirm first use is effectively single-threaded.
 43  static InspectorManager* get() {
 44  static std::mutex mtx;
 45  static std::unique_ptr<InspectorManager> im = nullptr;
 46  if (!im) {
 47  std::unique_lock<std::mutex> lk(mtx);
 48  if (!im)
 49  im = std::make_unique<InspectorManager>();
 50  }
 51  return im.get();
 52  }
 53  /* !\brief mutex used to lock interactive_print() and check_value() */
 54  std::mutex mutex_;
 55  /* !\brief skip all interactive prints */
 // NOTE(review): the member declaration for this flag (original line 56,
 // presumably `bool interactive_print_skip_all_ = false;`) is missing from
 // this dump; the Doxygen index below references it.
 57  /* !\brief skip all value checks */
 58  bool check_value_skip_all_ = false;
 59  /* !\brief visit count for interactive print tags */
 60  std::unordered_map<std::string, int> interactive_print_tag_counter_;
 61  /* !\brief visit count for check value tags */
 62  std::unordered_map<std::string, int> check_value_tag_counter_;
 63  /* !\brief visit count for dump value tags */
 64  std::unordered_map<std::string, int> dump_to_file_tag_counter_;
 65 };
66 
 // NOTE(review): the `enum CheckerType {` opening line (original line 70) is
 // missing from this dump. These enumerators name the built-in predicates
 // that get_checker() turns into per-value lambdas for check_value().
 71  NegativeChecker, // check if is negative
 72  PositiveChecker, // check if is positive
 73  ZeroChecker, // check if is zero
 74  NaNChecker, // check if is NaN, will always return false if DType is not a float type
 75  InfChecker, // check if is infinity, will always return false if DType is not a float type
 76  PositiveInfChecker, // check if is positive infinity,
 77  // will always return false if DType is not a float type
 78  NegativeInfChecker, // check if is negative infinity,
 79  // will always return false if DType is not a float type
 80  FiniteChecker, // check if is finite, will always return false if DType is not a float type
 81  NormalChecker, // check if is neither infinity nor NaN
 82  AbnormalChecker, // check if is infinity or NaN
 83 };
84 
103  private:
110  template<typename DType, typename StreamType>
111  void tensor_info_to_string(StreamType* os) {
112  const int dimension = tb_.ndim();
113  *os << "<" << infer_type_string(typeid(DType)) << " Tensor ";
114  *os << tb_.shape_[0];
115  for (int i = 1; i < dimension; ++i) {
116  *os << 'x' << tb_.shape_[i];
117  }
118  *os << ">" << std::endl;
119  }
120 
128  template<typename DType, typename StreamType>
129  void tensor_info_to_string(StreamType* os, const std::vector<index_t>& shape) {
130  const int dimension = shape.size();
131  *os << "<" << infer_type_string(typeid(DType)) << " Tensor ";
132  *os << shape[0];
133  for (int i = 1; i < dimension; ++i) {
134  *os << 'x' << shape[i];
135  }
136  *os << ">" << std::endl;
137  }
138 
 // Print every value of this tensor to `os` in nested-bracket ("numpy-like")
 // form, followed by the one-line type/shape summary.
 145  template<typename DType, typename StreamType>
 146  void to_string_helper(StreamType* os) {
 147 #if MXNET_USE_CUDA
 // GPU-resident tensors are first copied to CPU and the copy is printed.
 148  if (tb_.dev_mask() == gpu::kDevMask) {
 149  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
 150  .to_string_helper<DType>(os);
 151  return;
 152  }
 153 #endif // MXNET_USE_CUDA
 154  const int dimension = tb_.ndim();
 // offsets[k] holds the product of the trailing (k+1) dimension sizes; a flat
 // index divisible by offsets[k] lies on a boundary of k+1 dimensions, which
 // determines how many brackets to close/open below.
 155  std::vector<index_t> offsets;
 156  index_t multiple = 1;
 157  for (int i = dimension - 1; i >= 0; --i) {
 158  multiple *= tb_.shape_[i];
 159  offsets.push_back(multiple);
 160  }
 161  *os << std::string(dimension, '[');
 162  *os << tb_.dptr<DType>()[0];
 163  for (index_t i = 1; i < static_cast<index_t>(tb_.shape_.Size()); ++i) {
 // n = number of dimension boundaries crossed at flat index i.
 164  int n = 0;
 165  for (auto off : offsets) {
 166  n += (i % off == 0);
 167  }
 168  if (n) {
 169  *os << std::string(n, ']') << ", " << std::string(n, '[');
 170  } else {
 171  *os << ", ";
 172  }
 173  *os << tb_.dptr<DType>()[i];
 174  }
 175  *os << std::string(dimension, ']') << std::endl;
 176  tensor_info_to_string<DType>(os);
 177  }
178 
186  template<typename DType, typename StreamType>
187  void to_string_helper(StreamType* os, const DType* dptr) {
188 #if MXNET_USE_CUDA
189  if (tb_.dev_mask() == gpu::kDevMask) {
190  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
191  .to_string_helper<DType>(os, dptr);
192  return;
193  }
194 #endif // MXNET_USE_CUDA
195  *os << *dptr << std::endl;
196  *os << "<" << typeid(*dptr).name() << ">" << std::endl;
197  }
198 
207  template<typename DType, typename StreamType>
208  void to_string_helper(StreamType* os, const std::vector<index_t>& sub_shape, index_t offset) {
209 #if MXNET_USE_CUDA
210  if (tb_.dev_mask() == gpu::kDevMask) {
211  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
212  .to_string_helper<DType>(os, sub_shape, offset);
213  return;
214  }
215 #endif // MXNET_USE_CUDA
216  DType* dptr = tb_.dptr<DType>() + offset;
217  if (sub_shape.size() == 0) {
218  to_string_helper<DType>(os, dptr);
219  return;
220  }
221  const int dimension = sub_shape.size();
222  std::vector<index_t> offsets;
223  index_t multiple = 1;
224  for (int i = dimension - 1; i >= 0; --i) {
225  multiple *= sub_shape[i];
226  offsets.push_back(multiple);
227  }
228  std::stringstream ss;
229  *os << std::string(dimension, '[');
230  *os << dptr[0];
231  for (index_t i = 1; i < multiple; ++i) {
232  int n = 0;
233  for (auto off : offsets) {
234  n += (i % off == 0);
235  }
236  if (n) {
237  *os << std::string(n, ']') << ", " << std::string(n, '[');
238  } else {
239  *os << ", ";
240  }
241  *os << dptr[i];
242  }
243  *os << std::string(dimension, ']') << std::endl;
244  tensor_info_to_string<DType>(os, sub_shape);
245  }
246 
254  void print_locator(const std::vector<index_t>& pos, std::vector<index_t>* sub_shape,
255  index_t* offset) {
256  const int dimension = tb_.ndim();
257  const int sub_dim = dimension - pos.size();
258  sub_shape->resize(sub_dim);
259  index_t multiple = 1;
260  for (size_t i = pos.size(), j = 0; i < static_cast<size_t>(dimension); ++i, ++j) {
261  (*sub_shape)[j] = tb_.shape_[i];
262  multiple *= tb_.shape_[i];
263  }
264  index_t sum = 0;
265  index_t m = 1;
266  for (index_t i = pos.size() - 1; i >= 0; --i) {
267  sum += pos[i] * m;
268  m *= tb_.shape_[i];
269  }
270  *offset = sum * multiple;
271  }
272 
279  bool parse_position(std::vector<index_t>* pos, const std::string& str) {
280  const int dimension = tb_.ndim();
281  std::istringstream ss(str);
282  index_t n;
283  while (ss >> n) {
284  pos->push_back(n);
285  if (ss.peek() == ',') {
286  ss.ignore();
287  }
288  }
289  if (pos->size() > static_cast<size_t>(dimension)) {
290  return false;
291  }
292  for (size_t i = 0; i < pos->size(); ++i) {
293  if ((*pos)[i] > (tb_.shape_[i] - 1) || (*pos)[i] < 0) {
294  return false;
295  }
296  }
297  return !pos->empty();
298  }
299 
 // Interactive REPL for printing this tensor: prompts on std::cin for either
 // a coordinate list, "e" (entire tensor), "d" (dump to .npy), "b" (break),
 // or "s" (skip all future prompts).
 305  template<typename DType>
 306  void interactive_print_helper(std::string tag) {
 307 #if MXNET_USE_CUDA
 // GPU-resident tensors are first copied to CPU; the copy is inspected.
 308  if (tb_.dev_mask() == gpu::kDevMask) {
 309  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
 310  .interactive_print_helper<DType>(tag);
 311  return;
 312  }
 313 #endif // MXNET_USE_CUDA
 // Serialize prompts across threads/operators via the singleton's mutex.
 314  std::lock_guard<std::mutex> lock(InspectorManager::get()->mutex_);
 // NOTE(review): original lines 315-316 are missing from this dump — per the
 // bare `break`/`continue` below, they contained the enclosing prompt loop
 // (guarded by the skip-all flag) and the tag visit-counter increment.
 317  std::cout << "----------Interactive Print----------" << std::endl;
 318  if (tag != "") {
 319  std::cout << "Tag: " << tag << " Visit: " <<
 // NOTE(review): original line 320 (streaming the visit counter) is missing
 // from this dump.
 321  }
 322  tensor_info_to_string<DType>(&std::cout);
 323  std::cout << "To print a part of the tensor, " <<
 324  "please specify a position, seperated by \",\"" << std::endl;
 325  std::cout << "\"e\" for the entire tensor, " <<
 326  "\"d\" to dump value to file, " <<
 327  "\"b\" to break, " <<
 328  "\"s\" to skip all: ";
 329  std::string str;
 330  std::cin >> str;
 331  if (str == "b") {
 332  break;
 333  } else if (str == "e") {
 334  to_string_helper<DType>(&std::cout);
 335  continue;
 336  } else if (str == "s") {
 // NOTE(review): original line 337 (setting the global skip-all flag) is
 // missing from this dump.
 338  break;
 339  } else if (str == "d") {
 // Inner loop: keep asking until a space-free tag is given, then dump.
 340  while (true) {
 341  std::cout << "Please enter a tag: ";
 342  std::cin >> str;
 343  if (str.find(' ') != std::string::npos) {
 344  std::cout << "Invalid tag name. No space allowed.";
 345  continue;
 346  }
 347  dump_to_file_helper<DType>(str);
 348  break;
 349  }
 350  continue;
 351  }
 // Otherwise treat the input as coordinates and print that sub-tensor.
 352  std::vector<index_t> pos;
 353  if (parse_position(&pos, str)) {
 354  std::vector<index_t> sub_shape;
 355  index_t offset;
 356  print_locator(pos, &sub_shape, &offset);
 357  to_string_helper<DType>(&std::cout, sub_shape, offset);
 358  } else {
 359  std::cout << "invalid command/indices" << std::endl;
 360  }
 361  }
 362  }
363 
369  template<typename DType>
370  std::function<bool(DType)> get_checker(CheckerType ct) {
371  switch (ct) {
372  case NegativeChecker:
373  return [] (DType x) {
374  return x < 0;
375  };
376  case PositiveChecker:
377  return [] (DType x) {
378  return x > 0;
379  };
380  case ZeroChecker:
381  return [] (DType x) {
382  return x == 0;
383  };
384  case NaNChecker:
385  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
386  std::is_same<DType, mshadow::half::half_t>::value) {
387  return [] (DType x) {
388  return x != x;
389  };
390  } else {
391  LOG(WARNING) << "NaNChecker only applies to float types. " <<
392  "Lambda will always return false.";
393  }
394  break;
395  case InfChecker:
396  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
397  std::is_same<DType, mshadow::half::half_t>::value) {
398  return [] (DType x) {
399  return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f;
400  };
401  } else {
402  LOG(WARNING) << "InfChecker only applies to float types. " <<
403  "Lambda will always return false.";
404  }
405  break;
406  case PositiveInfChecker:
407  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
408  std::is_same<DType, mshadow::half::half_t>::value) {
409  return [] (DType x) {
410  return x == (DType)1.0 / 0.0f;
411  };
412  } else {
413  LOG(WARNING) << "PositiveInfChecker only applies to float types. " <<
414  "Lambda will always return false.";
415  }
416  break;
417  case NegativeInfChecker:
418  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
419  std::is_same<DType, mshadow::half::half_t>::value) {
420  return [] (DType x) {
421  return x == -(DType)1.0 / 0.0f;
422  };
423  } else {
424  LOG(WARNING) << "NegativeInfChecker only applies to float types. " <<
425  "Lambda will always return false.";
426  }
427  break;
428  case FiniteChecker:
429  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
430  std::is_same<DType, mshadow::half::half_t>::value) {
431  return [] (DType x) {
432  return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f;
433  };
434  } else {
435  LOG(WARNING) << "FiniteChecker only applies to float types. " <<
436  "Lambda will always return false.";
437  }
438  break;
439  case NormalChecker:
440  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
441  std::is_same<DType, mshadow::half::half_t>::value) {
442  return [] (DType x) {
443  return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f &&
444  x == x;
445  };
446  } else {
447  LOG(WARNING) << "NormalChecker only applies to float types. " <<
448  "Lambda will always return false.";
449  }
450  break;
451  case AbnormalChecker:
452  if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
453  std::is_same<DType, mshadow::half::half_t>::value) {
454  return [] (DType x) {
455  return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f ||
456  x != x;
457  };
458  } else {
459  LOG(WARNING) << "AbnormalChecker only applies to float types. " <<
460  "Lambda will always return false.";
461  }
462  break;
463  default:
464  return [] (DType x) {
465  return false;
466  };
467  }
468  return [] (DType x) {return false;};
469  }
470 
475  std::vector<index_t> index_to_coordinates(index_t idx) {
476  const int dimension = tb_.ndim();
477  std::vector<index_t> ret;
478  for (int i = dimension - 1; i >= 0; --i) {
479  ret.push_back(idx % tb_.shape_[i]);
480  idx /= tb_.shape_[i];
481  }
482  std::reverse(ret.begin(), ret.end());
483  return ret;
484  }
485 
 // Scan every element with `checker`, collect the coordinates of matches into
 // `ret`, and (optionally) run an interactive prompt loop to explore them.
 495  template<typename DType>
 496  void check_value_helper(std::vector<std::vector<index_t>>* ret,
 497  const std::function<bool(DType)>& checker, bool interactive, std::string tag) {
 498 #if MXNET_USE_CUDA
 // GPU-resident tensors are first copied to CPU; the copy is inspected.
 499  if (tb_.dev_mask() == gpu::kDevMask) {
 500  return TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
 501  .check_value_helper<DType>(ret, checker, interactive, tag);
 502  }
 503 #endif // MXNET_USE_CUDA
 // Pass 1: linear scan; `ss` accumulates a printable "(c0, c1, ...)" list of
 // every coordinate where the checker fired.
 504  index_t count = 0;
 505  std::stringstream ss;
 506  ss << "[";
 507  bool first_pass = true;
 508  for (index_t i = 0; i <static_cast<index_t>(tb_.shape_.Size()); ++i) {
 509  if (checker(tb_.dptr<DType>()[i])) {
 510  ++count;
 511  if (!first_pass) {
 512  ss << ", ";
 513  }
 514  first_pass = false;
 515  std::vector<index_t> coords = index_to_coordinates(i);
 516  ss << "(" << coords[0];
 // Note: this inner `i` intentionally shadows the outer loop index.
 517  for (size_t i = 1; i < coords.size(); ++i) {
 518  ss << ", " << coords[i];
 519  }
 520  ss << ")";
 521  ret->push_back(coords);
 522  }
 523  }
 524  ss << "]" << std::endl;
 // Pass 2 (optional): interactive exploration, serialized by the singleton's
 // mutex so concurrent operators do not interleave prompts.
 525  if (interactive) {
 526  std::lock_guard<std::mutex> lock(InspectorManager::get()->mutex_);
 // NOTE(review): original lines 527-528 are missing from this dump — per the
 // bare `break`/`continue` below, they contained the enclosing prompt loop
 // (guarded by the skip-all flag) and the tag visit-counter increment.
 529  std::cout << "----------Value Check----------" << std::endl;
 530  tensor_info_to_string<DType>(&std::cout);
 531  if (tag != "") {
 532  std::cout << "Tag: " << tag << " Visit: " <<
 533  InspectorManager::get()->check_value_tag_counter_[tag] << std::endl;
 534  }
 535  std::cout << count << " value(s) found." << std::endl;
 536  std::cout << "To print a part of the tensor," <<
 537  " please specify a position, seperated by \",\"" << std::endl;
 538  std::cout << "\"e\" for the entire tensor, " <<
 539  "\"p\" to print the coordinates of the values found, " <<
 540  "\"b\" to break, " <<
 541  "\"s\" to skip all: ";
 542  std::string str;
 543  std::cin >> str;
 544  if (str == "b") {
 545  break;
 546  } else if (str == "e") {
 547  to_string_helper<DType>(&std::cout);
 548  continue;
 549  } else if (str == "p") {
 550  std::cout << ss.str() << std::endl;
 551  continue;
 552  } else if (str == "s") {
 // NOTE(review): original line 553 (setting the global skip-all flag) is
 // missing from this dump.
 554  break;
 555  }
 // Otherwise treat the input as coordinates and print that sub-tensor.
 556  std::vector<index_t> pos;
 557  if (parse_position(&pos, str)) {
 558  std::vector<index_t> sub_shape;
 559  index_t offset;
 560  print_locator(pos, &sub_shape, &offset);
 561  to_string_helper<DType>(&std::cout, sub_shape, offset);
 562  } else {
 563  std::cout << "invalid command/indices" << std::endl;
 564  }
 565  }
 566  }
 567  }
568 
573  inline char infer_type(const std::type_info& ti) {
574  if (ti == typeid(float)) return 'f';
575  else if (ti == typeid(double)) return 'f';
576  else if (ti == typeid(mshadow::half::half_t) ) return 'f';
577  else if (ti == typeid(uint8_t)) return 'u';
578  else if (ti == typeid(int32_t)) return 'i';
579  else if (ti == typeid(int64_t)) return 'i';
580  else
581  return '?';
582  }
583 
588  inline std::string infer_type_string(const std::type_info& ti) {
589  if (ti == typeid(float)) return "float";
590  else if (ti == typeid(double)) return "double";
591  else if (ti == typeid(mshadow::half::half_t) ) return "mshasow::half::half_t";
592  else if (ti == typeid(uint8_t)) return "uint8_t";
593  else if (ti == typeid(int32_t)) return "int32_t";
594  else if (ti == typeid(int64_t)) return "int64_t";
595  else
596  return "unknown tyoe";
597  }
598 
602  inline char endian_test() {
603  int x = 1;
604  return (reinterpret_cast<char*>(&x)[0]) ? '<' : '>';
605  }
606 
 // Build an npy (NumPy binary format) version-1.0 header for this tensor:
 // magic "\x93NUMPY", version bytes 1.0, a little-endian uint16 dict length,
 // then a Python-dict string padded so the data section starts at a
 // 64-byte-aligned offset.
 611  template<typename DType>
 612  std::string get_header() {
 613  const int dimension = tb_.ndim();
 614  std::string dict;
 615  dict += "{'descr':'";
 // descr = byte order + kind char + item size, e.g. "<f4".
 616  dict += endian_test();
 617  dict += infer_type(typeid(DType));
 618  dict += std::to_string(sizeof(DType));
 619  dict += "','fortran_order':False,'shape':(";
 620  dict += std::to_string(tb_.shape_[0]);
 621  for (int i = 1; i < dimension; ++i) {
 622  dict += ',';
 623  dict += std::to_string(tb_.shape_[i]);
 624  }
 // A 1-d shape needs a trailing comma to be a valid Python tuple, e.g. "(3,)".
 625  if (dimension == 1) {
 626  dict += ",";
 627  }
 628  dict += ")} ";
 // Pad so that (10 fixed header bytes + dict length) is a multiple of 64,
 // as the npy spec requires; the last pad byte becomes the '\n' terminator.
 629  int padding_size = 64 - ((10 + dict.size()) % 64);
 630  dict += std::string(padding_size, ' ');
 631  dict.back() = '\n';
 632  std::string header;
 633  header += static_cast<char>(0x93);
 634  header += "NUMPY";
 // Format version 1.0.
 635  header += static_cast<char>(0x01);
 636  header += static_cast<char>(0x00);
 // Dict length as little-endian uint16.
 // NOTE(review): assumes dict.size() < 65536 (version 1.0 limit) — holds for
 // realistic tensor ranks, but it is not checked here.
 637  header += static_cast<char>((uint16_t)dict.size() & 0x00ff);
 638  header += static_cast<char>(((uint16_t)dict.size() >> 8) & 0x00ff);
 639  header += dict;
 640  return header;
 641  }
642 
649  template<typename DType>
650  void write_npy(const std::string& header, const std::string& filename) {
651  std::ofstream file;
652  file.exceptions(std::ofstream::failbit | std::ofstream::badbit);
653  try {
654  file.open(filename, std::ios::out | std::ios::binary);
655  file.write(header.c_str(), header.size());
656  file.write(reinterpret_cast<char*>(tb_.dptr<DType>()), sizeof(DType) * tb_.shape_.Size());
657  file.close();
658  std::cout << "Tensor dumped to file: " << filename << std::endl;
659  } catch (std::ofstream::failure e) {
660  std::cerr << "Exception opening/writing/closing file " << filename << std::endl;
661  }
662  }
663 
 // Dump this tensor to "<tag>_<visit>.npy" in NumPy binary format, where
 // <visit> is the per-tag visit counter from the InspectorManager singleton.
 670  template<typename DType>
 671  void dump_to_file_helper(const std::string& tag) {
 672 #if MXNET_USE_CUDA
 // GPU-resident tensors are first copied to CPU; the copy is dumped.
 673  if (tb_.dev_mask() == gpu::kDevMask) {
 674  TensorInspector(test::CAccessAsCPU(ctx_, tb_, false)(), ctx_)
 675  .dump_to_file_helper<DType>(tag);
 676  return;
 677  }
 678 #endif // MXNET_USE_CUDA
 679  std::string header = get_header<DType>();
 // NOTE(review): original line 680 is missing from this dump — presumably the
 // increment of dump_to_file_tag_counter_[tag] before it is read below.
 681  const int visit = InspectorManager::get()->dump_to_file_tag_counter_[tag];
 682  std::string filename = tag + "_" + std::to_string(visit) + ".npy";
 683  write_npy<DType>(header, filename);
 684  }
685 
689  inline void validate_shape() {
690  const int dimension = tb_.ndim();
691  CHECK(dimension > 0) << "Tensor Inspector does not support empty tensors " <<
692  "or tensors of unknow shape.";
693  for (int i = 0; i < dimension; ++i) {
694  CHECK(tb_.shape_[i] != 0) << "Invalid tensor shape: shape_[" << i << "] is 0";
695  }
696  }
697 
 698  /* !\brief the tensor blob */
 699  const TBlob tb_;
 // NOTE(review): ctx_ is a reference member — a TensorInspector must not
 // outlive the RunContext passed to its constructor, or ctx_ dangles.
 700  /* !\brief the run context of the tensor */
 701  const RunContext& ctx_;
702 
703  public:
 // Construct from an mshadow::Tensor; the Tensor is wrapped in the TBlob
 // member and the shape is validated eagerly.
 712  template<typename Device, int dimension, typename DType>
 // NOTE(review): the constructor signature line (original line 713,
 // `TensorInspector(const mshadow::Tensor<Device, dimension, DType>& ts,
 // const RunContext& ctx):` per the Doxygen index below) is missing from
 // this dump; only the initializer list and body survive.
 714  tb_(ts), ctx_(ctx) {
 715  validate_shape();
 716  }
717 
723  TensorInspector(const TBlob& tb, const RunContext& ctx):
724  tb_(tb), ctx_(ctx) {
725  validate_shape();
726  }
727 
733  TensorInspector(const NDArray& arr, const RunContext& ctx):
734  tb_(arr.data()), ctx_(ctx) {
735  validate_shape();
736  }
737 
741  void print_string() {
742  std::cout << to_string() << std::endl;
743  }
744 
748  std::string to_string() {
749  std::stringstream ss;
750  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
751  to_string_helper<DType>(&ss);
752  });
753  return ss.str();
754  }
755 
 // Public entry point for the interactive print REPL; dispatches on the
 // runtime element type and forwards to interactive_print_helper().
 // `tag` names this call site for visit counting (empty = untagged).
 760  void interactive_print(std::string tag = "") {
 761  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
 762  interactive_print_helper<DType>(tag);
 763  });
 764  }
765 
774  template<typename ValueChecker>
775  std::vector<std::vector<index_t>> check_value(const ValueChecker& checker,
776  bool interactive = false, std::string tag = "") {
777  std::vector<std::vector<index_t>> ret;
778  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
779  check_value_helper<DType>(&ret, checker, ret, interactive, tag);
780  });
781  return ret;
782  }
783 
791  std::vector<std::vector<index_t>> check_value(CheckerType ct, bool interactive = false,
792  std::string tag = "") {
793  std::vector<std::vector<index_t>> ret;
794  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
795  check_value_helper<DType>(&ret, get_checker<DType>(ct), interactive, tag);
796  });
797  return ret;
798  }
799 
 // Public entry point for dumping this tensor to "<tag>_<visit count>.npy"
 // in NumPy binary format; dispatches on the runtime element type and
 // forwards to dump_to_file_helper().
 804  void dump_to_file(std::string tag) {
 805  MSHADOW_TYPE_SWITCH(tb_.type_flag_, DType, {
 806  dump_to_file_helper<DType>(tag);
 807  });
 808  }
809 };
810 
811 } // namespace mxnet
812 
813 #endif // MXNET_COMMON_TENSOR_INSPECTOR_H_
std::unordered_map< std::string, int > dump_to_file_tag_counter_
Definition: tensor_inspector.h:64
std::mutex mutex_
Definition: tensor_inspector.h:54
this singleton struct mediates individual TensorInspector objects so that we can control the global b...
Definition: tensor_inspector.h:42
Definition: tensor_inspector.h:78
Definition: tensor_inspector.h:81
namespace of mxnet
Definition: api_registry.h:33
TensorInspector(const mshadow::Tensor< Device, dimension, DType > &ts, const RunContext &ctx)
construct from Tensor object
Definition: tensor_inspector.h:713
std::unordered_map< std::string, int > check_value_tag_counter_
Definition: tensor_inspector.h:62
void print_string()
print the tensor to std::cout
Definition: tensor_inspector.h:741
Definition: tensor_inspector.h:75
std::string to_string()
return a string which contains the values and other info of the tensor
Definition: tensor_inspector.h:748
int type_flag_
type flag of the tensor blob
Definition: tensor_blob.h:73
std::vector< std::vector< index_t > > check_value(CheckerType ct, bool interactive=false, std::string tag="")
check/validate the values within the tensor, return the coordinates where the lambda evaluates to tru...
Definition: tensor_inspector.h:791
execution time context. The information needed in runtime for actual execution.
Definition: base.h:349
bool interactive_print_skip_all_
Definition: tensor_inspector.h:56
CheckerType
Enum for building value checkers for TensorInspector::check_value()
Definition: tensor_inspector.h:70
This class provides a unified interface to inspect the value of all data types including Tensor...
Definition: tensor_inspector.h:102
Definition: tensor_inspector.h:73
void dump_to_file(std::string tag)
dump the value of the tensor to a file with name "tag_[visit count].npy" in npy format ...
Definition: tensor_inspector.h:804
static const int kDevMask
device flag number, identifies this device
Definition: tensor.h:50
Definition: tensor_inspector.h:71
static InspectorManager * get()
Definition: tensor_inspector.h:43
TensorInspector(const NDArray &arr, const RunContext &ctx)
construct from NDArray object. Currently this only works with kDefaultStorage
Definition: tensor_inspector.h:733
bool check_value_skip_all_
Definition: tensor_inspector.h:58
TensorInspector(const TBlob &tb, const RunContext &ctx)
construct from TBlob object
Definition: tensor_inspector.h:723
Definition: tensor_inspector.h:82
Definition: tensor_inspector.h:76
#define MSHADOW_TYPE_SWITCH(type, DType,...)
Definition: base.h:1074
void interactive_print(std::string tag="")
interactively print the tensor value
Definition: tensor_inspector.h:760
mshadow::index_t index_t
index type usually use unsigned
Definition: base.h:94
ndarray interface
Definition: ndarray.h:81
std::unordered_map< std::string, int > interactive_print_tag_counter_
Definition: tensor_inspector.h:60
std::vector< std::vector< index_t > > check_value(const ValueChecker &checker, bool interactive=false, std::string tag="")
check/validate the values within the tensor, return the coordinates where the value checker evaluates...
Definition: tensor_inspector.h:775
Definition: tensor_inspector.h:80
tensor blob class that can be used to hold tensor of any dimension, any device and any data type...
Definition: tensor_blob.h:65
Definition: tensor_inspector.h:74
Definition: tensor_inspector.h:72