kaldi::nnet1 Namespace Reference

Classes

class  AddShift
 Adds a shift to all rows of the matrix (can be used for global mean normalization). More...
 
class  AffineTransform
 
class  AveragePooling2DComponent
 AveragePooling2DComponent : The input/output matrices are split to submatrices with width 'pool_stride_'. More...
 
class  AveragePoolingComponent
 AveragePoolingComponent : The input/output matrices are split to submatrices with width 'pool_stride_'. More...
 
class  BlockSoftmax
 
class  BlstmProjected
 
class  Component
 Abstract class, building block of the network. More...
 
class  Convolutional2DComponent
 Convolutional2DComponent implements convolution over two axes (frequency and temporal) (i.e. More...
 
class  ConvolutionalComponent
 ConvolutionalComponent implements convolution over a single axis (i.e. More...
 
class  CopyComponent
 Rearrange the matrix columns according to the indices in copy_from_indices_. More...
 
class  Dropout
 
class  FramePoolingComponent
 FramePoolingComponent : The input/output matrices are split to frames of width 'feature_dim_'. More...
 
class  HiddenSoftmax
 
class  KlHmm
 
class  LengthNormComponent
 Rescale the matrix-rows to have unit length (L2-norm). More...
 
class  LinearTransform
 
class  LossItf
 
class  LstmProjected
 
class  MatrixBuffer
 A buffer for caching (utterance-key, feature-matrix) pairs. More...
 
struct  MatrixBufferOptions
 
class  MatrixRandomizer
 Shuffles rows of a matrix according to the indices in the mask. More...
 
class  MaxPooling2DComponent
 MaxPooling2DComponent : The input/output matrices are split to submatrices with width 'pool_stride_'. More...
 
class  MaxPoolingComponent
 MaxPoolingComponent : The input/output matrices are split to submatrices with width 'pool_stride_'. More...
 
class  Mse
 
class  MultiBasisComponent
 
class  MultistreamComponent
 Class MultistreamComponent is an extension of UpdatableComponent for recurrent networks, which are trained with parallel sequences. More...
 
class  MultiTaskLoss
 
class  Nnet
 
struct  NnetDataRandomizerOptions
 Configuration variables that affect how frame-level shuffling is done. More...
 
struct  NnetTrainOptions
 
class  ParallelComponent
 
class  ParametricRelu
 
class  PdfPrior
 
struct  PdfPriorOptions
 
class  RandomizerMask
 Generates a randomly ordered vector of indices. More...
 
class  Rbm
 
class  RbmBase
 
struct  RbmTrainOptions
 
class  RecurrentComponent
 Component with recurrent connections, 'tanh' non-linearity. More...
 
class  Rescale
 Rescale the data column-wise by a vector (can be used for global variance normalization) More...
 
class  SentenceAveragingComponent
 Deprecated!!! Keeping it, as Katka Zmolikova used it in JSALT 2015. More...
 
class  Sigmoid
 
class  SimpleSentenceAveragingComponent
 SimpleSentenceAveragingComponent does not have a nested network; it is intended to be used inside a <ParallelComponent>. More...
 
class  Softmax
 
class  Splice
 Splices the time context of the input features: in N, out k*N, with FrameOffsets o_1,o_2,...,o_k (example of 11 frames: -5 -4 -3 -2 -1 0 1 2 3 4 5). More...
 
class  StdVectorRandomizer
 Randomizes elements of a vector according to a mask. More...
 
class  Tanh
 
class  UpdatableComponent
 Class UpdatableComponent is a Component which has trainable parameters; it contains SGD training hyper-parameters in NnetTrainOptions. More...
 
class  VectorRandomizer
 Randomizes elements of a vector according to a mask. More...
 
class  Xent
 

Typedefs

typedef StdVectorRandomizer< int32 > Int32VectorRandomizer
 
typedef StdVectorRandomizer< std::vector< std::pair< int32, BaseFloat > > > PosteriorRandomizer
 

Functions

template<typename Real >
void ReadCuMatrixFromString (const std::string &s, CuMatrix< Real > *m)
 
Component * ReadComponentFromString (const std::string &s)
 
void UnitTestLengthNorm ()
 
void UnitTestSimpleSentenceAveragingComponent ()
 
void UnitTestConvolutionalComponentUnity ()
 
void UnitTestConvolutionalComponent3x3 ()
 
void UnitTestMaxPoolingComponent ()
 
void UnitTestMaxPooling2DComponent ()
 
void UnitTestAveragePooling2DComponent ()
 
void UnitTestConvolutional2DComponent ()
 
void UnitTestDropoutComponent ()
 
template<typename T >
void CountCorrectFramesWeighted (const CuArray< T > &hyp, const CuArray< T > &ref, const CuVectorBase< BaseFloat > &weights, Vector< double > *correct)
 Helper function of Xent::Eval, calculates the number of matching elements in 'hyp' and 'ref', weighted by 'weights'. More...
 
template<typename T >
std::ostream & operator<< (std::ostream &os, const std::vector< T > &v)
 Defines the stream insertion operator for 'std::vector', useful for log-prints. More...
 
template<typename T >
std::string ToString (const T &t)
 Converts a basic type to a string (please don't overuse). More...
 
template<typename Real >
std::string MomentStatistics (const VectorBase< Real > &vec)
 Get a string with statistics of the data in a vector, so we can print them easily. More...
 
template<typename Real >
std::string MomentStatistics (const MatrixBase< Real > &mat)
 Overload MomentStatistics to MatrixBase<Real> More...
 
template<typename Real >
std::string MomentStatistics (const CuVectorBase< Real > &vec)
 Overload MomentStatistics to CuVectorBase<Real> More...
 
template<typename Real >
std::string MomentStatistics (const CuMatrixBase< Real > &mat)
 Overload MomentStatistics to CuMatrix<Real> More...
 
template<typename Real >
void CheckNanInf (const CuMatrixBase< Real > &mat, const char *msg="")
 Check that matrix contains no nan or inf. More...
 
template<typename Real >
Real ComputeStdDev (const CuMatrixBase< Real > &mat)
 Get the standard deviation of values in the matrix. More...
 
template<typename Real >
void RandGauss (BaseFloat mu, BaseFloat sigma, CuMatrixBase< Real > *mat, struct RandomState *state=NULL)
 Fill CuMatrix with random numbers (Gaussian distribution): mu = the mean value, sigma = standard deviation. More...
 
template<typename Real >
void RandUniform (BaseFloat mu, BaseFloat range, CuMatrixBase< Real > *mat, struct RandomState *state=NULL)
 Fill CuMatrix with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2). More...
 
template<typename Real >
void RandUniform (BaseFloat mu, BaseFloat range, CuVectorBase< Real > *vec, struct RandomState *state=NULL)
 Fill CuVector with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2). More...
 
void BuildIntegerVector (const std::vector< std::vector< int32 > > &in, std::vector< int32 > *out)
 Build 'integer vector' out of vector of 'matlab-like' representation: 'b, b:e, b:s:e'. More...
 
void BuildIntegerVector (const std::vector< std::vector< int32 > > &in, CuArray< int32 > *out)
 Wrapper with 'CuArray<int32>' output. More...
 
template<typename Real >
void PosteriorToMatrix (const Posterior &post, const int32 post_dim, CuMatrix< Real > *mat)
 Wrapper of PosteriorToMatrix with CuMatrix argument. More...
 
template<typename Real >
void PosteriorToPdfMatrix (const Posterior &post, const TransitionModel &model, CuMatrix< Real > *mat)
 Wrapper of PosteriorToMatrixMapped with CuMatrix argument. More...
 
void LatticeAcousticRescore (const Matrix< BaseFloat > &log_like, const TransitionModel &trans_model, const std::vector< int32 > &state_times, Lattice *lat)
 

Typedef Documentation

typedef StdVectorRandomizer< int32 > Int32VectorRandomizer

Definition at line 267 of file nnet-randomizer.h.

typedef StdVectorRandomizer<std::vector<std::pair<int32, BaseFloat> > > PosteriorRandomizer

Definition at line 268 of file nnet-randomizer.h.
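A rough sketch of how these typedefs are used together with MatrixRandomizer and RandomizerMask for frame-level shuffling, in the spirit of the nnet1 training tools (the buffer and variable names are made up; the AddData/Randomize/Done/Next/Value interface follows the declarations in nnet-randomizer.h):

  NnetDataRandomizerOptions rnd_opts;
  RandomizerMask randomizer_mask(rnd_opts);
  MatrixRandomizer feature_randomizer(rnd_opts);     // CuMatrix features,
  PosteriorRandomizer targets_randomizer(rnd_opts);  // per-frame (pdf, weight) targets,

  feature_randomizer.AddData(feats);    // 'feats' : CuMatrix<BaseFloat>,
  targets_randomizer.AddData(targets);  // 'targets' : Posterior,

  // shuffle both buffers with the same mask, so frames stay aligned,
  const std::vector<int32>& mask = randomizer_mask.Generate(feature_randomizer.NumFrames());
  feature_randomizer.Randomize(mask);
  targets_randomizer.Randomize(mask);

  for ( ; !feature_randomizer.Done(); feature_randomizer.Next(), targets_randomizer.Next()) {
    const CuMatrixBase<BaseFloat>& nnet_in = feature_randomizer.Value();
    const Posterior& nnet_tgt = targets_randomizer.Value();
    // ... forward/backward pass on the mini-batch ...
  }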

Function Documentation

void kaldi::nnet1::BuildIntegerVector ( const std::vector< std::vector< int32 > > &  in,
std::vector< int32 > *  out 
)
inline

Build 'integer vector' out of vector of 'matlab-like' representation: 'b, b:e, b:s:e'.

b, e, s are integers, where b = beginning, e = end, s = step.

The sequence includes 'end', 1:3 => [ 1 2 3 ]. The 'step' has to be positive.

Definition at line 239 of file nnet-utils.h.

References rnnlm::i, rnnlm::j, KALDI_ASSERT, and KALDI_ERR.

Referenced by BuildIntegerVector(), Splice::InitData(), and CopyComponent::InitData().

240  {
241  // start with empty vector,
242  out->clear();
243  // loop over records,
244  for (int32 i = 0; i < in.size(); i++) {
245  // process i'th record,
246  int32 beg = 0, end = 0, step = 1;
247  switch (in[i].size()) {
248  case 1:
249  beg = in[i][0];
250  end = in[i][0];
251  step = 1;
252  break;
253  case 2:
254  beg = in[i][0];
255  end = in[i][1];
256  step = 1;
257  break;
258  case 3:
259  beg = in[i][0];
260  end = in[i][2];
261  step = in[i][1];
262  break;
263  default:
264  KALDI_ERR << "Something is wrong! (should be 1-3) : "
265  << in[i].size();
266  }
267  // check the inputs,
268  KALDI_ASSERT(beg <= end);
269  KALDI_ASSERT(step > 0); // positive,
270  // append values to vector,
271  for (int32 j = beg; j <= end; j += step) {
272  out->push_back(j);
273  }
274  }
275 }
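A minimal usage sketch (not part of the Kaldi sources): three parsed records covering the 'b', 'b:e' and 'b:s:e' forms are expanded and concatenated into one integer vector. Note that for the 3-element form the parsed order is {b, s, e}, matching the switch above.

  std::vector<std::vector<int32> > in;
  in.push_back({1, 3});       // 'b:e'   -> 1 2 3
  in.push_back({7});          // 'b'     -> 7
  in.push_back({10, 2, 14});  // 'b:s:e' -> 10 12 14
  std::vector<int32> out;
  BuildIntegerVector(in, &out);
  // out == [ 1 2 3 7 10 12 14 ]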
void kaldi::nnet1::BuildIntegerVector ( const std::vector< std::vector< int32 > > &  in,
CuArray< int32 > *  out 
)
inline

Wrapper with 'CuArray<int32>' output.

Definition at line 280 of file nnet-utils.h.

References BuildIntegerVector().

281  {
282  std::vector<int32> v;
283  BuildIntegerVector(in, &v);
284  (*out) = v;
285 }
void kaldi::nnet1::CheckNanInf ( const CuMatrixBase< Real > &  mat,
const char *  msg = "" 
)

Check that matrix contains no nan or inf.

Definition at line 132 of file nnet-utils.h.

References KALDI_ERR, KALDI_ISINF, KALDI_ISNAN, and CuMatrixBase< Real >::Sum().

Referenced by Rbm::RbmUpdate().

132  {
133  Real sum = mat.Sum();
134  if (KALDI_ISINF(sum)) { KALDI_ERR << "'inf' in " << msg; }
135  if (KALDI_ISNAN(sum)) { KALDI_ERR << "'nan' in " << msg; }
136 }
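A small usage sketch (the buffer name is hypothetical): the 'msg' argument is only used to label the error message when an 'inf' or 'nan' is detected.

  CuMatrix<BaseFloat> grad(128, 1024);
  // ... compute the gradient into 'grad' ...
  CheckNanInf(grad, "gradient of hidden layer");  // KALDI_ERR is raised on inf/nan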
Real kaldi::nnet1::ComputeStdDev ( const CuMatrixBase< Real > &  mat)

Get the standard deviation of values in the matrix.

Definition at line 142 of file nnet-utils.h.

References KALDI_WARN, CuMatrixBase< Real >::MulElements(), CuMatrixBase< Real >::NumCols(), CuMatrixBase< Real >::NumRows(), and CuMatrixBase< Real >::Sum().

Referenced by Rbm::RbmUpdate().

142  {
143  int32 N = mat.NumRows() * mat.NumCols();
144  Real mean = mat.Sum() / N;
145  CuMatrix<Real> pow_2(mat);
146  pow_2.MulElements(mat);
147  Real var = pow_2.Sum() / N - mean * mean;
148  if (var < 0.0) {
149  KALDI_WARN << "Forcing the variance to be non-negative! " << var << "->0.0";
150  var = 0.0;
151  }
152  return sqrt(var);
153 }
void kaldi::nnet1::CountCorrectFramesWeighted ( const CuArray< T > &  hyp,
const CuArray< T > &  ref,
const CuVectorBase< BaseFloat > &  weights,
Vector< double > *  correct 
)
inline

Helper function of Xent::Eval, calculates the number of matching elements in 'hyp' and 'ref', weighted by 'weights'.

Definition at line 40 of file nnet-loss.cc.

References CuVectorBase< Real >::CopyToVec(), CuArray< T >::CopyToVec(), CuArray< T >::Dim(), VectorBase< Real >::Dim(), CuVectorBase< Real >::Dim(), rnnlm::i, and KALDI_ASSERT.

Referenced by Xent::Eval().

43  {
44  KALDI_ASSERT(hyp.Dim() == ref.Dim());
45  KALDI_ASSERT(hyp.Dim() == weights.Dim());
46  int32 dim = hyp.Dim();
47  // Get GPU data to host,
48  std::vector<T> hyp_h(dim), ref_h(dim);
49  hyp.CopyToVec(&hyp_h);
50  ref.CopyToVec(&ref_h);
51  Vector<BaseFloat> w(dim);
52  weights.CopyToVec(&w);
53  // Accumulate weighted counts of correct frames,
54  for (int32 i = 0; i < dim; i++) {
55  KALDI_ASSERT(ref_h[i] < correct->Dim());
56  (*correct)(ref_h[i]) += w(i) * (hyp_h[i] == ref_h[i] ? 1.0 : 0.0);
57  }
58 }
void LatticeAcousticRescore ( const Matrix< BaseFloat > &  log_like,
const TransitionModel &  trans_model,
const std::vector< int32 > &  state_times,
Lattice *  lat 
)

Definition at line 45 of file nnet-train-mmi-sequential.cc.

References rnnlm::i, KALDI_ASSERT, KALDI_ERR, MatrixBase< Real >::NumRows(), and TransitionModel::TransitionIdToPdf().

Referenced by main().

48  {
49  kaldi::uint64 props = lat->Properties(fst::kFstProperties, false);
50  if (!(props & fst::kTopSorted))
51  KALDI_ERR << "Input lattice must be topologically sorted.";
52 
53  KALDI_ASSERT(!state_times.empty());
54  std::vector<std::vector<int32> > time_to_state(log_like.NumRows());
55  for (size_t i = 0; i < state_times.size(); i++) {
56  KALDI_ASSERT(state_times[i] >= 0);
57  if (state_times[i] < log_like.NumRows()) // end state may be past this..
58  time_to_state[state_times[i]].push_back(i);
59  else
60  KALDI_ASSERT(state_times[i] == log_like.NumRows()
61  && "There appears to be lattice/feature mismatch.");
62  }
63 
64  for (int32 t = 0; t < log_like.NumRows(); t++) {
65  for (size_t i = 0; i < time_to_state[t].size(); i++) {
66  int32 state = time_to_state[t][i];
67  for (fst::MutableArcIterator<Lattice> aiter(lat, state); !aiter.Done();
68  aiter.Next()) {
69  LatticeArc arc = aiter.Value();
70  int32 trans_id = arc.ilabel;
71  if (trans_id != 0) { // Non-epsilon input label on arc
72  int32 pdf_id = trans_model.TransitionIdToPdf(trans_id);
73  arc.weight.SetValue2(-log_like(t, pdf_id) + arc.weight.Value2());
74  aiter.SetValue(arc);
75  }
76  }
77  }
78  }
79 }
std::string kaldi::nnet1::MomentStatistics ( const VectorBase< Real > &  vec)

Get a string with statistics of the data in a vector, so we can print them easily.

Definition at line 63 of file nnet-utils.h.

References VectorBase< Real >::Add(), VectorBase< Real >::Dim(), VectorBase< Real >::Max(), VectorBase< Real >::Min(), VectorBase< Real >::MulElements(), and VectorBase< Real >::Sum().

Referenced by ParametricRelu::Info(), LinearTransform::Info(), AffineTransform::Info(), RecurrentComponent::Info(), ConvolutionalComponent::Info(), Convolutional2DComponent::Info(), LstmProjected::Info(), AddShift::Info(), BlstmProjected::Info(), Rescale::Info(), Nnet::InfoBackPropagate(), ParametricRelu::InfoGradient(), LinearTransform::InfoGradient(), AffineTransform::InfoGradient(), RecurrentComponent::InfoGradient(), ConvolutionalComponent::InfoGradient(), Convolutional2DComponent::InfoGradient(), LstmProjected::InfoGradient(), AddShift::InfoGradient(), BlstmProjected::InfoGradient(), Rescale::InfoGradient(), Nnet::InfoPropagate(), and MomentStatistics().

63  {
64  // we use an auxiliary vector for the higher order powers
65  Vector<Real> vec_aux(vec);
66  Vector<Real> vec_no_mean(vec); // vec with mean subtracted
67  // mean
68  Real mean = vec.Sum() / vec.Dim();
69  // variance
70  vec_aux.Add(-mean);
71  vec_no_mean = vec_aux;
72  vec_aux.MulElements(vec_no_mean); // (vec-mean)^2
73  Real variance = vec_aux.Sum() / vec.Dim();
74  // skewness
75  // - negative : left tail is longer,
76  // - positive : right tail is longer,
77  // - zero : symmetric
78  vec_aux.MulElements(vec_no_mean); // (vec-mean)^3
79  Real skewness = vec_aux.Sum() / pow(variance, 3.0/2.0) / vec.Dim();
80  // kurtosis (peakedness)
81  // - makes sense for symmetric distributions (skewness is zero)
82  // - positive : 'sharper peak' than Normal distribution
83  // - negative : 'heavier tails' than Normal distribution
84  // - zero : same peakedness as the Normal distribution
85  vec_aux.MulElements(vec_no_mean); // (vec-mean)^4
86  Real kurtosis = vec_aux.Sum() / (variance * variance) / vec.Dim() - 3.0;
87  // send the statistics to stream,
88  std::ostringstream ostr;
89  ostr << " ( min " << vec.Min() << ", max " << vec.Max()
90  << ", mean " << mean
91  << ", stddev " << sqrt(variance)
92  << ", skewness " << skewness
93  << ", kurtosis " << kurtosis
94  << " ) ";
95  return ostr.str();
96 }
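A small usage sketch (the values in the comment are only illustrative): this is how the Info()/InfoGradient() methods of the components typically print their parameter statistics.

  Vector<BaseFloat> bias(1024);
  // ... fill 'bias' ...
  KALDI_LOG << "bias" << MomentStatistics(bias);
  // e.g. "bias ( min -0.9, max 1.1, mean 0.01, stddev 0.4, skewness 0.02, kurtosis -0.3 )"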
std::string kaldi::nnet1::MomentStatistics ( const MatrixBase< Real > &  mat)

Overload MomentStatistics to MatrixBase<Real>

Definition at line 102 of file nnet-utils.h.

References VectorBase< Real >::CopyRowsFromMat(), MomentStatistics(), MatrixBase< Real >::NumCols(), and MatrixBase< Real >::NumRows().

102  {
103  Vector<Real> vec(mat.NumRows()*mat.NumCols());
104  vec.CopyRowsFromMat(mat);
105  return MomentStatistics(vec);
106 }
std::string kaldi::nnet1::MomentStatistics ( const CuVectorBase< Real > &  vec)

Overload MomentStatistics to CuVectorBase<Real>

Definition at line 112 of file nnet-utils.h.

References CuVectorBase< Real >::CopyToVec(), CuVectorBase< Real >::Dim(), and MomentStatistics().

112  {
113  Vector<Real> vec_host(vec.Dim());
114  vec.CopyToVec(&vec_host);
115  return MomentStatistics(vec_host);
116 }
std::string kaldi::nnet1::MomentStatistics ( const CuMatrixBase< Real > &  mat)

Overload MomentStatistics to CuMatrix<Real>

Definition at line 122 of file nnet-utils.h.

References CuMatrixBase< Real >::CopyToMat(), MomentStatistics(), CuMatrixBase< Real >::NumCols(), and CuMatrixBase< Real >::NumRows().

122  {
123  Matrix<Real> mat_host(mat.NumRows(), mat.NumCols());
124  mat.CopyToMat(&mat_host);
125  return MomentStatistics(mat_host);
126 }
std::ostream& kaldi::nnet1::operator<< ( std::ostream &  os,
const std::vector< T > &  v 
)

Defines the stream insertion operator for 'std::vector', useful for log-prints.

Definition at line 43 of file nnet-utils.h.

43  {
44  std::copy(v.begin(), v.end(), std::ostream_iterator<T>(os, " "));
45  return os;
46 }
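A small usage sketch: with this operator in scope, vectors can be streamed directly into the log macros.

  std::vector<int32> frame_offsets = {-2, -1, 0, 1, 2};
  KALDI_LOG << "splice offsets : " << frame_offsets;
  // logs "splice offsets : -2 -1 0 1 2 " (elements separated by spaces)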
void kaldi::nnet1::PosteriorToMatrix ( const Posterior &  post,
const int32  post_dim,
CuMatrix< Real > *  mat 
)

Wrapper of PosteriorToMatrix with CuMatrix argument.

Definition at line 292 of file nnet-utils.h.

Referenced by Xent::Eval(), Mse::Eval(), and MultiTaskLoss::Eval().

293  {
294  Matrix<Real> m;
295  PosteriorToMatrix(post, post_dim, &m);
296  (*mat) = m;
297 }
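A small usage sketch (the dimension 10 is arbitrary): 'post' holds per-frame lists of (index, weight) pairs, and each pair becomes one entry of the corresponding row of the dense output matrix.

  Posterior post(3);  // 3 frames,
  post[0].push_back(std::make_pair(4, 1.0));  // 1-hot target,
  post[1].push_back(std::make_pair(7, 1.0));
  post[2].push_back(std::make_pair(4, 0.5));  // soft target split over two pdfs,
  post[2].push_back(std::make_pair(5, 0.5));
  CuMatrix<BaseFloat> targets;
  PosteriorToMatrix(post, 10, &targets);  // 3x10 CuMatrix, zeros elsewhere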
void kaldi::nnet1::PosteriorToPdfMatrix ( const Posterior &  post,
const TransitionModel &  model,
CuMatrix< Real > *  mat 
)

Wrapper of PosteriorToMatrixMapped with CuMatrix argument.

Definition at line 304 of file nnet-utils.h.

Referenced by main().

306  {
307  Matrix<BaseFloat> m;
308  PosteriorToPdfMatrix(post, model, &m);
309  // Copy to output GPU matrix,
310  (*mat) = m;
311 }
void kaldi::nnet1::RandGauss ( BaseFloat  mu,
BaseFloat  sigma,
CuMatrixBase< Real > *  mat,
struct RandomState *  state = NULL 
)

Fill CuMatrix with random numbers (Gaussian distribution): mu = the mean value, sigma = standard deviation.

Using the CPU random generator.

Definition at line 164 of file nnet-utils.h.

References CuMatrixBase< Real >::CopyFromMat(), kaldi::kUndefined, CuMatrixBase< Real >::NumCols(), and CuMatrixBase< Real >::NumRows().

Referenced by AffineTransform::InitData(), LinearTransform::InitData(), ConvolutionalComponent::InitData(), Convolutional2DComponent::InitData(), Rbm::InitData(), and InitRand().

165  {
166  // fill temporary matrix with 'Normal' samples,
167  Matrix<Real> m(mat->NumRows(), mat->NumCols(), kUndefined);
168  for (int32 r = 0; r < m.NumRows(); r++) {
169  for (int32 c = 0; c < m.NumCols(); c++) {
170  m(r, c) = RandGauss(state);
171  }
172  }
173  // re-shape the distribution,
174  m.Scale(sigma);
175  m.Add(mu);
176  // export,
177  mat->CopyFromMat(m);
178 }
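A small usage sketch (layer sizes and parameter values are only illustrative), similar in spirit to what the InitData() methods of the trainable components do:

  CuMatrix<BaseFloat> linearity(512, 440);
  RandGauss(0.0, 0.1, &linearity);   // Gaussian, mean 0.0, stddev 0.1,
  CuVector<BaseFloat> bias(512);
  RandUniform(0.0, 0.2, &bias);      // uniform in [-0.1, 0.1],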
void kaldi::nnet1::RandUniform ( BaseFloat  mu,
BaseFloat  range,
CuMatrixBase< Real > *  mat,
struct RandomState *  state = NULL 
)

Fill CuMatrix with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2).

Using the CPU random generator.

Definition at line 188 of file nnet-utils.h.

References CuMatrixBase< Real >::CopyFromMat(), kaldi::kUndefined, CuMatrixBase< Real >::NumCols(), CuMatrixBase< Real >::NumRows(), and kaldi::Rand().

Referenced by AffineTransform::InitData(), RecurrentComponent::InitData(), LstmProjected::InitData(), BlstmProjected::InitData(), ConvolutionalComponent::InitData(), Convolutional2DComponent::InitData(), and Rbm::InitData().

189  {
190  // fill temporary matrix with '0..1' samples,
191  Matrix<Real> m(mat->NumRows(), mat->NumCols(), kUndefined);
192  for (int32 r = 0; r < m.NumRows(); r++) {
193  for (int32 c = 0; c < m.NumCols(); c++) {
194  m(r, c) = Rand(state) / static_cast<Real>(RAND_MAX);
195  }
196  }
197  // re-shape the distribution,
198  m.Scale(range); // 0..range,
199  m.Add(mu - (range / 2.0)); // mu-range/2 .. mu+range/2,
200  // export,
201  mat->CopyFromMat(m);
202 }
void kaldi::nnet1::RandUniform ( BaseFloat  mu,
BaseFloat  range,
CuVectorBase< Real > *  vec,
struct RandomState *  state = NULL 
)

Fill CuVector with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2).

Using the CPU random generator.

Definition at line 212 of file nnet-utils.h.

References CuVectorBase< Real >::CopyFromVec(), CuVectorBase< Real >::Dim(), rnnlm::i, kaldi::kUndefined, and kaldi::Rand().

213  {
214  // fill temporary vector with '0..1' samples,
215  Vector<Real> v(vec->Dim(), kUndefined);
216  for (int32 i = 0; i < v.Dim(); i++) {
217  v(i) = Rand(state) / static_cast<Real>(RAND_MAX);
218  }
219  // re-shape the distribution,
220  v.Scale(range); // 0..range,
221  v.Add(mu - (range / 2.0)); // mu-range/2 .. mu+range/2,
222  // export,
223  vec->CopyFromVec(v);
224 }
Component* kaldi::nnet1::ReadComponentFromString ( const std::string &  s)

Definition at line 45 of file nnet-component-test.cc.

References Component::Read().

Referenced by UnitTestConvolutional2DComponent(), UnitTestConvolutionalComponent3x3(), UnitTestConvolutionalComponentUnity(), UnitTestDropoutComponent(), UnitTestLengthNorm(), and UnitTestSimpleSentenceAveragingComponent().

45  {
46  std::istringstream is(s + "\n");
47  return Component::Read(is, false); // false for ascii
48  }
void kaldi::nnet1::ReadCuMatrixFromString ( const std::string &  s,
CuMatrix< Real > *  m 
)

Definition at line 40 of file nnet-component-test.cc.

References CuMatrix< Real >::Read().

Referenced by UnitTestAveragePooling2DComponent(), UnitTestConvolutional2DComponent(), UnitTestConvolutionalComponent3x3(), UnitTestConvolutionalComponentUnity(), UnitTestLengthNorm(), UnitTestMaxPooling2DComponent(), UnitTestMaxPoolingComponent(), and UnitTestSimpleSentenceAveragingComponent().

40  {
41  std::istringstream is(s + "\n");
42  m->Read(is, false); // false for ascii
43  }
void kaldi::nnet1::UnitTestAveragePooling2DComponent ( )

Definition at line 297 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), Component::Init(), KALDI_LOG, Component::Propagate(), and ReadCuMatrixFromString().

Referenced by main().

297  { /* Implemented by Harish Mallidi */
298  // make average-pooling2d component
299  Component* c = Component::Init(
300  "<AveragePooling2DComponent> <InputDim> 56 <OutputDim> 18 \
301  <FmapXLen> 4 <FmapYLen> 7 <PoolXLen> 2 <PoolYLen> 3 \
302  <PoolXStep> 1 <PoolYStep> 2"
303  );
304 
305  // input matrix,
306  CuMatrix<BaseFloat> mat_in;
307  ReadCuMatrixFromString("[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 \
308  11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18 19 19 20 20 \
309  21 21 22 22 23 23 24 24 25 25 26 26 27 27 ]", &mat_in);
310 
311  // expected output (average values in the patch)
312  CuMatrix<BaseFloat> mat_out_ref;
313  ReadCuMatrixFromString("[ 4.5 4.5 6.5 6.5 8.5 8.5 11.5 11.5 13.5 13.5 \
314  15.5 15.5 18.5 18.5 20.5 20.5 22.5 22.5 ]", &mat_out_ref);
315 
316  // propagate,
317  CuMatrix<BaseFloat> mat_out;
318  c->Propagate(mat_in, &mat_out);
319  KALDI_LOG << "mat_out" << mat_out << "mat_out_ref" << mat_out_ref;
320  AssertEqual(mat_out, mat_out_ref);
321 
322 
323  // derivative at the output of the component,
324  CuMatrix<BaseFloat> mat_out_diff(mat_out);
325  ReadCuMatrixFromString("[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 ]", &mat_out_diff);
326 
327  // expected backpropagated values,
328  CuMatrix<BaseFloat> mat_in_diff_ref; // hand-computed back-propagated values,
329  ReadCuMatrixFromString("[ 0 0 0 0 0.0833333 0.0833333 0.166667 0.166667 \
330  0.25 0.25 0.333333 0.333333 0.333333 0.333333 0.25 0.25 0.25 0.25 \
331  0.333333 0.333333 0.416667 0.416667 0.5 0.5 0.583333 0.583333 0.583333 \
332  0.583333 0.75 0.75 0.75 0.75 0.833333 0.833333 0.916667 0.916667 1 1 \
333  1.08333 1.08333 1.08333 1.08333 1 1 1 1 1.08333 1.08333 1.16667 1.16667 \
334  1.25 1.25 1.33333 1.33333 1.33333 1.33333 ]", &mat_in_diff_ref
335  );
336 
337  // backpropagate,
338  CuMatrix<BaseFloat> mat_in_diff;
339  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
340  KALDI_LOG << "mat_in_diff " << mat_in_diff
341  << " mat_in_diff_ref " << mat_in_diff_ref;
342  AssertEqual(mat_in_diff, mat_in_diff_ref);
343 
344  delete c;
345  }
void kaldi::nnet1::UnitTestConvolutional2DComponent ( )

Definition at line 348 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), KALDI_LOG, Component::Propagate(), ReadComponentFromString(), and ReadCuMatrixFromString().

Referenced by main().

348  { /* Implemented by Harish Mallidi */
349  // Convolutional2D component
350  Component* c = ReadComponentFromString("<Convolutional2DComponent> 18 56 \
351  <LearnRateCoef> 0 <BiasLearnRateCoef> 0 <FmapXLen> 4 <FmapYLen> 7 \
352  <FiltXLen> 2 <FiltYLen> 3 <FiltXStep> 1 <FiltYStep> 2 <ConnectFmap> 1 \
353  <Filters> [ 0 0 1 1 2 2 3 3 4 4 5 5 ; 0 0 1 1 2 2 3 3 4 4 5 5 ] \
354  <Bias> [ 0 0 ]"
355  );
356 
357  // input matrix
358  CuMatrix<BaseFloat> mat_in;
359  ReadCuMatrixFromString("[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 \
360  11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18 19 19 20 20 \
361  21 21 22 22 23 23 24 24 25 25 26 26 27 27 ]", &mat_in);
362 
363  CuMatrix<BaseFloat> mat_out_ref;
364  ReadCuMatrixFromString("[ 206 206 266 266 326 326 416 416 476 476 536 536 \
365  626 626 686 686 746 746 ]", &mat_out_ref);
366 
367  // propagate
368  CuMatrix<BaseFloat> mat_out;
369  c->Propagate(mat_in, &mat_out);
370  KALDI_LOG << "mat_out" << mat_out << "mat_out" << mat_out_ref;
371  AssertEqual(mat_out, mat_out_ref);
372 
373  // prepare mat_out_diff, mat_in_diff_ref,
374  CuMatrix<BaseFloat> mat_out_diff;
375  ReadCuMatrixFromString("[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 ]",
376  &mat_out_diff);
377 
378  CuMatrix<BaseFloat> mat_in_diff_ref;
379  ReadCuMatrixFromString("[ 0 0 0 0 0 0 2 2 2 2 4 4 8 8 0 0 3 3 4.5 4.5 8 8 \
380  9.5 9.5 13 13 20 20 9 9 18 18 19.5 19.5 23 23 24.5 24.5 28 28 41 41 \
381  36 36 48 48 51 51 56 56 59 59 64 64 80 80 ]", &mat_in_diff_ref);
382 
383  // backpropagate
384  CuMatrix<BaseFloat> mat_in_diff;
385  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
386  KALDI_LOG << "mat_in_diff " << mat_in_diff
387  << " mat_in_diff_ref " << mat_in_diff_ref;
388  AssertEqual(mat_in_diff, mat_in_diff_ref);
389 
390  delete c;
391  }
void kaldi::nnet1::UnitTestConvolutionalComponent3x3 ( )

Definition at line 134 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), KALDI_LOG, Component::Propagate(), ReadComponentFromString(), and ReadCuMatrixFromString().

Referenced by main().

134  {
135  // make 3x3 convolutional component,
136  // design such weights and input so output is zero,
137  Component* c = ReadComponentFromString("<ConvolutionalComponent> 9 15 \
138  <PatchDim> 3 <PatchStep> 1 <PatchStride> 5 \
139  <LearnRateCoef> 1.0 <BiasLearnRateCoef> 1.0 \
140  <MaxNorm> 0 \
141  <Filters> [ -1 -2 -7 0 0 0 1 2 7 ; \
142  -1 0 1 -3 0 3 -2 2 0 ; \
143  -4 0 0 -3 0 3 4 0 0 ] \
144  <Bias> [ -20 -20 -20 ]"
145  );
146 
147  // prepare input, reference output,
148  CuMatrix<BaseFloat> mat_in;
149  ReadCuMatrixFromString("[ 1 3 5 7 9 2 4 6 8 10 3 5 7 9 11 ]", &mat_in);
150  CuMatrix<BaseFloat> mat_out_ref;
151  ReadCuMatrixFromString("[ 0 0 0 0 0 0 0 0 0 ]", &mat_out_ref);
152 
153  // propagate,
154  CuMatrix<BaseFloat> mat_out;
155  c->Propagate(mat_in, &mat_out);
156  KALDI_LOG << "mat_in" << mat_in << "mat_out" << mat_out;
157  AssertEqual(mat_out, mat_out_ref);
158 
159  // prepare mat_out_diff, mat_in_diff_ref,
160  CuMatrix<BaseFloat> mat_out_diff;
161  ReadCuMatrixFromString("[ 1 0 0 1 1 0 1 1 1 ]", &mat_out_diff);
162  // hand-computed back-propagated values,
163  CuMatrix<BaseFloat> mat_in_diff_ref;
164  ReadCuMatrixFromString("[ -1 -4 -15 -8 -6 0 -3 -6 3 6 1 1 14 11 7 ]",
165  &mat_in_diff_ref);
166 
167  // backpropagate,
168  CuMatrix<BaseFloat> mat_in_diff;
169  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
170  KALDI_LOG << "mat_in_diff " << mat_in_diff
171  << " mat_in_diff_ref " << mat_in_diff_ref;
172  AssertEqual(mat_in_diff, mat_in_diff_ref);
173 
174  // clean,
175  delete c;
176  }
void kaldi::nnet1::UnitTestConvolutionalComponentUnity ( )

Definition at line 103 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), KALDI_LOG, Component::Propagate(), ReadComponentFromString(), and ReadCuMatrixFromString().

Referenced by main().

103  {
104  // make 'identity' convolutional component,
105  Component* c = ReadComponentFromString("<ConvolutionalComponent> 5 5 \
106  <PatchDim> 1 <PatchStep> 1 <PatchStride> 5 \
107  <LearnRateCoef> 1.0 <BiasLearnRateCoef> 1.0 \
108  <MaxNorm> 0 \
109  <Filters> [ 1 \
110  ] <Bias> [ 0 ]"
111  );
112 
113  // prepare input,
114  CuMatrix<BaseFloat> mat_in;
115  ReadCuMatrixFromString("[ 1 2 3 4 5 ] ", &mat_in);
116 
117  // propagate,
118  CuMatrix<BaseFloat> mat_out;
119  c->Propagate(mat_in, &mat_out);
120  KALDI_LOG << "mat_in" << mat_in << "mat_out" << mat_out;
121  AssertEqual(mat_in, mat_out);
122 
123  // backpropagate,
124  CuMatrix<BaseFloat> mat_out_diff(mat_in), mat_in_diff;
125  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
126  KALDI_LOG << "mat_out_diff " << mat_out_diff
127  << " mat_in_diff " << mat_in_diff;
128  AssertEqual(mat_out_diff, mat_in_diff);
129 
130  // clean,
131  delete c;
132  }
void kaldi::nnet1::UnitTestDropoutComponent ( )

Definition at line 393 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), Component::Propagate(), ReadComponentFromString(), CuMatrixBase< Real >::Set(), and CuMatrixBase< Real >::Sum().

Referenced by main().

393  {
394  Component* c = ReadComponentFromString("<Dropout> 100 100 <DropoutRetention> 0.7");
395  // buffers,
396  CuMatrix<BaseFloat> in(777, 100),
397  out,
398  out_diff,
399  in_diff;
400  // init,
401  in.Set(2.0);
402 
403  // propagate,
404  c->Propagate(in, &out);
405  AssertEqual(in.Sum(), out.Sum(), 0.01);
406 
407  // backprop,
408  out_diff = in;
409  c->Backpropagate(in, out, out_diff, &in_diff);
410  AssertEqual(in_diff, out);
411 
412  delete c;
413  }
void kaldi::nnet1::UnitTestLengthNorm ( )

Definition at line 54 of file nnet-component-test.cc.

References CuVectorBase< Real >::AddColSumMat(), CuVectorBase< Real >::ApplyPow(), kaldi::AssertEqual(), CuMatrixBase< Real >::MulElements(), Component::Propagate(), ReadComponentFromString(), ReadCuMatrixFromString(), and CuVectorBase< Real >::Set().

Referenced by main().

54  {
55  // make L2-length normalization component,
56  Component* c = ReadComponentFromString("<LengthNormComponent> 5 5");
57  // prepare input,
58  CuMatrix<BaseFloat> mat_in;
59  ReadCuMatrixFromString("[ 1 2 3 4 5 \n 2 3 5 6 8 ] ", &mat_in);
60  // propagate,
61  CuMatrix<BaseFloat> mat_out;
62  c->Propagate(mat_in, &mat_out);
63  // check the length,
64  mat_out.MulElements(mat_out); // ^2,
65  CuVector<BaseFloat> check_length_is_one(2);
66  check_length_is_one.AddColSumMat(1.0, mat_out, 0.0); // sum_of_cols(x^2),
67  check_length_is_one.ApplyPow(0.5); // L2norm = sqrt(sum_of_cols(x^2)),
68  CuVector<BaseFloat> ones(2);
69  ones.Set(1.0);
70  AssertEqual(check_length_is_one, ones);
71  }
void kaldi::nnet1::UnitTestMaxPooling2DComponent ( )

Definition at line 248 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), Component::Init(), KALDI_LOG, Component::Propagate(), and ReadCuMatrixFromString().

Referenced by main().

248  { /* Implemented by Harish Mallidi */
249  // make max-pooling2d component
250  Component* c = Component::Init(
251  "<MaxPooling2DComponent> <InputDim> 56 <OutputDim> 18 \
252  <FmapXLen> 4 <FmapYLen> 7 <PoolXLen> 2 <PoolYLen> 3 \
253  <PoolXStep> 1 <PoolYStep> 2"
254  );
255 
256  // input matrix,
257  CuMatrix<BaseFloat> mat_in;
258  ReadCuMatrixFromString("[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10 \
259  11 11 12 12 13 13 14 14 15 15 16 16 17 17 18 18 19 19 20 20 21 21 \
260  22 22 23 23 24 24 25 25 26 26 27 27 ]", &mat_in);
261 
262  // expected output (max values in the patch)
263  CuMatrix<BaseFloat> mat_out_ref;
264  ReadCuMatrixFromString("[ 9 9 11 11 13 13 16 16 18 18 \
265  20 20 23 23 25 25 27 27 ]", &mat_out_ref);
266 
267  // propagate,
268  CuMatrix<BaseFloat> mat_out;
269  c->Propagate(mat_in, &mat_out);
270  KALDI_LOG << "mat_out" << mat_out << "mat_out_ref" << mat_out_ref;
271  AssertEqual(mat_out, mat_out_ref);
272 
273 
274  // locations of max values will be shown
275  CuMatrix<BaseFloat> mat_out_diff(mat_out);
277  "[ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 ]", &mat_out_diff
278  );
279 
280  // expected backpropagated values,
281  CuMatrix<BaseFloat> mat_in_diff_ref; // hand-computed back-propagated values,
282  ReadCuMatrixFromString("[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 \
283  0.25 0.25 0 0 1 1 0 0 0 0 0.75 0.75 0 0 1 1 0 0 2.5 2.5 \
284  0 0 0 0 3 3 0 0 3.5 3.5 0 0 8 8 ]", &mat_in_diff_ref
285  );
286 
287  // backpropagate,
288  CuMatrix<BaseFloat> mat_in_diff;
289  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
290  KALDI_LOG << "mat_in_diff " << mat_in_diff
291  << " mat_in_diff_ref " << mat_in_diff_ref;
292  AssertEqual(mat_in_diff, mat_in_diff_ref);
293 
294  delete c;
295  }
void kaldi::nnet1::UnitTestMaxPoolingComponent ( )

Definition at line 179 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), Component::Init(), KALDI_LOG, Component::Propagate(), ReadCuMatrixFromString(), and CuMatrixBase< Real >::Set().

Referenced by main().

179  {
180  // make max-pooling component, assuming 4 conv. neurons,
181  // non-overlapping pool of size 3,
182  Component* c = Component::Init(
183  "<MaxPoolingComponent> <InputDim> 24 <OutputDim> 8 \
184  <PoolSize> 3 <PoolStep> 3 <PoolStride> 4"
185  );
186 
187  // input matrix,
188  CuMatrix<BaseFloat> mat_in;
189  ReadCuMatrixFromString("[ 3 8 2 9 \
190  8 3 9 3 \
191  2 4 9 6 \
192  \
193  2 4 2 0 \
194  6 4 9 4 \
195  7 3 0 3;\
196  \
197  5 4 7 8 \
198  3 9 5 6 \
199  3 4 8 9 \
200  \
201  5 4 5 6 \
202  3 1 4 5 \
203  8 2 1 7 ]", &mat_in);
204 
205  // expected output (max values in columns),
206  CuMatrix<BaseFloat> mat_out_ref;
207  ReadCuMatrixFromString("[ 8 8 9 9 \
208  7 4 9 4;\
209  5 9 8 9 \
210  8 4 5 7 ]", &mat_out_ref);
211 
212  // propagate,
213  CuMatrix<BaseFloat> mat_out;
214  c->Propagate(mat_in, &mat_out);
215  KALDI_LOG << "mat_out" << mat_out << "mat_out_ref" << mat_out_ref;
216  AssertEqual(mat_out, mat_out_ref);
217 
218  // locations of max values will be shown,
219  CuMatrix<BaseFloat> mat_out_diff(mat_out);
220  mat_out_diff.Set(1);
221  // expected backpropagated values (hand-computed),
222  CuMatrix<BaseFloat> mat_in_diff_ref;
223  ReadCuMatrixFromString("[ 0 1 0 1 \
224  1 0 1 0 \
225  0 0 1 0 \
226  \
227  0 1 0 0 \
228  0 1 1 1 \
229  1 0 0 0;\
230  \
231  1 0 0 0 \
232  0 1 0 0 \
233  0 0 1 1 \
234  \
235  0 1 1 0 \
236  0 0 0 0 \
237  1 0 0 1 ]", &mat_in_diff_ref);
238  // backpropagate,
239  CuMatrix<BaseFloat> mat_in_diff;
240  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
241  KALDI_LOG << "mat_in_diff " << mat_in_diff
242  << " mat_in_diff_ref " << mat_in_diff_ref;
243  AssertEqual(mat_in_diff, mat_in_diff_ref);
244 
245  delete c;
246  }
void kaldi::nnet1::UnitTestSimpleSentenceAveragingComponent ( )

Definition at line 73 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), rnnlm::i, CuMatrixBase< Real >::NumRows(), Component::Propagate(), ReadComponentFromString(), ReadCuMatrixFromString(), CuMatrixBase< Real >::Row(), and CuVectorBase< Real >::Set().

Referenced by main().

73  {
74  // make SimpleSentenceAveraging component,
76  "<SimpleSentenceAveragingComponent> 2 2 <GradientBoost> 10.0"
77  );
78  // prepare input,
79  CuMatrix<BaseFloat> mat_in;
80  ReadCuMatrixFromString("[ 0 0.5 \n 1 1 \n 2 1.5 ] ", &mat_in);
81 
82  // propagate,
83  CuMatrix<BaseFloat> mat_out;
84  c->Propagate(mat_in, &mat_out);
85  // check the output,
86  CuVector<BaseFloat> ones(2);
87  ones.Set(1.0);
88  for (int32 i = 0; i < mat_out.NumRows(); i++) {
89  AssertEqual(mat_out.Row(i), ones);
90  }
91 
92  // backpropagate,
93  CuMatrix<BaseFloat> dummy1(3, 2), dummy2(3, 2), diff_out(mat_in), diff_in;
94  // the average 1.0 in 'diff_in' will be boosted by 10.0,
95  c->Backpropagate(dummy1, dummy2, diff_out, &diff_in);
96  // check the output,
97  CuVector<BaseFloat> tens(2); tens.Set(10);
98  for (int32 i = 0; i < diff_in.NumRows(); i++) {
99  AssertEqual(diff_in.Row(i), tens);
100  }
101  }