kaldi::nnet1 Namespace Reference

Classes

class  AddShift
 Adds a shift to all rows of the matrix (can be used for global mean normalization). More...
 
class  AffineTransform
 
class  AveragePoolingComponent
 AveragePoolingComponent : The input/output matrices are split into submatrices of width 'pool_stride_'. More...
 
class  BlockSoftmax
 
class  BlstmProjected
 
class  Component
 Abstract class, building block of the network. More...
 
class  ConvolutionalComponent
 ConvolutionalComponent implements convolution over single axis (i.e. More...
 
class  CopyComponent
 Rearrange the matrix columns according to the indices in copy_from_indices_. More...
 
class  Dropout
 
class  FramePoolingComponent
 FramePoolingComponent : The input/output matrices are split into frames of width 'feature_dim_'. More...
 
class  HiddenSoftmax
 
class  KlHmm
 
class  LengthNormComponent
 Rescale the matrix-rows to have unit length (L2-norm). More...
 
class  LinearTransform
 
class  LossItf
 
struct  LossOptions
 
class  LstmProjected
 
class  MatrixBuffer
 A buffer for caching (utterance-key, feature-matrix) pairs. More...
 
struct  MatrixBufferOptions
 
class  MatrixRandomizer
 Shuffles the rows of a matrix according to the indices in the mask. More...
 
class  MaxPoolingComponent
 MaxPoolingComponent : The input/output matrices are split into submatrices of width 'pool_stride_'. More...
 
class  Mse
 
class  MultiBasisComponent
 
class  MultistreamComponent
 Class MultistreamComponent is an extension of UpdatableComponent for recurrent networks, which are trained with parallel sequences. More...
 
class  MultiTaskLoss
 
class  Nnet
 
struct  NnetDataRandomizerOptions
 Configuration variables that affect how frame-level shuffling is done. More...
 
struct  NnetTrainOptions
 
class  ParallelComponent
 
class  ParametricRelu
 
class  PdfPrior
 
struct  PdfPriorOptions
 
class  RandomizerMask
 Generates a randomly ordered vector of indices. More...
 
class  Rbm
 
class  RbmBase
 
struct  RbmTrainOptions
 
class  RecurrentComponent
 Component with recurrent connections, 'tanh' non-linearity. More...
 
class  Rescale
 Rescale the data column-wise by a vector (can be used for global variance normalization). More...
 
class  SentenceAveragingComponent
 Deprecated!!! Kept as Katka Zmolikova used it in JSALT 2015. More...
 
class  Sigmoid
 
class  SimpleSentenceAveragingComponent
 SimpleSentenceAveragingComponent does not have a nested network; it is intended to be used inside a <ParallelComponent>. More...
 
class  Softmax
 
class  Splice
 Splices the time context of the input features: input dim N, output dim k*N, with frame offsets o_1, o_2, ..., o_k. Example of an 11-frame context: offsets -5 -4 -3 -2 -1 0 1 2 3 4 5. More...
 
class  StdVectorRandomizer
 Randomizes elements of a vector according to a mask. More...
 
class  Tanh
 
class  UpdatableComponent
 Class UpdatableComponent is a Component with trainable parameters; it contains the SGD training hyper-parameters in NnetTrainOptions. More...
 
class  VectorRandomizer
 Randomizes elements of a vector according to a mask. More...
 
class  Xent
 

Typedefs

typedef StdVectorRandomizer< int32 > Int32VectorRandomizer
 
typedef StdVectorRandomizer< std::vector< std::pair< int32, BaseFloat > > > PosteriorRandomizer
 

Functions

template<typename Real >
void ReadCuMatrixFromString (const std::string &s, CuMatrix< Real > *m)
 
Component * ReadComponentFromString (const std::string &s)
 
void UnitTestLengthNorm ()
 
void UnitTestSimpleSentenceAveragingComponent ()
 
void UnitTestConvolutionalComponentUnity ()
 
void UnitTestConvolutionalComponent3x3 ()
 
void UnitTestMaxPoolingComponent ()
 
void UnitTestDropoutComponent ()
 
template<typename T >
void CountCorrectFramesWeighted (const CuArray< T > &hyp, const CuArray< T > &ref, const CuVectorBase< BaseFloat > &weights, Vector< double > *correct)
 Helper function of Xent::Eval, calculates the number of matching elements in 'hyp' and 'ref', weighted by 'weights'. More...
 
template<typename T >
std::ostream & operator<< (std::ostream &os, const std::vector< T > &v)
 Define stream insertion operator for 'std::vector', useful for log-prints. More...
 
template<typename T >
std::string ToString (const T &t)
 Convert a basic type to a string (please don't overuse). More...
 
template<typename Real >
std::string MomentStatistics (const VectorBase< Real > &vec)
 Get a string with statistics of the data in a vector, so we can print them easily. More...
 
template<typename Real >
std::string MomentStatistics (const MatrixBase< Real > &mat)
 Overload MomentStatistics to MatrixBase<Real> More...
 
template<typename Real >
std::string MomentStatistics (const CuVectorBase< Real > &vec)
 Overload MomentStatistics to CuVectorBase<Real> More...
 
template<typename Real >
std::string MomentStatistics (const CuMatrixBase< Real > &mat)
 Overload MomentStatistics to CuMatrix<Real> More...
 
template<typename Real >
void CheckNanInf (const CuMatrixBase< Real > &mat, const char *msg="")
 Check that matrix contains no nan or inf. More...
 
template<typename Real >
Real ComputeStdDev (const CuMatrixBase< Real > &mat)
 Get the standard deviation of values in the matrix. More...
 
template<typename Real >
void RandGauss (BaseFloat mu, BaseFloat sigma, CuMatrixBase< Real > *mat, struct RandomState *state=NULL)
 Fill CuMatrix with random numbers (Gaussian distribution): mu = the mean value, sigma = standard deviation. More...
 
template<typename Real >
void RandUniform (BaseFloat mu, BaseFloat range, CuMatrixBase< Real > *mat, struct RandomState *state=NULL)
 Fill CuMatrix with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2). More...
 
template<typename Real >
void RandUniform (BaseFloat mu, BaseFloat range, CuVectorBase< Real > *vec, struct RandomState *state=NULL)
 Fill CuVector with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2). More...
 
void BuildIntegerVector (const std::vector< std::vector< int32 > > &in, std::vector< int32 > *out)
 Build 'integer vector' out of vector of 'matlab-like' representation: 'b, b:e, b:s:e'. More...
 
void BuildIntegerVector (const std::vector< std::vector< int32 > > &in, CuArray< int32 > *out)
 Wrapper with 'CuArray<int32>' output. More...
 
template<typename Real >
void PosteriorToMatrix (const Posterior &post, const int32 post_dim, CuMatrix< Real > *mat)
 Wrapper of PosteriorToMatrix with CuMatrix argument. More...
 
template<typename Real >
void PosteriorToPdfMatrix (const Posterior &post, const TransitionModel &model, CuMatrix< Real > *mat)
 Wrapper of PosteriorToMatrixMapped with CuMatrix argument. More...
 
void LatticeAcousticRescore (const Matrix< BaseFloat > &log_like, const TransitionModel &trans_model, const std::vector< int32 > &state_times, Lattice *lat)
 

Typedef Documentation

◆ Int32VectorRandomizer

typedef StdVectorRandomizer< int32 > Int32VectorRandomizer

◆ PosteriorRandomizer

typedef StdVectorRandomizer<std::vector<std::pair<int32, BaseFloat> > > PosteriorRandomizer

Definition at line 268 of file nnet-randomizer.h.

Function Documentation

◆ BuildIntegerVector() [1/2]

void kaldi::nnet1::BuildIntegerVector ( const std::vector< std::vector< int32 > > &  in,
std::vector< int32 > *  out 
)
inline

Build 'integer vector' out of vector of 'matlab-like' representation: 'b, b:e, b:s:e'.

b, e, s are integers, where: b = beginning, e = end, s = step.

The sequence includes 'end', 1:3 => [ 1 2 3 ]. The 'step' has to be positive.

Definition at line 239 of file nnet-utils.h.

References rnnlm::i, rnnlm::j, KALDI_ASSERT, and KALDI_ERR.

Referenced by BuildIntegerVector(), Splice::InitData(), and CopyComponent::InitData().

240  {
241  // start with empty vector,
242  out->clear();
243  // loop over records,
244  for (int32 i = 0; i < in.size(); i++) {
245  // process i'th record,
246  int32 beg = 0, end = 0, step = 1;
247  switch (in[i].size()) {
248  case 1:
249  beg = in[i][0];
250  end = in[i][0];
251  step = 1;
252  break;
253  case 2:
254  beg = in[i][0];
255  end = in[i][1];
256  step = 1;
257  break;
258  case 3:
259  beg = in[i][0];
260  end = in[i][2];
261  step = in[i][1];
262  break;
263  default:
264  KALDI_ERR << "Something is wrong! (should be 1-3) : "
265  << in[i].size();
266  }
267  // check the inputs,
268  KALDI_ASSERT(beg <= end);
269  KALDI_ASSERT(step > 0); // positive,
270  // append values to vector,
271  for (int32 j = beg; j <= end; j += step) {
272  out->push_back(j);
273  }
274  }
275 }
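
A minimal usage sketch (the records below are illustrative, not from the source; assumes "nnet/nnet-utils.h" is included):

  // records: {1,3} stands for '1:3', {10,2,14} stands for '10:2:14' (b:s:e),
  std::vector<std::vector<int32> > in;
  in.push_back(std::vector<int32>{1, 3});
  in.push_back(std::vector<int32>{10, 2, 14});
  std::vector<int32> out;
  BuildIntegerVector(in, &out);
  // 'out' now holds: 1 2 3 10 12 14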

◆ BuildIntegerVector() [2/2]

void kaldi::nnet1::BuildIntegerVector ( const std::vector< std::vector< int32 > > &  in,
CuArray< int32 > *  out 
)
inline

Wrapper with 'CuArray<int32>' output.

Definition at line 280 of file nnet-utils.h.

References BuildIntegerVector().

281  {
282  std::vector<int32> v;
283  BuildIntegerVector(in, &v);
284  (*out) = v;
285 }

◆ CheckNanInf()

void kaldi::nnet1::CheckNanInf ( const CuMatrixBase< Real > &  mat,
const char *  msg = "" 
)

Check that matrix contains no nan or inf.

Definition at line 132 of file nnet-utils.h.

References KALDI_ERR, KALDI_ISINF, KALDI_ISNAN, and CuMatrixBase< Real >::Sum().

Referenced by Rbm::RbmUpdate().

132  {
133  Real sum = mat.Sum();
134  if (KALDI_ISINF(sum)) { KALDI_ERR << "'inf' in " << msg; }
135  if (KALDI_ISNAN(sum)) { KALDI_ERR << "'nan' in " << msg; }
136 }
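
A minimal usage sketch (the matrix name is hypothetical); the 'msg' argument only labels the buffer in the error message:

  CuMatrix<BaseFloat> grad(128, 256);
  // ... fill 'grad' during back-propagation ...
  CheckNanInf(grad, "gradient of layer 3");  // calls KALDI_ERR if the sum is nan or inf,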

◆ ComputeStdDev()

Real kaldi::nnet1::ComputeStdDev ( const CuMatrixBase< Real > &  mat)

Get the standard deviation of values in the matrix.

Definition at line 142 of file nnet-utils.h.

References KALDI_WARN, CuMatrixBase< Real >::MulElements(), CuMatrixBase< Real >::NumCols(), CuMatrixBase< Real >::NumRows(), and CuMatrixBase< Real >::Sum().

Referenced by Rbm::RbmUpdate().

142  {
143  int32 N = mat.NumRows() * mat.NumCols();
144  Real mean = mat.Sum() / N;
145  CuMatrix<Real> pow_2(mat);
146  pow_2.MulElements(mat);
147  Real var = pow_2.Sum() / N - mean * mean;
148  if (var < 0.0) {
149  KALDI_WARN << "Forcing the variance to be non-negative! " << var << "->0.0";
150  var = 0.0;
151  }
152  return sqrt(var);
153 }
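
A usage sketch (assuming CuMatrixBase::SetRandn() fills the matrix with standard-normal samples, so the result should be close to 1.0):

  CuMatrix<BaseFloat> act(100, 40);
  act.SetRandn();                          // zero-mean, unit-variance samples,
  BaseFloat stddev = ComputeStdDev(act);   // expected to be close to 1.0,
  KALDI_LOG << "stddev = " << stddev;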

◆ CountCorrectFramesWeighted()

void kaldi::nnet1::CountCorrectFramesWeighted ( const CuArray< T > &  hyp,
const CuArray< T > &  ref,
const CuVectorBase< BaseFloat > &  weights,
Vector< double > *  correct 
)
inline

Helper function of Xent::Eval, calculates the number of matching elements in 'hyp' and 'ref', weighted by 'weights'.

Definition at line 41 of file nnet-loss.cc.

References CuArrayBase< T >::CopyToVec(), CuVectorBase< Real >::CopyToVec(), CuArrayBase< T >::Dim(), VectorBase< Real >::Dim(), CuVectorBase< Real >::Dim(), rnnlm::i, and KALDI_ASSERT.

Referenced by Xent::Eval().

44  {
45  KALDI_ASSERT(hyp.Dim() == ref.Dim());
46  KALDI_ASSERT(hyp.Dim() == weights.Dim());
47  int32 dim = hyp.Dim();
48  // Get GPU data to host,
49  std::vector<T> hyp_h(dim), ref_h(dim);
50  hyp.CopyToVec(&hyp_h);
51  ref.CopyToVec(&ref_h);
52  Vector<BaseFloat> w(dim);
53  weights.CopyToVec(&w);
54  // Accumulate weighted counts of correct frames,
55  for (int32 i = 0; i < dim; i++) {
56  KALDI_ASSERT(ref_h[i] < correct->Dim());
57  (*correct)(ref_h[i]) += w(i) * (hyp_h[i] == ref_h[i] ? 1.0 : 0.0);
58  }
59 }
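
A small sketch with hand-made inputs (illustrative only, assuming CuArray can be constructed from a std::vector): class indices 1 and 2 are matched once each, class 0 is not.

  CuArray<int32> hyp(std::vector<int32>{1, 2, 2});
  CuArray<int32> ref(std::vector<int32>{1, 2, 0});
  CuVector<BaseFloat> weights(3);
  weights.Set(1.0);            // unit per-frame weights,
  Vector<double> correct(3);   // indexed by reference class, zero-initialized,
  CountCorrectFramesWeighted(hyp, ref, weights, &correct);
  // 'correct' now holds: [ 0 1 1 ]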

◆ LatticeAcousticRescore()

void LatticeAcousticRescore ( const Matrix< BaseFloat > &  log_like,
const TransitionModel trans_model,
const std::vector< int32 > &  state_times,
Lattice lat 
)

Definition at line 45 of file nnet-train-mmi-sequential.cc.

References rnnlm::i, KALDI_ASSERT, KALDI_ERR, MatrixBase< Real >::NumRows(), and TransitionModel::TransitionIdToPdf().

Referenced by main().

48  {
49  kaldi::uint64 props = lat->Properties(fst::kFstProperties, false);
50  if (!(props & fst::kTopSorted))
51  KALDI_ERR << "Input lattice must be topologically sorted.";
52 
53  KALDI_ASSERT(!state_times.empty());
54  std::vector<std::vector<int32> > time_to_state(log_like.NumRows());
55  for (size_t i = 0; i < state_times.size(); i++) {
56  KALDI_ASSERT(state_times[i] >= 0);
57  if (state_times[i] < log_like.NumRows()) // end state may be past this..
58  time_to_state[state_times[i]].push_back(i);
59  else
60  KALDI_ASSERT(state_times[i] == log_like.NumRows()
61  && "There appears to be lattice/feature mismatch.");
62  }
63 
64  for (int32 t = 0; t < log_like.NumRows(); t++) {
65  for (size_t i = 0; i < time_to_state[t].size(); i++) {
66  int32 state = time_to_state[t][i];
67  for (fst::MutableArcIterator<Lattice> aiter(lat, state); !aiter.Done();
68  aiter.Next()) {
69  LatticeArc arc = aiter.Value();
70  int32 trans_id = arc.ilabel;
71  if (trans_id != 0) { // Non-epsilon input label on arc
72  int32 pdf_id = trans_model.TransitionIdToPdf(trans_id);
73  arc.weight.SetValue2(-log_like(t, pdf_id) + arc.weight.Value2());
74  aiter.SetValue(arc);
75  }
76  }
77  }
78  }
79 }

◆ MomentStatistics() [1/4]

std::string kaldi::nnet1::MomentStatistics ( const VectorBase< Real > &  vec)

Get a string with statistics of the data in a vector, so we can print them easily.

Definition at line 63 of file nnet-utils.h.

References VectorBase< Real >::Add(), VectorBase< Real >::Dim(), VectorBase< Real >::Max(), VectorBase< Real >::Min(), VectorBase< Real >::MulElements(), and VectorBase< Real >::Sum().

Referenced by ParametricRelu::Info(), LinearTransform::Info(), AffineTransform::Info(), RecurrentComponent::Info(), ConvolutionalComponent::Info(), LstmProjected::Info(), AddShift::Info(), BlstmProjected::Info(), Rescale::Info(), Nnet::InfoBackPropagate(), ParametricRelu::InfoGradient(), LinearTransform::InfoGradient(), AffineTransform::InfoGradient(), RecurrentComponent::InfoGradient(), ConvolutionalComponent::InfoGradient(), LstmProjected::InfoGradient(), AddShift::InfoGradient(), BlstmProjected::InfoGradient(), Rescale::InfoGradient(), Nnet::InfoPropagate(), and MomentStatistics().

63  {
64  // we use an auxiliary vector for the higher order powers
65  Vector<Real> vec_aux(vec);
66  Vector<Real> vec_no_mean(vec); // vec with mean subtracted
67  // mean
68  Real mean = vec.Sum() / vec.Dim();
69  // variance
70  vec_aux.Add(-mean);
71  vec_no_mean = vec_aux;
72  vec_aux.MulElements(vec_no_mean); // (vec-mean)^2
73  Real variance = vec_aux.Sum() / vec.Dim();
74  // skewness
75  // - negative : left tail is longer,
76  // - positive : right tail is longer,
77  // - zero : symmetric
78  vec_aux.MulElements(vec_no_mean); // (vec-mean)^3
79  Real skewness = vec_aux.Sum() / pow(variance, 3.0/2.0) / vec.Dim();
80  // kurtosis (peakedness)
81  // - makes sense for symmetric distributions (skewness is zero)
82  // - positive : 'sharper peak' than Normal distribution
83  // - negative : 'heavier tails' than Normal distribution
84  // - zero : same peakedness as the Normal distribution
85  vec_aux.MulElements(vec_no_mean); // (vec-mean)^4
86  Real kurtosis = vec_aux.Sum() / (variance * variance) / vec.Dim() - 3.0;
87  // send the statistics to stream,
88  std::ostringstream ostr;
89  ostr << " ( min " << vec.Min() << ", max " << vec.Max()
90  << ", mean " << mean
91  << ", stddev " << sqrt(variance)
92  << ", skewness " << skewness
93  << ", kurtosis " << kurtosis
94  << " ) ";
95  return ostr.str();
96 }
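
A usage sketch; the printed values are illustrative, for N(0,1) samples they should be near (mean 0, stddev 1, skewness 0, kurtosis 0):

  Vector<BaseFloat> v(10000);
  v.SetRandn();                               // samples from N(0,1),
  KALDI_LOG << "v" << MomentStatistics(v);
  // e.g.: v ( min -3.8, max 3.9, mean 0.0, stddev 1.0, skewness 0.0, kurtosis 0.0 )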

◆ MomentStatistics() [2/4]

std::string kaldi::nnet1::MomentStatistics ( const MatrixBase< Real > &  mat)

Overload MomentStatistics to MatrixBase<Real>

Definition at line 102 of file nnet-utils.h.

References VectorBase< Real >::CopyRowsFromMat(), MomentStatistics(), MatrixBase< Real >::NumCols(), and MatrixBase< Real >::NumRows().

102  {
103  Vector<Real> vec(mat.NumRows()*mat.NumCols());
104  vec.CopyRowsFromMat(mat);
105  return MomentStatistics(vec);
106 }

◆ MomentStatistics() [3/4]

std::string kaldi::nnet1::MomentStatistics ( const CuVectorBase< Real > &  vec)

Overload MomentStatistics to CuVectorBase<Real>

Definition at line 112 of file nnet-utils.h.

References CuVectorBase< Real >::CopyToVec(), CuVectorBase< Real >::Dim(), and MomentStatistics().

112  {
113  Vector<Real> vec_host(vec.Dim());
114  vec.CopyToVec(&vec_host);
115  return MomentStatistics(vec_host);
116 }

◆ MomentStatistics() [4/4]

std::string kaldi::nnet1::MomentStatistics ( const CuMatrixBase< Real > &  mat)

Overload MomentStatistics to CuMatrix<Real>

Definition at line 122 of file nnet-utils.h.

References CuMatrixBase< Real >::CopyToMat(), MomentStatistics(), CuMatrixBase< Real >::NumCols(), and CuMatrixBase< Real >::NumRows().

122  {
123  Matrix<Real> mat_host(mat.NumRows(), mat.NumCols());
124  mat.CopyToMat(&mat_host);
125  return MomentStatistics(mat_host);
126 }

◆ operator<<()

std::ostream& kaldi::nnet1::operator<< ( std::ostream &  os,
const std::vector< T > &  v 
)

Define stream insertion operator for 'std::vector', useful for log-prints.

Definition at line 43 of file nnet-utils.h.

43  {
44  std::copy(v.begin(), v.end(), std::ostream_iterator<T>(os, " "));
45  return os;
46 }
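
A usage sketch; together with KALDI_LOG this prints the elements separated by spaces:

  std::vector<int32> offsets;
  offsets.push_back(-1);
  offsets.push_back(0);
  offsets.push_back(1);
  KALDI_LOG << "frame offsets: " << offsets;   // prints: frame offsets: -1 0 1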

◆ PosteriorToMatrix()

void kaldi::nnet1::PosteriorToMatrix ( const Posterior post,
const int32  post_dim,
CuMatrix< Real > *  mat 
)

Wrapper of PosteriorToMatrix with CuMatrix argument.

Definition at line 292 of file nnet-utils.h.

Referenced by Xent::Eval(), Mse::Eval(), and MultiTaskLoss::Eval().

293  {
294  Matrix<Real> m;
295  PosteriorToMatrix(post, post_dim, &m);
296  (*mat) = m;
297 }
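
A usage sketch (hand-made posteriors, illustrative): each frame's (index, weight) pairs are scattered into a dense row of width 'post_dim':

  Posterior post(2);                                    // 2 frames,
  post[0].push_back(std::make_pair(3, BaseFloat(1.0)));
  post[1].push_back(std::make_pair(0, BaseFloat(0.5)));
  post[1].push_back(std::make_pair(4, BaseFloat(0.5)));
  CuMatrix<BaseFloat> targets;
  PosteriorToMatrix(post, 5, &targets);                 // 2x5 dense matrix,
  // row 0: [ 0 0 0 1 0 ],  row 1: [ 0.5 0 0 0 0.5 ]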

◆ PosteriorToPdfMatrix()

void kaldi::nnet1::PosteriorToPdfMatrix ( const Posterior post,
const TransitionModel model,
CuMatrix< Real > *  mat 
)

Wrapper of PosteriorToMatrixMapped with CuMatrix argument.

Definition at line 304 of file nnet-utils.h.

Referenced by main().

306  {
307  Matrix<BaseFloat> m;
308  PosteriorToPdfMatrix(post, model, &m);
309  // Copy to output GPU matrix,
310  (*mat) = m;
311 }

◆ RandGauss()

void kaldi::nnet1::RandGauss ( BaseFloat  mu,
BaseFloat  sigma,
CuMatrixBase< Real > *  mat,
struct RandomState state = NULL 
)

Fill CuMatrix with random numbers (Gaussian distribution): mu = the mean value, sigma = standard deviation.

Using the CPU random generator.

Definition at line 164 of file nnet-utils.h.

References CuMatrixBase< Real >::CopyFromMat(), kaldi::kUndefined, CuMatrixBase< Real >::NumCols(), and CuMatrixBase< Real >::NumRows().

Referenced by AffineTransform::InitData(), LinearTransform::InitData(), ConvolutionalComponent::InitData(), Rbm::InitData(), and InitRand().

165  {
166  // fill temporary matrix with 'Normal' samples,
167  Matrix<Real> m(mat->NumRows(), mat->NumCols(), kUndefined);
168  for (int32 r = 0; r < m.NumRows(); r++) {
169  for (int32 c = 0; c < m.NumCols(); c++) {
170  m(r, c) = RandGauss(state);
171  }
172  }
 173  // re-shape the distribution,
174  m.Scale(sigma);
175  m.Add(mu);
176  // export,
177  mat->CopyFromMat(m);
178 }
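
A usage sketch for parameter initialization (dimensions are illustrative):

  CuMatrix<BaseFloat> weights(512, 440);
  RandGauss(0.0f, 0.1f, &weights);   // zero mean, stddev 0.1, CPU random generator,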

◆ RandUniform() [1/2]

void kaldi::nnet1::RandUniform ( BaseFloat  mu,
BaseFloat  range,
CuMatrixBase< Real > *  mat,
struct RandomState state = NULL 
)

Fill CuMatrix with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2).

Using the CPU random generator.

Definition at line 188 of file nnet-utils.h.

References CuMatrixBase< Real >::CopyFromMat(), kaldi::kUndefined, CuMatrixBase< Real >::NumCols(), CuMatrixBase< Real >::NumRows(), and kaldi::Rand().

Referenced by AffineTransform::InitData(), RecurrentComponent::InitData(), LstmProjected::InitData(), BlstmProjected::InitData(), ConvolutionalComponent::InitData(), and Rbm::InitData().

189  {
190  // fill temporary matrix with '0..1' samples,
191  Matrix<Real> m(mat->NumRows(), mat->NumCols(), kUndefined);
192  for (int32 r = 0; r < m.NumRows(); r++) {
193  for (int32 c = 0; c < m.NumCols(); c++) {
194  m(r, c) = Rand(state) / static_cast<Real>(RAND_MAX);
195  }
196  }
 197  // re-shape the distribution,
198  m.Scale(range); // 0..range,
199  m.Add(mu - (range / 2.0)); // mu-range/2 .. mu+range/2,
200  // export,
201  mat->CopyFromMat(m);
202 }
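
A usage sketch, analogous to RandGauss(): with mu = 0.0 and range = 0.1 the values land in mu-range/2 .. mu+range/2, i.e. roughly [-0.05, 0.05):

  CuMatrix<BaseFloat> weights(512, 440);
  RandUniform(0.0f, 0.1f, &weights);   // uniform initialization, CPU random generator,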

◆ RandUniform() [2/2]

void kaldi::nnet1::RandUniform ( BaseFloat  mu,
BaseFloat  range,
CuVectorBase< Real > *  vec,
struct RandomState state = NULL 
)

Fill CuVector with random numbers (Uniform distribution): mu = the mean value, range = the 'width' of the uniform PDF (spanning mu-range/2 .. mu+range/2).

Using the CPU random generator.

Definition at line 212 of file nnet-utils.h.

References CuVectorBase< Real >::CopyFromVec(), CuVectorBase< Real >::Dim(), rnnlm::i, kaldi::kUndefined, and kaldi::Rand().

213  {
214  // fill temporary vector with '0..1' samples,
215  Vector<Real> v(vec->Dim(), kUndefined);
216  for (int32 i = 0; i < v.Dim(); i++) {
217  v(i) = Rand(state) / static_cast<Real>(RAND_MAX);
218  }
 219  // re-shape the distribution,
220  v.Scale(range); // 0..range,
221  v.Add(mu - (range / 2.0)); // mu-range/2 .. mu+range/2,
222  // export,
223  vec->CopyFromVec(v);
224 }

◆ ReadComponentFromString()

Component* kaldi::nnet1::ReadComponentFromString ( const std::string &  s)

Definition at line 42 of file nnet-component-test.cc.

References Component::Read().

Referenced by UnitTestConvolutionalComponent3x3(), UnitTestConvolutionalComponentUnity(), UnitTestDropoutComponent(), UnitTestLengthNorm(), and UnitTestSimpleSentenceAveragingComponent().

42  {
43  std::istringstream is(s + "\n");
44  return Component::Read(is, false); // false for ascii
45  }

◆ ReadCuMatrixFromString()

void kaldi::nnet1::ReadCuMatrixFromString ( const std::string &  s,
CuMatrix< Real > *  m 
)

Definition at line 37 of file nnet-component-test.cc.

References CuMatrix< Real >::Read().

Referenced by UnitTestConvolutionalComponent3x3(), UnitTestConvolutionalComponentUnity(), UnitTestLengthNorm(), UnitTestMaxPoolingComponent(), and UnitTestSimpleSentenceAveragingComponent().

37  {
38  std::istringstream is(s + "\n");
39  m->Read(is, false); // false for ascii
40  }
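
A sketch of how the two test helpers combine in the unit tests above (the component spec and matrix values are illustrative):

  Component* c = ReadComponentFromString("<Sigmoid> 3 3");
  CuMatrix<BaseFloat> mat_in, mat_out;
  ReadCuMatrixFromString("[ 0 1 -1 ]", &mat_in);
  c->Propagate(mat_in, &mat_out);   // forward pass through the component,
  delete c;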

◆ ToString()

std::string kaldi::nnet1::ToString ( const T &  t)

Convert a basic type to a string (please don't overuse).

◆ UnitTestConvolutionalComponent3x3()

void kaldi::nnet1::UnitTestConvolutionalComponent3x3 ( )

Definition at line 131 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), KALDI_LOG, Component::Propagate(), ReadComponentFromString(), and ReadCuMatrixFromString().

Referenced by main().

131  {
132  // make 3x3 convolutional component,
133  // design such weights and input so output is zero,
134  Component* c = ReadComponentFromString("<ConvolutionalComponent> 9 15 \
135  <PatchDim> 3 <PatchStep> 1 <PatchStride> 5 \
136  <LearnRateCoef> 1.0 <BiasLearnRateCoef> 1.0 \
137  <MaxNorm> 0 \
138  <Filters> [ -1 -2 -7 0 0 0 1 2 7 ; \
139  -1 0 1 -3 0 3 -2 2 0 ; \
140  -4 0 0 -3 0 3 4 0 0 ] \
141  <Bias> [ -20 -20 -20 ]"
142  );
143 
144  // prepare input, reference output,
145  CuMatrix<BaseFloat> mat_in;
146  ReadCuMatrixFromString("[ 1 3 5 7 9 2 4 6 8 10 3 5 7 9 11 ]", &mat_in);
147  CuMatrix<BaseFloat> mat_out_ref;
148  ReadCuMatrixFromString("[ 0 0 0 0 0 0 0 0 0 ]", &mat_out_ref);
149 
150  // propagate,
151  CuMatrix<BaseFloat> mat_out;
152  c->Propagate(mat_in, &mat_out);
153  KALDI_LOG << "mat_in" << mat_in << "mat_out" << mat_out;
154  AssertEqual(mat_out, mat_out_ref);
155 
156  // prepare mat_out_diff, mat_in_diff_ref,
157  CuMatrix<BaseFloat> mat_out_diff;
158  ReadCuMatrixFromString("[ 1 0 0 1 1 0 1 1 1 ]", &mat_out_diff);
159  // hand-computed back-propagated values,
160  CuMatrix<BaseFloat> mat_in_diff_ref;
161  ReadCuMatrixFromString("[ -1 -4 -15 -8 -6 0 -3 -6 3 6 1 1 14 11 7 ]",
162  &mat_in_diff_ref);
163 
164  // backpropagate,
165  CuMatrix<BaseFloat> mat_in_diff;
166  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
167  KALDI_LOG << "mat_in_diff " << mat_in_diff
168  << " mat_in_diff_ref " << mat_in_diff_ref;
169  AssertEqual(mat_in_diff, mat_in_diff_ref);
170 
171  // clean,
172  delete c;
173  }

◆ UnitTestConvolutionalComponentUnity()

void kaldi::nnet1::UnitTestConvolutionalComponentUnity ( )

Definition at line 100 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), KALDI_LOG, Component::Propagate(), ReadComponentFromString(), and ReadCuMatrixFromString().

Referenced by main().

100  {
101  // make 'identity' convolutional component,
102  Component* c = ReadComponentFromString("<ConvolutionalComponent> 5 5 \
103  <PatchDim> 1 <PatchStep> 1 <PatchStride> 5 \
104  <LearnRateCoef> 1.0 <BiasLearnRateCoef> 1.0 \
105  <MaxNorm> 0 \
106  <Filters> [ 1 \
107  ] <Bias> [ 0 ]"
108  );
109 
110  // prepare input,
111  CuMatrix<BaseFloat> mat_in;
112  ReadCuMatrixFromString("[ 1 2 3 4 5 ] ", &mat_in);
113 
114  // propagate,
115  CuMatrix<BaseFloat> mat_out;
116  c->Propagate(mat_in, &mat_out);
117  KALDI_LOG << "mat_in" << mat_in << "mat_out" << mat_out;
118  AssertEqual(mat_in, mat_out);
119 
120  // backpropagate,
121  CuMatrix<BaseFloat> mat_out_diff(mat_in), mat_in_diff;
122  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
123  KALDI_LOG << "mat_out_diff " << mat_out_diff
124  << " mat_in_diff " << mat_in_diff;
125  AssertEqual(mat_out_diff, mat_in_diff);
126 
127  // clean,
128  delete c;
129  }

◆ UnitTestDropoutComponent()

void kaldi::nnet1::UnitTestDropoutComponent ( )

Definition at line 245 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), Component::Propagate(), ReadComponentFromString(), CuMatrixBase< Real >::Set(), and CuMatrixBase< Real >::Sum().

Referenced by main().

245  {
246  Component* c = ReadComponentFromString("<Dropout> 100 100 <DropoutRetention> 0.7");
247  // buffers,
248  CuMatrix<BaseFloat> in(777, 100),
249  out,
250  out_diff,
251  in_diff;
252  // init,
253  in.Set(2.0);
254 
255  // propagate,
256  c->Propagate(in, &out);
257  AssertEqual(in.Sum(), out.Sum(), 0.01);
258 
259  // backprop,
260  out_diff = in;
261  c->Backpropagate(in, out, out_diff, &in_diff);
262  AssertEqual(in_diff, out);
263 
264  delete c;
265  }

◆ UnitTestLengthNorm()

void kaldi::nnet1::UnitTestLengthNorm ( )

Definition at line 51 of file nnet-component-test.cc.

References CuVectorBase< Real >::AddColSumMat(), CuVectorBase< Real >::ApplyPow(), kaldi::AssertEqual(), CuMatrixBase< Real >::MulElements(), Component::Propagate(), ReadComponentFromString(), ReadCuMatrixFromString(), and CuVectorBase< Real >::Set().

Referenced by main().

51  {
52  // make L2-length normalization component,
53  Component* c = ReadComponentFromString("<LengthNormComponent> 5 5");
54  // prepare input,
55  CuMatrix<BaseFloat> mat_in;
56  ReadCuMatrixFromString("[ 1 2 3 4 5 \n 2 3 5 6 8 ] ", &mat_in);
57  // propagate,
58  CuMatrix<BaseFloat> mat_out;
59  c->Propagate(mat_in, &mat_out);
60  // check the length,
61  mat_out.MulElements(mat_out); // ^2,
62  CuVector<BaseFloat> check_length_is_one(2);
63  check_length_is_one.AddColSumMat(1.0, mat_out, 0.0); // sum_of_cols(x^2),
64  check_length_is_one.ApplyPow(0.5); // L2norm = sqrt(sum_of_cols(x^2)),
65  CuVector<BaseFloat> ones(2);
66  ones.Set(1.0);
67  AssertEqual(check_length_is_one, ones);
68  }

◆ UnitTestMaxPoolingComponent()

void kaldi::nnet1::UnitTestMaxPoolingComponent ( )

Definition at line 176 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), Component::Init(), KALDI_LOG, Component::Propagate(), ReadCuMatrixFromString(), and CuMatrixBase< Real >::Set().

Referenced by main().

176  {
177  // make max-pooling component, assuming 4 conv. neurons,
178  // non-overlapping pool of size 3,
179  Component* c = Component::Init(
180  "<MaxPoolingComponent> <InputDim> 24 <OutputDim> 8 \
181  <PoolSize> 3 <PoolStep> 3 <PoolStride> 4"
182  );
183 
184  // input matrix,
185  CuMatrix<BaseFloat> mat_in;
186  ReadCuMatrixFromString("[ 3 8 2 9 \
187  8 3 9 3 \
188  2 4 9 6 \
189  \
190  2 4 2 0 \
191  6 4 9 4 \
192  7 3 0 3;\
193  \
194  5 4 7 8 \
195  3 9 5 6 \
196  3 4 8 9 \
197  \
198  5 4 5 6 \
199  3 1 4 5 \
200  8 2 1 7 ]", &mat_in);
201 
202  // expected output (max values in columns),
203  CuMatrix<BaseFloat> mat_out_ref;
204  ReadCuMatrixFromString("[ 8 8 9 9 \
205  7 4 9 4;\
206  5 9 8 9 \
207  8 4 5 7 ]", &mat_out_ref);
208 
209  // propagate,
210  CuMatrix<BaseFloat> mat_out;
211  c->Propagate(mat_in, &mat_out);
212  KALDI_LOG << "mat_out" << mat_out << "mat_out_ref" << mat_out_ref;
213  AssertEqual(mat_out, mat_out_ref);
214 
215  // locations of max values will be shown,
216  CuMatrix<BaseFloat> mat_out_diff(mat_out);
217  mat_out_diff.Set(1);
218  // expected backpropagated values (hand-computed),
219  CuMatrix<BaseFloat> mat_in_diff_ref;
220  ReadCuMatrixFromString("[ 0 1 0 1 \
221  1 0 1 0 \
222  0 0 1 0 \
223  \
224  0 1 0 0 \
225  0 1 1 1 \
226  1 0 0 0;\
227  \
228  1 0 0 0 \
229  0 1 0 0 \
230  0 0 1 1 \
231  \
232  0 1 1 0 \
233  0 0 0 0 \
234  1 0 0 1 ]", &mat_in_diff_ref);
235  // backpropagate,
236  CuMatrix<BaseFloat> mat_in_diff;
237  c->Backpropagate(mat_in, mat_out, mat_out_diff, &mat_in_diff);
238  KALDI_LOG << "mat_in_diff " << mat_in_diff
239  << " mat_in_diff_ref " << mat_in_diff_ref;
240  AssertEqual(mat_in_diff, mat_in_diff_ref);
241 
242  delete c;
243  }

◆ UnitTestSimpleSentenceAveragingComponent()

void kaldi::nnet1::UnitTestSimpleSentenceAveragingComponent ( )

Definition at line 70 of file nnet-component-test.cc.

References kaldi::AssertEqual(), Component::Backpropagate(), rnnlm::i, CuMatrixBase< Real >::NumRows(), Component::Propagate(), ReadComponentFromString(), ReadCuMatrixFromString(), CuMatrixBase< Real >::Row(), and CuVectorBase< Real >::Set().

Referenced by main().

70  {
71  // make SimpleSentenceAveraging component,
73  "<SimpleSentenceAveragingComponent> 2 2 <GradientBoost> 10.0"
74  );
75  // prepare input,
76  CuMatrix<BaseFloat> mat_in;
77  ReadCuMatrixFromString("[ 0 0.5 \n 1 1 \n 2 1.5 ] ", &mat_in);
78 
79  // propagate,
80  CuMatrix<BaseFloat> mat_out;
81  c->Propagate(mat_in, &mat_out);
82  // check the output,
83  CuVector<BaseFloat> ones(2);
84  ones.Set(1.0);
85  for (int32 i = 0; i < mat_out.NumRows(); i++) {
86  AssertEqual(mat_out.Row(i), ones);
87  }
88 
89  // backpropagate,
90  CuMatrix<BaseFloat> dummy1(3, 2), dummy2(3, 2), diff_out(mat_in), diff_in;
91  // the average 1.0 in 'diff_in' will be boosted by 10.0,
92  c->Backpropagate(dummy1, dummy2, diff_out, &diff_in);
93  // check the output,
94  CuVector<BaseFloat> tens(2); tens.Set(10);
95  for (int32 i = 0; i < diff_in.NumRows(); i++) {
96  AssertEqual(diff_in.Row(i), tens);
97  }
98  }