nnet-multibasis-component.h
// nnet/nnet-multibasis-component.h

// Copyright 2016 Brno University of Technology (Author: Karel Vesely)

// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.


#ifndef KALDI_NNET_NNET_MULTIBASIS_COMPONENT_H_
#define KALDI_NNET_NNET_MULTIBASIS_COMPONENT_H_

#include <sstream>
#include <vector>
#include <string>

#include "nnet/nnet-component.h"
#include "nnet/nnet-nnet.h"
#include "nnet/nnet-affine-transform.h"

namespace kaldi {
namespace nnet1 {
class MultiBasisComponent : public UpdatableComponent {
 public:
  MultiBasisComponent(int32 dim_in, int32 dim_out) :
    UpdatableComponent(dim_in, dim_out),
    selector_lr_coef_(1.0),
    threshold_(0.1)
  { }

  ~MultiBasisComponent()
  { }

  Component* Copy() const { return new MultiBasisComponent(*this); }
  ComponentType GetType() const { return kMultiBasisComponent; }

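  // A sketch of a prototype line that InitData() below can parse; the marker
  // <MultiBasisComponent> and the generic <InputDim>/<OutputDim> handling are
  // assumed from the nnet1 prototype conventions, and the dims / file names
  // are illustrative placeholders only (written as one line in practice):
  //   <MultiBasisComponent> <InputDim> 1034 <OutputDim> 1024
  //     <SelectorProto> selector.proto <SelectorLearnRateCoef> 1.0
  //     <BasisProto> basis.proto
  // Here <InputDim> = selector input dim + basis input dim, as checked by
  // the assert at the end of InitData().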
  void InitData(std::istream &is) {
    // define options,
    std::string selector_proto;
    std::string selector_filename;
    std::string basis_proto;
    std::string basis_filename;
    std::vector<std::string> basis_filename_vector;

    // parse config,
    std::string token;
    while (is >> std::ws, !is.eof()) {
      ReadToken(is, false, &token);
      if (token == "<SelectorProto>") ReadToken(is, false, &selector_proto);
      else if (token == "<SelectorFilename>") ReadToken(is, false, &selector_filename);
      else if (token == "<SelectorLearnRateCoef>") ReadBasicType(is, false, &selector_lr_coef_);
      else if (token == "<BasisProto>") ReadToken(is, false, &basis_proto);
      else if (token == "<BasisFilename>") ReadToken(is, false, &basis_filename);
      else if (token == "<BasisFilenameVector>") {
        while (is >> std::ws, !is.eof()) {
          std::string file_or_end;
          ReadToken(is, false, &file_or_end);
          if (file_or_end == "</BasisFilenameVector>") break;
          basis_filename_vector.push_back(file_or_end);
        }
      } else KALDI_ERR << "Unknown token " << token << ", typo in config?"
                       << " (SelectorProto|SelectorFilename|BasisProto"
                       << "|BasisFilename|BasisFilenameVector)";
    }

    // selector,
    if (selector_proto != "") {
      KALDI_LOG << "Initializing 'selector' from : " << selector_proto;
      selector_.Init(selector_proto);
    }
    if (selector_filename != "") {
      KALDI_LOG << "Reading 'selector' from : " << selector_filename;
      selector_.Read(selector_filename);
    }

    // as many empty basis as outputs of the selector,
    nnet_basis_.resize(selector_.OutputDim());

    // fill the basis,
    if (basis_proto != "") {
      // Initialized from prototype,
      KALDI_LOG << "Initializing 'basis' from : " << basis_proto;
      for (int32 i = 0; i < nnet_basis_.size(); i++) {
        nnet_basis_[i].Init(basis_proto);
      }
    } else if (basis_filename != "") {
      // Load 1 initial basis repeatedly,
      KALDI_LOG << "Reading 'basis' from : " << basis_filename;
      for (int32 i = 0; i < nnet_basis_.size(); i++) {
        nnet_basis_[i].Read(basis_filename);
      }
    } else if (basis_filename_vector.size() > 0) {
      // Read a list of basis functions,
      if (basis_filename_vector.size() != nnet_basis_.size()) {
        KALDI_ERR << "We need " << nnet_basis_.size() << " filenames. "
                  << "We got " << basis_filename_vector.size();
      }
      for (int32 i = 0; i < nnet_basis_.size(); i++) {
        KALDI_LOG << "Reading 'basis' from : "
                  << basis_filename_vector[i];
        nnet_basis_[i].Read(basis_filename_vector[i]);
      }
    } else {
      // Initialize basis by square identity matrix,
      int32 basis_input_dim = InputDim() - selector_.InputDim();
      KALDI_LOG << "Initializing 'basis' to Identity <AffineTransform> "
                << OutputDim() << "x" << basis_input_dim;
      KALDI_ASSERT(OutputDim() == basis_input_dim);  // has to be square!
      Matrix<BaseFloat> m(OutputDim(), basis_input_dim);
      m.SetUnit();
      // wrap identity into AffineTransform,
      // (bias is vector of zeros),
      AffineTransform identity_comp(basis_input_dim, OutputDim());
      identity_comp.SetLinearity(CuMatrix<BaseFloat>(m));
      //
      for (int32 i = 0; i < nnet_basis_.size(); i++) {
        nnet_basis_[i].AppendComponent(identity_comp);
      }
    }

    // check,
    KALDI_ASSERT(InputDim() == selector_.InputDim() + nnet_basis_[0].InputDim());
  }

  void ReadData(std::istream &is, bool binary) {
    // Read all the '<Tokens>' in arbitrary order,
    bool end_loop = false;
    while (!end_loop && '<' == Peek(is, binary)) {
      std::string token;
      int first_char = PeekToken(is, binary);
      switch (first_char) {
        case 'S': ReadToken(is, false, &token);
          if (token == "<SelectorLearnRateCoef>") ReadBasicType(is, binary, &selector_lr_coef_);
          else if (token == "<Selector>") selector_.Read(is, binary);
          else KALDI_ERR << "Unknown token: " << token;
          break;
        case 'N': ExpectToken(is, binary, "<NumBasis>");
          int32 num_basis;
          ReadBasicType(is, binary, &num_basis);
          nnet_basis_.resize(num_basis);
          for (int32 i = 0; i < num_basis; i++) {
            int32 dummy;
            ExpectToken(is, binary, "<Basis>");
            ReadBasicType(is, binary, &dummy);
            nnet_basis_[i].Read(is, binary);
          }
          break;
        case '!':
          ExpectToken(is, binary, "<!EndOfComponent>");
          end_loop = true;
          break;
        default:
          ReadToken(is, false, &token);
          KALDI_ERR << "Unknown token: " << token;
      }
    }

    // check,
    KALDI_ASSERT(InputDim() == selector_.InputDim() + nnet_basis_[0].InputDim());
  }

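  // A sketch of the layout written by WriteData() below (and read back by
  // ReadData() above), shown for the text mode:
  //   <SelectorLearnRateCoef> <value>
  //   <Selector> ...serialized selector Nnet...
  //   <NumBasis> N
  //   <Basis> 1 ...serialized basis Nnet...   (repeated for indices 1..N)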
  void WriteData(std::ostream &os, bool binary) const {
    int32 num_basis = nnet_basis_.size();
    WriteToken(os, binary, "<SelectorLearnRateCoef>");
    WriteBasicType(os, binary, selector_lr_coef_);
    if (!binary) os << "\n\n";
    WriteToken(os, binary, "<Selector>");
    if (!binary) os << "\n";
    selector_.Write(os, binary);
    if (!binary) os << "\n";
    WriteToken(os, binary, "<NumBasis>");
    WriteBasicType(os, binary, num_basis);
    if (!binary) os << "\n";
    for (int32 i = 0; i < num_basis; i++) {
      WriteToken(os, binary, "<Basis>");
      WriteBasicType(os, binary, i+1);
      if (!binary) os << "\n";
      nnet_basis_.at(i).Write(os, binary);
    }
  }

  Nnet& GetBasis(int32 id) { return nnet_basis_.at(id); }
  const Nnet& GetBasis(int32 id) const { return nnet_basis_.at(id); }

  int32 NumParams() const {
    int32 num_params_sum = selector_.NumParams();
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      num_params_sum += nnet_basis_[i].NumParams();
    }
    return num_params_sum;
  }

  void GetGradient(VectorBase<BaseFloat> *gradient) const {
    KALDI_ERR << "TODO, not yet implemented!";
  }

  void GetParams(VectorBase<BaseFloat> *params) const {
    int32 offset = 0;
    Vector<BaseFloat> params_tmp;
    // selector,
    selector_.GetParams(&params_tmp);
    params->Range(offset, params_tmp.Dim()).CopyFromVec(params_tmp);
    offset += params_tmp.Dim();
    // basis,
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      nnet_basis_[i].GetParams(&params_tmp);
      params->Range(offset, params_tmp.Dim()).CopyFromVec(params_tmp);
      offset += params_tmp.Dim();
    }
    KALDI_ASSERT(offset == NumParams());
  }

  void SetParams(const VectorBase<BaseFloat> &params) {
    int32 offset = 0;
    // selector,
    selector_.SetParams(params.Range(offset, selector_.NumParams()));
    offset += selector_.NumParams();
    // basis,
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      nnet_basis_[i].SetParams(params.Range(offset, nnet_basis_[i].NumParams()));
      offset += nnet_basis_[i].NumParams();
    }
    KALDI_ASSERT(offset == NumParams());
  }

  std::string Info() const {
    std::ostringstream os;
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      os << "basis_network #" << i+1 << " {\n"
         << nnet_basis_[i].Info()
         << "}\n";
    }
    os << "\nselector {\n"
       << selector_.Info()
       << "}";
    return os.str();
  }

  std::string InfoGradient() const {
    std::ostringstream os;
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      if (posterior_sum_(i) > threshold_) {
        os << "basis_gradient #" << i+1 << " {\n"
           << nnet_basis_[i].InfoGradient(false)
           << "}\n";
      }
    }
    os << "selector_gradient {\n"
       << selector_.InfoGradient(false)
       << "}";
    return os.str();
  }

  std::string InfoPropagate() const {
    std::ostringstream os;
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      if (posterior_sum_(i) > threshold_) {
        os << "basis_propagate #" << i+1 << " {\n"
           << nnet_basis_[i].InfoPropagate(false)
           << "}\n";
      }
    }
    os << "selector_propagate {\n"
       << selector_.InfoPropagate(false)
       << "}\n";
    return os.str();
  }

  std::string InfoBackPropagate() const {
    std::ostringstream os;
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      if (posterior_sum_(i) > threshold_) {
        os << "basis_backpropagate #" << i+1 << " {\n"
           << nnet_basis_[i].InfoBackPropagate(false)
           << "}\n";
      }
    }
    os << "selector_backpropagate {\n"
       << selector_.InfoBackPropagate(false)
       << "}\n";
    return os.str();
  }

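  // Forward pass: split 'in' into basis and selector columns, get the
  // per-frame posteriors from 'selector_', and accumulate the
  // posterior-weighted outputs of the active basis networks, i.e.
  //   out = \sum_i diag(posterior_row_i) * nnet_basis_[i](in_basis).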
  void PropagateFnc(const CuMatrixBase<BaseFloat> &in,
                    CuMatrixBase<BaseFloat> *out) {
    // dimensions,
    int32 num_basis = nnet_basis_.size();

    // make sure we have all the buffers,
    if (basis_out_.size() != num_basis) {
      basis_out_.resize(num_basis);
    }

    // split the input,
    const CuSubMatrix<BaseFloat> in_basis(
      in.ColRange(0, nnet_basis_[0].InputDim())
    );
    const CuSubMatrix<BaseFloat> in_selector(
      in.ColRange(nnet_basis_[0].InputDim(), selector_.InputDim())
    );

    // get the 'selector_' posteriors,
    selector_.Propagate(in_selector, &posterior_);
    KALDI_ASSERT(posterior_.Row(0).Min() >= 0.0);
    KALDI_ASSERT(posterior_.Row(0).Max() <= 1.0);
    KALDI_ASSERT(ApproxEqual(posterior_.Row(0).Sum(), 1.0));
    posterior_.Transpose();  // trans,

    // sum 'selector_' posteriors over time,
    CuVector<BaseFloat> posterior_sum(num_basis);
    posterior_sum.AddColSumMat(1.0, posterior_, 0.0);
    posterior_sum_ = Vector<BaseFloat>(posterior_sum);

    // combine the 'basis' outputs,
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      if (posterior_sum_(i) > threshold_) {
        // use only basis with occupancy >0.1,
        nnet_basis_[i].Propagate(in_basis, &basis_out_[i]);
        out->AddDiagVecMat(1.0, posterior_.Row(i), basis_out_[i], kNoTrans);
      }
    }
  }

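  // Backward pass: per frame, the selector receives the dot-product of
  // 'out_diff' with each basis output (scaled by selector_lr_coef_), while
  // each active basis network back-propagates 'out_diff' scaled by its own
  // posteriors; the resulting basis input-derivatives are summed.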
  void BackpropagateFnc(const CuMatrixBase<BaseFloat> &in,
                        const CuMatrixBase<BaseFloat> &out,
                        const CuMatrixBase<BaseFloat> &out_diff,
                        CuMatrixBase<BaseFloat> *in_diff) {
    // dimensions,
    int32 num_basis = nnet_basis_.size(),
          num_frames = in.NumRows();

    // split the in_diff,
    CuSubMatrix<BaseFloat> in_diff_basis(
      in_diff->ColRange(0, nnet_basis_[0].InputDim())
    );
    CuSubMatrix<BaseFloat> in_diff_selector(
      in_diff->ColRange(nnet_basis_[0].InputDim(), selector_.InputDim())
    );

    // backprop through 'selector',
    CuMatrix<BaseFloat> selector_out_diff(num_basis, num_frames);
    for (int32 i = 0; i < num_basis; i++) {
      if (posterior_sum_(i) > threshold_) {
        selector_out_diff.Row(i).AddDiagMatMat(1.0, out_diff, kNoTrans, basis_out_[i], kTrans, 0.0);
      }
    }
    selector_out_diff.Transpose();
    selector_out_diff.Scale(selector_lr_coef_);
    CuMatrix<BaseFloat> in_diff_selector_tmp;
    selector_.Backpropagate(selector_out_diff, &in_diff_selector_tmp);
    in_diff_selector.CopyFromMat(in_diff_selector_tmp);

    // backprop through 'basis',
    CuMatrix<BaseFloat> out_diff_scaled(num_frames, OutputDim()),
                        in_diff_basis_tmp;
    for (int32 i = 0; i < num_basis; i++) {
      // use only basis with occupancy >0.1,
      if (posterior_sum_(i) > threshold_) {
        out_diff_scaled.AddDiagVecMat(1.0, posterior_.Row(i), out_diff, kNoTrans, 0.0);
        nnet_basis_[i].Backpropagate(out_diff_scaled, &in_diff_basis_tmp);
        in_diff_basis.AddMat(1.0, in_diff_basis_tmp);
      }
    }
  }

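  // The nested 'selector_' and 'nnet_basis_' networks already update their
  // parameters inside their own Backpropagate() calls above, so this
  // override intentionally does nothing.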
  void Update(const CuMatrixBase<BaseFloat> &input,
              const CuMatrixBase<BaseFloat> &diff) {
    { }  // do nothing
  }

  /// Overriding the default, which was UpdatableComponent::SetTrainOptions(...)
  void SetTrainOptions(const NnetTrainOptions &opts) {
    selector_.SetTrainOptions(opts);
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      nnet_basis_[i].SetTrainOptions(opts);
    }
  }

  /// Overriding the default, which was UpdatableComponent::SetLearnRateCoef(...)
  void SetLearnRateCoef(BaseFloat val) {
    // loop over nnets,
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      // loop over components,
      for (int32 j = 0; j < nnet_basis_[i].NumComponents(); j++) {
        if (nnet_basis_[i].GetComponent(j).IsUpdatable()) {
          UpdatableComponent& comp =
            dynamic_cast<UpdatableComponent&>(nnet_basis_[i].GetComponent(j));
          // set the value,
          comp.SetLearnRateCoef(val);
        }
      }
    }
  }

  /// Overriding the default, which was UpdatableComponent::SetBiasLearnRateCoef(...)
  void SetBiasLearnRateCoef(BaseFloat val) {
    // loop over nnets,
    for (int32 i = 0; i < nnet_basis_.size(); i++) {
      // loop over components,
      for (int32 j = 0; j < nnet_basis_[i].NumComponents(); j++) {
        if (nnet_basis_[i].GetComponent(j).IsUpdatable()) {
          UpdatableComponent& comp =
            dynamic_cast<UpdatableComponent&>(nnet_basis_[i].GetComponent(j));
          // set the value,
          comp.SetBiasLearnRateCoef(val);
        }
      }
    }
  }

 private:
  /// The vector of 'basis' networks (output of basis is combined
  /// according to the posterior_ from the selector_),
  std::vector<Nnet> nnet_basis_;
  std::vector<CuMatrix<BaseFloat> > basis_out_;

  Nnet selector_;  ///< The 'selector' network,
  BaseFloat selector_lr_coef_;  ///< Learn-rate coefficient of the selector,

  CuMatrix<BaseFloat> posterior_;  ///< The output of 'selector_',
  Vector<BaseFloat> posterior_sum_;  ///< Sum of the posteriors over frames,

  BaseFloat threshold_;  ///< Threshold, applied to posterior_sum_,
                         ///< disables the unused basis,
};

}  // namespace nnet1
}  // namespace kaldi

#endif  // KALDI_NNET_NNET_MULTIBASIS_COMPONENT_H_