// nnet/nnet-loss.cc

// Copyright 2011-2015  Brno University of Technology (author: Karel Vesely)

// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.

#include <sstream>
#include <iterator>
#include <algorithm>
#include <iomanip>

#include "nnet/nnet-loss.h"
#include "nnet/nnet-utils.h"
#include "cudamatrix/cu-math.h"
#include "hmm/posterior.h"

namespace kaldi {
namespace nnet1 {


/* Xent */
/**
 * Helper function of Xent::Eval,
 * calculates the number of matching elements in 'hyp', 'ref' weighted by 'weights'.
 */
template <typename T>
inline void CountCorrectFramesWeighted(const CuArray<T> &hyp,
                                       const CuArray<T> &ref,
                                       const CuVectorBase<BaseFloat> &weights,
                                       Vector<double> *correct) {
  KALDI_ASSERT(hyp.Dim() == ref.Dim());
  KALDI_ASSERT(hyp.Dim() == weights.Dim());
  int32 dim = hyp.Dim();
  // Get GPU data to host,
  std::vector<T> hyp_h(dim), ref_h(dim);
  hyp.CopyToVec(&hyp_h);
  ref.CopyToVec(&ref_h);
  Vector<BaseFloat> w(dim);
  weights.CopyToVec(&w);
  // Accumulate weighted counts of correct frames,
  for (int32 i = 0; i < dim; i++) {
    KALDI_ASSERT(ref_h[i] < correct->Dim());
    (*correct)(ref_h[i]) += w(i) * (hyp_h[i] == ref_h[i] ? 1.0 : 0.0);
  }
}
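
// For illustration (added comment, not in the original file): with
// hyp = [3, 5, 5], ref = [3, 5, 4] and weights = [1.0, 0.5, 1.0], the loop
// above adds 1.0 to (*correct)(3) and 0.5 to (*correct)(5); the last frame
// does not match, so nothing is added for class 4.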


void Xent::Eval(const VectorBase<BaseFloat> &frame_weights,
                const CuMatrixBase<BaseFloat> &net_out,
                const CuMatrixBase<BaseFloat> &targets,
                CuMatrix<BaseFloat> *diff) {
  // check inputs,
  KALDI_ASSERT(net_out.NumCols() == targets.NumCols());
  KALDI_ASSERT(net_out.NumRows() == targets.NumRows());
  KALDI_ASSERT(net_out.NumRows() == frame_weights.Dim());

  KALDI_ASSERT(KALDI_ISFINITE(frame_weights.Sum()));
  KALDI_ASSERT(KALDI_ISFINITE(net_out.Sum()));
  KALDI_ASSERT(KALDI_ISFINITE(targets.Sum()));

  // buffer initialization,
  int32 num_classes = targets.NumCols();
  if (frames_.Dim() == 0) {
    frames_.Resize(num_classes);
    xentropy_.Resize(num_classes);
    entropy_.Resize(num_classes);
    correct_.Resize(num_classes);
  }

  // get frame_weights to GPU,
  frame_weights_ = frame_weights;

  // There may be frames for which the sum of targets is zero.
  // This happens in multi-lingual training when the frame's
  // target class belongs to the softmax of another language.
  // We 'switch off' such frames by masking the 'frame_weights_',
  target_sum_.Resize(targets.NumRows());
  target_sum_.AddColSumMat(1.0, targets, 0.0);
  frame_weights_.MulElements(target_sum_);

  // compute derivative wrt. activations of last layer of neurons,
  *diff = net_out;
  diff->AddMat(-1.0, targets);
  diff->MulRowsVec(frame_weights_);  // weighting,

  // count frames per class,
  frames_aux_ = targets;
  frames_aux_.MulRowsVec(frame_weights_);
  frames_.AddRowSumMat(1.0, CuMatrix<double>(frames_aux_));

  // evaluate the frame-level classification,
  net_out.FindRowMaxId(&max_id_out_);  // find max in nn-output
  targets.FindRowMaxId(&max_id_tgt_);  // find max in targets
  CountCorrectFramesWeighted(max_id_out_, max_id_tgt_,
                             frame_weights_, &correct_);

  // calculate cross_entropy (in GPU),
  xentropy_aux_ = net_out;  // y
  xentropy_aux_.Add(1e-20);  // avoid log(0)
  xentropy_aux_.ApplyLog();  // log(y)
  xentropy_aux_.MulElements(targets);  // t*log(y)
  xentropy_aux_.MulRowsVec(frame_weights_);  // w*t*log(y)
  xentropy_.AddRowSumMat(-1.0, CuMatrix<double>(xentropy_aux_));

  // calculate entropy (in GPU),
  entropy_aux_ = targets;  // t
  entropy_aux_.Add(1e-20);  // avoid log(0)
  entropy_aux_.ApplyLog();  // log(t)
  entropy_aux_.MulElements(targets);  // t*log(t)
  entropy_aux_.MulRowsVec(frame_weights_);  // w*t*log(t)
  entropy_.AddRowSumMat(-1.0, CuMatrix<double>(entropy_aux_));

  // progressive loss reporting
  if (opts_.loss_report_frames > 0) {
    frames_progress_ += frame_weights_.Sum();
    xentropy_progress_ += -xentropy_aux_.Sum();
    entropy_progress_ += -entropy_aux_.Sum();

    KALDI_ASSERT(KALDI_ISFINITE(xentropy_progress_));
    KALDI_ASSERT(KALDI_ISFINITE(entropy_progress_));

    if (frames_progress_ > opts_.loss_report_frames) {
      // loss value,
      double progress_value =
        (xentropy_progress_ - entropy_progress_) / frames_progress_;

      // time-related info (fps is weighted),
      double time_now = timer_.Elapsed();
      double fps = frames_progress_ / (time_now - elapsed_seconds_);
      double elapsed_hours = time_now / 3600;
      elapsed_seconds_ = time_now;  // store,

      // print,
      KALDI_LOG << "ProgressLoss[last "
                << static_cast<int>(frames_progress_/100/3600) << "h of "
                << static_cast<int>(frames_.Sum()/100/3600) << "h]: "
                << progress_value << " (Xent)"
                << ", fps=" << fps
                << std::setprecision(3)
                << ", elapsed " << elapsed_hours << "h";
      // store,
      loss_vec_.push_back(progress_value);
      // reset,
      frames_progress_ = 0;
      xentropy_progress_ = 0.0;
      entropy_progress_ = 0.0;
    }
  }
}
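
// A note on the math above (added for exposition, not in the original file):
// per class, the accumulated objective is the weighted cross-entropy minus
// the weighted target entropy,
//   L = -sum_{f,c} w_f t_{fc} log y_{fc}  +  sum_{f,c} w_f t_{fc} log t_{fc},
// i.e. a weighted KL-divergence D(t || y), which is what Report() averages.
// Assuming the last network component is a Softmax, the derivative of the
// cross-entropy w.r.t. the softmax inputs reduces to (y - t), which is why
// 'diff' is simply the weighted difference (net_out - targets) computed above.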


void Xent::Eval(const VectorBase<BaseFloat> &frame_weights,
                const CuMatrixBase<BaseFloat> &net_out,
                const Posterior &post,
                CuMatrix<BaseFloat> *diff) {
  int32 num_frames = net_out.NumRows(),
        num_pdf = net_out.NumCols();
  KALDI_ASSERT(num_frames == post.size());

  // convert posterior to matrix,
  PosteriorToMatrix(post, num_pdf, &tgt_mat_);

  // call the other eval function,
  Eval(frame_weights, net_out, tgt_mat_, diff);
}
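
#if 0
// Minimal usage sketch (illustrative only, not part of the original file):
// it mirrors the pattern of the nnet1 training tools, assuming an 'Nnet'
// from "nnet/nnet-nnet.h"; 'TrainOneMinibatchSketch' and its arguments are
// hypothetical names introduced here for the example.
void TrainOneMinibatchSketch(Nnet &nnet, const CuMatrix<BaseFloat> &feats,
                             const Posterior &targets, Xent *xent) {
  CuMatrix<BaseFloat> nnet_out, obj_diff;
  Vector<BaseFloat> frame_weights(feats.NumRows());
  frame_weights.Set(1.0);  // equal weight for every frame,
  nnet.Propagate(feats, &nnet_out);                        // forward pass,
  xent->Eval(frame_weights, nnet_out, targets, &obj_diff); // loss + gradient,
  nnet.Backpropagate(obj_diff, NULL);                      // backward pass,
  KALDI_LOG << xent->Report();                             // print statistics,
}
#endif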


std::string Xent::Report() {
  double loss_value =
    (xentropy_.Sum() - entropy_.Sum()) / frames_.Sum();
  std::ostringstream oss;
  oss << "AvgLoss: " << loss_value << " (Xent), "
      << "[AvgXent: " << xentropy_.Sum() / frames_.Sum()
      << ", AvgTargetEnt: " << entropy_.Sum() / frames_.Sum()
      << "]" << std::endl;

  oss << "progress: [";
  std::copy(loss_vec_.begin(), loss_vec_.end(),
            std::ostream_iterator<float>(oss, " "));
  oss << "]" << std::endl;

  double frame_accuracy = 100.0 * correct_.Sum() / frames_.Sum();
  oss << "FRAME_ACCURACY >> " << frame_accuracy << "% <<" << std::endl;

  return oss.str();
}


std::string Xent::ReportPerClass() {
  std::ostringstream oss;
  oss << "PER-CLASS PERFORMANCE:" << std::endl;
  oss << "@@@ Frames per-class:" << frames_;
  // get inverted counts,
  CuVector<double> inv_frames(frames_);
  inv_frames.Add(0.5);  // avoid 0-frames,
  inv_frames.ApplyPow(-1.0);
  // loss, kl = xentropy - entropy,
  CuVector<double> loss(xentropy_);
  loss.AddVec(-1.0, entropy_);
  loss.MulElements(inv_frames);
  oss << "@@@ Loss per-class:" << loss;
  // frame accuracy (assuming targets are binary),
  CuVector<double> frm_accu(correct_);
  frm_accu.MulElements(inv_frames);
  frm_accu.Scale(100.0);
  oss << "@@@ Frame-accuracy per-class:" << frm_accu;
  //
  return oss.str();
}


/* Mse */

void Mse::Eval(const VectorBase<BaseFloat> &frame_weights,
               const CuMatrixBase<BaseFloat>& net_out,
               const CuMatrixBase<BaseFloat>& target,
               CuMatrix<BaseFloat>* diff) {
  // check inputs,
  KALDI_ASSERT(net_out.NumCols() == target.NumCols());
  KALDI_ASSERT(net_out.NumRows() == target.NumRows());
  KALDI_ASSERT(net_out.NumRows() == frame_weights.Dim());

  KALDI_ASSERT(KALDI_ISFINITE(frame_weights.Sum()));
  KALDI_ASSERT(KALDI_ISFINITE(net_out.Sum()));
  KALDI_ASSERT(KALDI_ISFINITE(target.Sum()));

  int32 num_frames = frame_weights.Sum();
  KALDI_ASSERT(num_frames >= 0.0);

  // get frame_weights to GPU,
  frame_weights_ = frame_weights;

  // compute derivative w.r.t. neural network outputs,
  *diff = net_out;  // y
  diff->AddMat(-1.0, target);  // (y - t)
  diff->MulRowsVec(frame_weights_);  // weighting,

  // Compute MeanSquareError loss of mini-batch
  diff_pow_2_ = *diff;
  diff_pow_2_.MulElements(diff_pow_2_);  // (y - t)^2
  diff_pow_2_.MulRowsVec(frame_weights_);  // w*(y - t)^2
  double mean_square_error = 0.5 * diff_pow_2_.Sum();  // sum the matrix,

  KALDI_ASSERT(KALDI_ISFINITE(mean_square_error));

  // accumulate
  loss_ += mean_square_error;
  frames_ += num_frames;

  // progressive loss reporting
  if (opts_.loss_report_frames > 0) {
    frames_progress_ += num_frames;
    loss_progress_ += mean_square_error;
    if (frames_progress_ > opts_.loss_report_frames) {
      KALDI_LOG << "ProgressLoss[last "
                << static_cast<int>(frames_progress_/100/3600) << "h of "
                << static_cast<int>(frames_/100/3600) << "h]: "
                << loss_progress_/frames_progress_ << " (Mse)";
      // store
      loss_vec_.push_back(loss_progress_/frames_progress_);
      // reset
      frames_progress_ = 0;
      loss_progress_ = 0.0;
    }
  }
}
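
// For exposition (added comment, not in the original file): with the usual
// 0/1 frame-weights w_f, the value accumulated into 'loss_' is
//   L = 1/2 * sum_{f,d} w_f * (y_{fd} - t_{fd})^2,
// and the gradient returned in 'diff' is dL/dy_{fd} = w_f * (y_{fd} - t_{fd}),
// i.e. exactly the weighted difference computed above.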


void Mse::Eval(const VectorBase<BaseFloat> &frame_weights,
               const CuMatrixBase<BaseFloat>& net_out,
               const Posterior& post,
               CuMatrix<BaseFloat>* diff) {
  int32 num_frames = net_out.NumRows(),
        num_nn_outputs = net_out.NumCols();
  KALDI_ASSERT(num_frames == post.size());

  // convert posterior to matrix,
  PosteriorToMatrix(post, num_nn_outputs, &tgt_mat_);

  // call the other eval function,
  Eval(frame_weights, net_out, tgt_mat_, diff);
}


std::string Mse::Report() {
  // compute root mean square,
  int32 num_tgt = diff_pow_2_.NumCols();
  BaseFloat root_mean_square = sqrt(loss_/frames_/num_tgt);
  // build the message,
  std::ostringstream oss;
  oss << "AvgLoss: " << loss_/frames_ << " (Mse), "
      << "[RMS " << root_mean_square << ", frames "
      << frames_ << "]" << std::endl;
  oss << "progress: [";
  std::copy(loss_vec_.begin(), loss_vec_.end(),
            std::ostream_iterator<float>(oss, " "));
  oss << "]" << std::endl;
  return oss.str();
}


/* MultiTaskLoss */

void MultiTaskLoss::InitFromString(const std::string& s) {
  std::vector<std::string> v;
  SplitStringToVector(s, ",:" /* delimiter */, false, &v);

  KALDI_ASSERT((v.size()-1) % 3 == 0);  // triplets,
  KALDI_ASSERT(v[0] == "multitask");  // header,

  // parse the definition of multitask loss,
  std::vector<std::string>::iterator it(v.begin()+1);  // skip header,
  for ( ; it != v.end(); ++it) {
    // type,
    if (*it == "xent") {
      loss_vec_.push_back(new Xent(opts_));
    } else if (*it == "mse") {
      loss_vec_.push_back(new Mse(opts_));
    } else {
      KALDI_ERR << "Unknown objective function code : " << *it;
    }
    ++it;
    // dim,
    int32 dim;
    if (!ConvertStringToInteger(*it, &dim)) {
      KALDI_ERR << "Cannot convert 'dim' " << *it << " to integer!";
    }
    loss_dim_.push_back(dim);
    ++it;
    // weight,
    BaseFloat weight;
    if (!ConvertStringToReal(*it, &weight)) {
      KALDI_ERR << "Cannot convert 'weight' " << *it << " to float!";
    }
    KALDI_ASSERT(weight >= 0.0);
    loss_weights_.push_back(weight);
  }

  // build vector with starting-point offsets,
  loss_dim_offset_.resize(loss_dim_.size()+1, 0);  // 1st zero stays,
  for (int32 i = 1; i <= loss_dim_.size(); i++) {
    loss_dim_offset_[i] = loss_dim_offset_[i-1] + loss_dim_[i-1];
  }

  // sanity check,
  KALDI_ASSERT(loss_vec_.size() > 0);
  KALDI_ASSERT(loss_vec_.size() == loss_dim_.size());
  KALDI_ASSERT(loss_vec_.size() == loss_weights_.size());
}
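
#if 0
// Illustrative sketch of InitFromString() (not part of the original file);
// the string values below are made-up examples. The expected format is
// 'multitask,<type1>,<dim1>,<weight1>,...,<typeN>,<dimN>,<weightN>'.
// It assumes MultiTaskLoss, like Xent and Mse here, is constructed from
// a LossOptions instance.
LossOptions opts;
MultiTaskLoss multitask(opts);
multitask.InitFromString("multitask,xent,2400,1.0,mse,40,0.001");
// After parsing:
//   loss_vec_        : { new Xent(opts_), new Mse(opts_) }
//   loss_dim_        : { 2400, 40 }
//   loss_weights_    : { 1.0, 0.001 }
//   loss_dim_offset_ : { 0, 2400, 2440 }
#endif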

void MultiTaskLoss::Eval(const VectorBase<BaseFloat> &frame_weights,
                         const CuMatrixBase<BaseFloat>& net_out,
                         const Posterior& post,
                         CuMatrix<BaseFloat>* diff) {
  int32 num_frames = net_out.NumRows(),
        num_output = net_out.NumCols();
  KALDI_ASSERT(num_frames == post.size());
  KALDI_ASSERT(num_output == loss_dim_offset_.back());  // sum of loss-dims,

  // convert posterior to matrix,
  PosteriorToMatrix(post, num_output, &tgt_mat_);

  // allocate diff matrix,
  diff->Resize(num_frames, num_output);

  // One vector of frame_weights per loss function: the original weights,
  // with zeros for frames whose targets are not defined for that loss,
  std::vector<Vector<BaseFloat> > frmwei_have_tgt;
  for (int32 l = 0; l < loss_vec_.size(); l++) {
    // copy original weights,
    frmwei_have_tgt.push_back(Vector<BaseFloat>(frame_weights));
    // We need to mask-out the frames for which the 'posterior' is not defined (= is empty):
    int32 loss_beg = loss_dim_offset_[l];  // first column of loss target,
    int32 loss_end = loss_dim_offset_[l+1];  // (last+1) column of loss target,
    for (int32 f = 0; f < num_frames; f++) {
      bool tgt_defined = false;
      for (int32 p = 0; p < post[f].size(); p++) {
        if (post[f][p].first >= loss_beg && post[f][p].first < loss_end) {
          tgt_defined = true;
          break;
        }
      }
      if (!tgt_defined) {
        frmwei_have_tgt[l](f) = 0.0;  // set zero weight for the frame with no targets!
      }
    }
  }

  // call the vector of loss functions,
  CuMatrix<BaseFloat> diff_aux;
  for (int32 l = 0; l < loss_vec_.size(); l++) {
    loss_vec_[l]->Eval(frmwei_have_tgt[l],
                       net_out.ColRange(loss_dim_offset_[l], loss_dim_[l]),
                       tgt_mat_.ColRange(loss_dim_offset_[l], loss_dim_[l]),
                       &diff_aux);
    // Scale the gradients,
    diff_aux.Scale(loss_weights_[l]);
    // Copy to diff,
    diff->ColRange(loss_dim_offset_[l], loss_dim_[l]).CopyFromMat(diff_aux);
  }
}
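
// For illustration (added comment, not in the original file): with the
// example string 'multitask,xent,2400,1.0,mse,40,0.001' from the sketch
// above, columns [0, 2400) of 'net_out' and 'tgt_mat_' are evaluated by the
// Xent loss and columns [2400, 2440) by the Mse loss; each per-loss gradient
// is scaled by its weight (1.0 and 0.001) and copied back into the matching
// column range of 'diff'.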

std::string MultiTaskLoss::Report() {
  // calculate overall loss (weighted),
  BaseFloat overall_loss = AvgLoss();
  // copy the loss-values into a vector,
  std::vector<BaseFloat> loss_values;
  for (int32 i = 0; i < loss_vec_.size(); i++) {
    loss_values.push_back(loss_vec_[i]->AvgLoss());
  }

  // build the message,
  std::ostringstream oss;
  oss << "MultiTaskLoss, with " << loss_vec_.size()
      << " parallel loss functions." << std::endl;
  // individual loss reports first,
  for (int32 i = 0; i < loss_vec_.size(); i++) {
    oss << "Loss " << i+1 << ", " << loss_vec_[i]->Report() << std::endl;
  }

  // overall loss is last,
  oss << "Loss (OVERALL), "
      << "AvgLoss: " << overall_loss << " (MultiTaskLoss), "
      << "weights " << loss_weights_ << ", "
      << "values " << loss_values << std::endl;

  return oss.str();
}

BaseFloat MultiTaskLoss::AvgLoss() {
  BaseFloat ans(0.0);
  for (int32 i = 0; i < loss_vec_.size(); i++) {
    BaseFloat val = loss_weights_[i] * loss_vec_[i]->AvgLoss();
    if (!KALDI_ISFINITE(val)) {
      KALDI_WARN << "Loss " << i+1 << " has bad objective function value '"
                 << val << "', using 0.0 instead.";
      val = 0.0;
    }
    ans += val;
  }
  return ans;
}

}  // namespace nnet1
}  // namespace kaldi