// Fragments of DoBackpropParallelClass, the per-thread worker object
// (its full constructor signature appears in the reference list below):

  DoBackpropParallelClass(const Nnet &nnet,
                          ExamplesRepository *repository,
                          double *tot_weight_ptr,
                          double *log_prob_ptr,
                          Nnet *nnet_to_update,
                          bool store_separate_gradients):
      ...

// Inside operator()(), each thread pulls minibatches from the repository,
// runs DoBackprop() on them, and logs progress:

    std::vector<NnetExample> examples;
    ...
    KALDI_VLOG(4) << "Thread " << thread_id_ << " saw " << tot_weight_
                  << " frames so far (weighted); likelihood "
                  << "per frame so far is " << (log_prob_ / tot_weight_);

double DoBackpropSingleThreaded(const Nnet &nnet,
                                int32 minibatch_size,
                                SequentialNnetExampleReader *examples_reader,
                                double *tot_weight_out,
                                Nnet *nnet_to_update) {
  double ans = 0.0, tot_weight = 0.0;
  while (!examples_reader->Done()) {
    std::vector<NnetExample> egs;
    egs.reserve(minibatch_size);
    // Note the negation: without "!" no examples would ever be read.
    while (egs.size() < minibatch_size && !examples_reader->Done()) {
      egs.push_back(examples_reader->Value());
      examples_reader->Next();
    }
    tot_weight += TotalNnetTrainingWeight(egs);
    ans += DoBackprop(nnet, egs, nnet_to_update);
  }
  *tot_weight_out = tot_weight;
  return ans;
}
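The Done()/Value()/Next() calls above are the standard idiom of Kaldi's Table readers; SequentialNnetExampleReader is just a sequential Table reader over NnetExample objects. As a minimal stand-alone illustration (the rspecifier "ark:egs.ark" is a placeholder, not something this file defines):

  SequentialNnetExampleReader reader("ark:egs.ark");  // placeholder rspecifier
  for (; !reader.Done(); reader.Next()) {
    const std::string &key = reader.Key();    // the example's archive key
    const NnetExample &eg = reader.Value();   // reference valid until Next()
    // ... consume (key, eg) here ...
  }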
double DoBackpropParallel(const Nnet &nnet,
                          int32 minibatch_size,
                          SequentialNnetExampleReader *examples_reader,
                          double *tot_weight,
                          Nnet *nnet_to_update) {
#if HAVE_CUDA == 1
  // Multiple threads would not help when the computation runs on a GPU,
  // so fall back to the single-threaded version.
  if (CuDevice::Instantiate().Enabled())
    return DoBackpropSingleThreaded(nnet, minibatch_size, examples_reader,
                                    tot_weight, nnet_to_update);
#endif
  ExamplesRepository repository;  // hands minibatches to the worker threads.
  double tot_log_prob = 0.0;
  *tot_weight = 0.0;
  const bool store_separate_gradients = (nnet_to_update != &nnet);
  DoBackpropParallelClass c(nnet, &repository, tot_weight,
                            &tot_log_prob, nnet_to_update,
                            store_separate_gradients);
  {
    // Constructing this object spawns the threads that process the
    // examples; they are re-joined in its destructor.
    MultiThreader<DoBackpropParallelClass> m(g_num_threads, c);
    std::vector<NnetExample> examples;
    for (; !examples_reader->Done(); examples_reader->Next()) {
      examples.push_back(examples_reader->Value());
      if (examples.size() == minibatch_size)
        repository.AcceptExamples(&examples);
    }
    if (!examples.empty())  // partial minibatch.
      repository.AcceptExamples(&examples);
    repository.ExamplesDone();  // tell the threads no more input is coming.
  }
  KALDI_LOG << "Did backprop on " << *tot_weight
            << " examples, average log-prob "
            << "per frame is " << (tot_log_prob / *tot_weight);
  KALDI_LOG << "[this line is to be parsed by a script:] log-prob-per-frame="
            << (tot_log_prob / *tot_weight);
  return tot_log_prob;
}
double DoBackpropSingleThreaded(const Nnet &nnet,
                                int32 minibatch_size,
                                const std::vector<NnetExample> &egs,
                                double *tot_weight,
                                Nnet *nnet_to_update) {
  double ans = 0.0;
  *tot_weight = TotalNnetTrainingWeight(egs);
  for (size_t i = 0; i < egs.size(); i += minibatch_size) {
    // Take minibatch_size examples, or whatever remains at the end.
    std::vector<NnetExample>::const_iterator end_iter =
        (i + minibatch_size > egs.size() ? egs.end() :
         egs.begin() + i + minibatch_size);
    std::vector<NnetExample> this_egs(egs.begin() + i, end_iter);
    ans += DoBackprop(nnet, this_egs, nnet_to_update);
  }
  return ans;
}
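The iterator arithmetic above just takes minibatch_size examples per slice, or whatever remains at the end. The same idiom on a plain vector, as a self-contained sketch (toy values, nothing Kaldi-specific):

  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<int> egs(7);     // stand-in for 7 training examples
    size_t minibatch_size = 3;
    for (size_t i = 0; i < egs.size(); i += minibatch_size) {
      std::vector<int>::const_iterator end_iter =
          (i + minibatch_size > egs.size() ? egs.end()
                                           : egs.begin() + i + minibatch_size);
      std::vector<int> batch(egs.begin() + i, end_iter);
      std::printf("batch size %zu\n", batch.size());  // prints 3, 3, 1
    }
    return 0;
  }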
double DoBackpropParallel(const Nnet &nnet,
                          int32 minibatch_size,
                          int32 num_threads,
                          const std::vector<NnetExample> &egs,
                          double *tot_weight,
                          Nnet *nnet_to_update) {
  if (num_threads == 1)  // the single-threaded path also covers GPU use.
    return DoBackpropSingleThreaded(nnet, minibatch_size, egs,
                                    tot_weight, nnet_to_update);
  ExamplesRepository repository;  // hands minibatches to the worker threads.
  double tot_log_prob = 0.0;
  *tot_weight = 0.0;
  const bool store_separate_gradients = (nnet_to_update != &nnet);
  DoBackpropParallelClass c(nnet, &repository, tot_weight,
                            &tot_log_prob, nnet_to_update,
                            store_separate_gradients);
  {
    MultiThreader<DoBackpropParallelClass> m(num_threads, c);
    int32 num_egs = egs.size();
    for (int32 offset = 0; offset < num_egs; offset += minibatch_size) {
      int32 this_minibatch_size = std::min(minibatch_size, num_egs - offset);
      std::vector<NnetExample> examples(
          egs.begin() + offset,
          egs.begin() + offset + this_minibatch_size);
      repository.AcceptExamples(&examples);
    }
    repository.ExamplesDone();  // the threads re-join in m's destructor.
  }
  KALDI_VLOG(2) << "Did backprop on " << *tot_weight
                << " examples, average log-prob "
                << "per frame is " << (tot_log_prob / *tot_weight);
  return tot_log_prob;
}
Referenced symbols:

double DoBackpropSingleThreaded(const Nnet &nnet, int32 minibatch_size, const std::vector<NnetExample> &egs, double *tot_weight, Nnet *nnet_to_update)

void AcceptExamples(std::vector<NnetExample> *examples)
    The following function is called by the code that reads in the examples, with a batch of examples; it will empty the vector "examples".

void AddNnet(const VectorBase<BaseFloat> &scales, const Nnet &other)
    For each updatable component, adds to it the corresponding element of "other" times the appropriate scale.

~DoBackpropParallelClass()

void ExamplesDone()
    The following function is called by the code that reads in the examples, when we're done reading examples.

double ComputeNnetObjf(const Nnet &nnet, const std::vector<NnetExample> &examples, double *tot_accuracy)
    Computes the objective function over a minibatch.

ExamplesRepository *repository_

double DoBackprop(const Nnet &nnet, const std::vector<NnetExample> &examples, Nnet *nnet_to_update, double *tot_accuracy)
    This function computes the objective function and either updates the model or adds to parameter gradients.

void SetZero(bool treat_as_gradient)

ExamplesRepository
    This class stores neural net training examples to be used in multi-threaded training.

bool ProvideExamples(std::vector<NnetExample> *examples)
    This function is called by the code that does the training.

SequentialTableReader
    A templated class for reading objects sequentially from an archive or script file; see "The Table concept" in the Kaldi documentation.

DoBackpropParallelClass(const DoBackpropParallelClass &other)

BaseFloat TotalNnetTrainingWeight(const std::vector<NnetExample> &egs)
    Returns the total weight summed over all the examples...

bool store_separate_gradients_

DoBackpropParallelClass(const Nnet &nnet, ExamplesRepository *repository, double *tot_weight_ptr, double *log_prob_ptr, Nnet *nnet_to_update, bool store_separate_gradients)

Nnet *nnet_to_update_orig_

#define KALDI_ASSERT(cond)

nnet-update.h
    This header provides functionality for sample-by-sample stochastic gradient descent and gradient computation with a neural net.

double DoBackpropParallel(const Nnet &nnet, int32 minibatch_size, SequentialNnetExampleReader *examples_reader, double *tot_weight, Nnet *nnet_to_update)
    This function is similar to "DoBackprop" in nnet-update.h: it computes the objective function and either updates the model or adds to parameter gradients, but does the work in multiple threads.
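The store_separate_gradients path ties together the SetZero() and AddNnet() entries above: when exact gradients are wanted, each worker accumulates into a private copy of the gradient object, which is summed back when the worker is destroyed. A sketch of that pattern (simplified; it assumes the scalar AddNnet(alpha, other) overload that nnet2's Nnet also provides, and omits the locking around the shared totals):

  // In the copy constructor, run once per worker thread:
  if (store_separate_gradients_) {
    nnet_to_update_ = new Nnet(*nnet_to_update_orig_);  // private copy
    nnet_to_update_->SetZero(true);   // treat the contents as a gradient
  }

  // In ~DoBackpropParallelClass(), fold the private copy back in:
  if (nnet_to_update_ != nnet_to_update_orig_) {
    nnet_to_update_orig_->AddNnet(1.0, *nnet_to_update_);
    delete nnet_to_update_;
  }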