20 #ifndef KALDI_NNET2_COMBINE_NNET_A_H_ 21 #define KALDI_NNET2_COMBINE_NNET_A_H_ 47 valid_impr_thresh(0.5), overshoot(1.8),
48 min_learning_rate_factor(0.5),
49 max_learning_rate_factor(2.0),
50 min_learning_rate(0.0001) { }
53 opts->
Register(
"num-bfgs-iters", &num_bfgs_iters,
"Maximum number of function " 54 "evaluations for BFGS to use when optimizing combination weights");
55 opts->
Register(
"initial-step", &initial_step,
"Parameter in the optimization, " 56 "used to set the initial step length; the default value should be " 58 opts->
Register(
"num-bfgs-iters", &num_bfgs_iters,
"Maximum number of function " 59 "evaluations for BFGS to use when optimizing combination weights");
60 opts->
Register(
"valid-impr-thresh", &valid_impr_thresh,
"Threshold of improvement " 61 "in validation-set objective function for one iteratin; below this, " 62 "we start using the \"overshoot\" mechanism to keep learning rates high.");
63 opts->
Register(
"overshoot", &overshoot,
"Factor by which we overshoot the step " 64 "size obtained by BFGS; only applies when validation set impr is less " 65 "than valid-impr-thresh.");
66 opts->
Register(
"max-learning-rate-factor", &max_learning_rate_factor,
67 "Maximum factor by which to increase the learning rate for any layer.");
68 opts->
Register(
"min-learning-rate-factor", &min_learning_rate_factor,
69 "Minimum factor by which to increase the learning rate for any layer.");
70 opts->
Register(
"min-learning-rate", &min_learning_rate,
71 "Floor on the automatically updated learning rates");
76 const std::vector<NnetExample> &validation_set,
77 const std::vector<Nnet> &nnets_in,
This code computes Goodness of Pronunciation (GOP) and extracts phone-level pronunciation features.
void CombineNnetsA(const NnetCombineAconfig &config, const std::vector< NnetExample > &validation_set, const std::vector< Nnet > &nnets, Nnet *nnet_out)
virtual void Register(const std::string &name, bool *ptr, const std::string &doc)=0
BaseFloat min_learning_rate
BaseFloat min_learning_rate_factor
BaseFloat max_learning_rate_factor
BaseFloat valid_impr_thresh
This header provides functionality for sample-by-sample stochastic gradient descent and gradient computation.
void Register(OptionsItf *opts)