nnet-fix.h
// nnet2/nnet-fix.h

// Copyright 2012  Johns Hopkins University (author: Daniel Povey)

// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.

#ifndef KALDI_NNET2_NNET_FIX_H_
#define KALDI_NNET2_NNET_FIX_H_

#include "nnet2/nnet-nnet.h"

namespace kaldi {
namespace nnet2 {

/* This header provides a function FixNnet(), and an associated config, which
   is responsible for fixing certain pathologies in a neural network during
   training.

   For sigmoid/tanh units: it identifies neurons whose parameters have grown
   so large that they saturate (max out) the nonlinearity, and scales those
   parameters down by a specified factor. It also identifies neurons with the
   opposite pathology, that they operate only in the linear part of the
   sigmoid, and it scales their parameters up.

   For ReLU (rectified linear) units, it identifies neurons that are always
   zero or close to zero, and re-randomizes the corresponding parameters,
   increasing the bias.
*/

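// To make the scaling rule concrete, here is a hypothetical sketch (the real
// implementation is in nnet-fix.cc; the variable names below are illustrative
// only, and "config" refers to the NnetFixConfig declared next). Let
// average_deriv be the derivative of the nonlinearity averaged over the
// training data, and max_deriv its largest possible value (1.0 for tanh,
// 0.25 for sigmoid):
//
//   BaseFloat ratio = average_deriv / max_deriv;
//   if (ratio < config.min_average_deriv) {
//     // Saturated neuron: its parameters are too large, so scale them down.
//     BaseFloat factor = std::min(config.min_average_deriv / ratio,
//                                 config.parameter_factor);
//     // ... scale this neuron's input weights and bias by 1.0 / factor ...
//   } else if (ratio > config.max_average_deriv) {
//     // Neuron stuck in the linear region: scale its parameters up.
//     BaseFloat factor = std::min(ratio / config.max_average_deriv,
//                                 config.parameter_factor);
//     // ... scale this neuron's input weights and bias by factor ...
//   }
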
struct NnetFixConfig {
  BaseFloat min_average_deriv; // Minimum average derivative that we allow, as
  // a proportion of the maximum derivative of the nonlinearity (1.0 for tanh,
  // 0.25 for sigmoid). If the average derivative is less than this, we scale
  // the parameters down.
  BaseFloat max_average_deriv; // Maximum average derivative that we allow,
  // also expressed relative to the maximum derivative of the nonlinearity.
  BaseFloat parameter_factor; // Maximum factor (>1.0) by which we change the
  // parameters if they exceed the bounds above.
  BaseFloat relu_bias_change; // Change in bias for ReLUs that are usually
  // close to zero.

  NnetFixConfig(): min_average_deriv(0.1), max_average_deriv(0.75),
                   parameter_factor(2.0), relu_bias_change(1.0) { }
  void Register(OptionsItf *opts) {
    opts->Register("min-average-deriv", &min_average_deriv, "Minimum "
                   "derivative, averaged over the training data, that we allow "
                   "for a nonlinearity, expressed relative to the maximum "
                   "derivative of the nonlinearity: 1.0 for tanh, 0.25 for "
                   "sigmoid, 1.0 for rectified linear.");
    opts->Register("max-average-deriv", &max_average_deriv, "Maximum "
                   "derivative, averaged over the training data, that we allow "
                   "for the nonlinearity associated with one neuron.");
    opts->Register("parameter-factor", &parameter_factor, "Maximum factor by "
                   "which we change the set of parameters associated with a "
                   "neuron.");
    opts->Register("relu-bias-change", &relu_bias_change, "For ReLUs, change "
                   "in bias when we identify a component that's too frequently "
                   "on or off.");
  }
};
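
// Usage note: these options are registered through OptionsItf, so any binary
// that registers this config exposes them as command-line flags; e.g. a more
// aggressive fix could be requested with (hypothetical values)
//   --min-average-deriv=0.2 --parameter-factor=4.0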

void FixNnet(const NnetFixConfig &config, Nnet *nnet);

} // namespace nnet2
} // namespace kaldi

#endif // KALDI_NNET2_NNET_FIX_H_
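
For context, here is a minimal usage sketch. This is hypothetical driver code, not part of this file or of any particular Kaldi binary; it assumes the network's nonlinearity components have already accumulated the derivative statistics that FixNnet() relies on, and it elides the I/O:

  #include "nnet2/nnet-fix.h"
  #include "util/parse-options.h"

  int main(int argc, char *argv[]) {
    using namespace kaldi;
    using namespace nnet2;

    ParseOptions po("Fix pathological neurons in an nnet2 network.");
    NnetFixConfig config;
    config.Register(&po);  // exposes --min-average-deriv etc. as flags
    po.Read(argc, argv);

    Nnet nnet;
    // ... read the network (with its accumulated statistics) from disk ...
    FixNnet(config, &nnet);
    // ... write the fixed network back out ...
    return 0;
  }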