21 #ifndef KALDI_NNET3_NNET_OPTIMIZE_H_    22 #define KALDI_NNET3_NNET_OPTIMIZE_H_    64       consolidate_model_update(true),
    65       propagate_in_place(true),
    66       backprop_in_place(true),
    67       optimize_row_ops(true),
    69       extend_matrices(true),
    70       convert_addition(true),
    71       remove_assignments(true),
    72       allow_left_merge(true),
    73       allow_right_merge(true),
    74       initialize_undefined(true),
    75       move_sizing_commands(true),
    76       allocate_from_other(true),
    77       min_deriv_time(std::numeric_limits<
int32>::min()),
    78       max_deriv_time(std::numeric_limits<
int32>::max()),
    79       max_deriv_time_relative(std::numeric_limits<
int32>::max()),
    81       memory_compression_level(1),
    82       optimize_looped_computation(false) { }
    85     opts->
Register(
"optimize", &optimize, 
"Set this to false to turn off all "    87     opts->
Register(
"consolidate-model-update", &consolidate_model_update,
    88                    "Set to false to disable optimization that consolidates "    89                    "the model-update phase of backprop (e.g. for recurrent "    91     opts->
Register(
"propagate-in-place", &propagate_in_place, 
"Set to false to "    92                    "disable optimization that allows in-place propagation");
    93     opts->
Register(
"backprop-in-place", &backprop_in_place, 
"Set to false to "    94                    "disable optimization that allows in-place backprop");
    95     opts->
Register(
"extend-matrices", &extend_matrices, 
"This optimization "    96                    "can reduce memory requirements for TDNNs when applied "    97                    "together with --convert-addition=true");
    98     opts->
Register(
"optimize-row-ops", &optimize_row_ops, 
"Set to false to "    99                    "disable certain optimizations that act on operations of "   101     opts->
Register(
"split-row-ops", &split_row_ops, 
"Set to false to disable "   102                    "an optimization that may replace some operations of type "   103                    "kCopyRowsMulti or kAddRowsMulti with up to two simpler "   105     opts->
Register(
"convert-addition", &convert_addition, 
"Set to false to "   106                    "disable the optimization that converts Add commands into "   107                    "Copy commands wherever possible.");
   108     opts->
Register(
"remove-assignments", &remove_assignments, 
"Set to false to "   109                    "disable optimization that removes redundant assignments");
   110     opts->
Register(
"allow-left-merge", &allow_left_merge, 
"Set to false to "   111                    "disable left-merging of variables in remove-assignments "   113     opts->
Register(
"allow-right-merge", &allow_right_merge, 
"Set to false to "   114                    "disable right-merging of variables in remove-assignments "   116     opts->
Register(
"initialize-undefined", &initialize_undefined, 
"Set to false "   117                    "to disable optimization that avoids redundant zeroing");
   118     opts->
Register(
"move-sizing-commands", &move_sizing_commands, 
"Set to false "   119                    "to disable optimization that moves matrix allocation and "   120                    "deallocation commands to conserve memory.");
   121     opts->
Register(
"allocate-from-other", &allocate_from_other, 
"Instead of "   122                    "deleting a matrix of a given size and then allocating "   123                    "a matrix of the same size, allow re-use of that memory");
   124     opts->
Register(
"min-deriv-time", &min_deriv_time, 
"You can set this to "   125                    "the minimum t value that you want derivatives to be computed "   126                    "at when updating the model.  This is an optimization that "   127                    "saves time in the backprop phase for recurrent frameworks");
   128     opts->
Register(
"max-deriv-time", &max_deriv_time, 
"You can set this to "   129                    "the maximum t value that you want derivatives to be computed "   130                    "at when updating the model.  This is an optimization that "   131                    "saves time in the backprop phase for recurrent frameworks");
   132     opts->
Register(
"max-deriv-time-relative", &max_deriv_time_relative,
   133                    "An alternative mechanism for setting the --max-deriv-time, "   134                    "suitable for situations where the length of the egs is "   135                    "variable.  If set, it is equivalent to setting the "   136                    "--max-deriv-time to this value plus the largest 't' value "   137                    "in any 'output' node of the computation request.");
   138     opts->
Register(
"snip-row-ops", &snip_row_ops, 
"Set this to false to "   139                    "disable an optimization that reduces the size of certain "   140                    "per-row operations");
   141     opts->
Register(
"memory-compression-level", &memory_compression_level,
   142                    "This is only relevant to training, not decoding.  Set this "   143                    "to 0,1,2; higher levels are more aggressive at reducing "   144                    "memory by compressing quantities needed for backprop, "   145                    "potentially at the expense of speed and the accuracy "   146                    "of derivatives.  0 means no compression at all; 1 means "   147                    "compression that shouldn't affect results at all.");
   150   void Read(std::istream &is, 
bool binary);
   151   void Write(std::ostream &os, 
bool binary) 
const;
   187               int32 max_output_time_in_request,
   198       cache_capacity(64) { }
   201     opts->
Register(
"use-shortcut", &use_shortcut,
   202                    "If true, use the 'shortcut' in compilation whereby "   203                    "computation requests with regular structure are identified "   204                    "as such, a computation with a smaller number of distinct "   205                    "values of 'n' is compiled (e.g. 2), and the compiled "   206                    "computation is expanded to match the size of the real "   207                    "computation request.");
   208     opts->
Register(
"cache-capacity", &cache_capacity,
   209                    "Determines how many computations the computation-cache will "   210                    "store (most-recently-used).");
   240   std::shared_ptr<const NnetComputation> 
Compile(
   242   void ReadCache(std::istream &is, 
bool binary);
   243   void WriteCache(std::ostream &os, 
bool binary);
   252   void GetSimpleNnetContext(
int32 *nnet_left_context,
   253                             int32 *nnet_right_context);
   260   std::shared_ptr<const NnetComputation> CompileInternal(
const ComputationRequest &request);
   267   std::shared_ptr<const NnetComputation> CompileAndCache(
const ComputationRequest &request);
 double seconds_taken_check_
 
This code computes Goodness of Pronunciation (GOP) and extracts phone-level pronunciation features for...
 
void Register(OptionsItf *opts)
 
CachingOptimizingCompilerOptions()
 
void Read(std::istream &is, bool binary)
 
int32 max_deriv_time_relative
 
void ConsolidateIoOperations(const Nnet &nnet, NnetComputation *computation)
This optimization puts the input operations (kAcceptInput) and output operations (kProvideOutput) at ...
 
void Write(std::ostream &os, bool binary) const
 
ArpaLmCompiler * Compile(bool seps, const std::string &infile)
 
bool move_sizing_commands
 
This class enables you to do the compilation and optimization in one call, and also ensures that if t...
 
void Register(OptionsItf *opts)
 
void VariableMergingOptimization(const NnetOptimizeOptions &config, const Nnet &nnet, NnetComputation *computation)
This wraps class VariableMergingOptimizer in a simplified interface. 
 
This file contains utilities for analyzing and checking computations, which are used in the optimizat...
 
void ConvertAdditionToAssignment(const Nnet &nnet, NnetComputation *computation)
This converts addition operations (things with Add in their names) to copy operations (things with Co...
 
void LimitDerivativeTimes(const Nnet &nnet, int32 min_deriv_time, int32 max_deriv_time, NnetComputation *computation)
 
virtual void Register(const std::string &name, bool *ptr, const std::string &doc)=0
 
bool optimize_looped_computation
 
void MoveSizingCommands(const Nnet &nnet, NnetComputation *computation)
This optimization moves commands that allocate and zero matrices to as late as possible, and moves commands that deallocate matrices to as early as possible. 
 
double seconds_taken_expand_
 
bool consolidate_model_update
 
bool operator==(const NnetOptimizeOptions &other) const
 
double seconds_taken_total_
 
int32 MaxOutputTimeInRequest(const ComputationRequest &request)
 
void RemoveUnnecessaryAllocation(const Nnet &nnet, NnetComputation *computation)
This optimization detects cases where we deallocate a matrix, and then later allocate another matrix ...
 
void RemoveUnnecessaryZeroing(const Nnet &nnet, NnetComputation *computation)
This optimization function removes, where possible, commands of type kSetConst. 
 
int32 memory_compression_level
 
double seconds_taken_compile_
 
double seconds_taken_optimize_
 
void Optimize(const NnetOptimizeOptions &config, const Nnet &nnet, int32 max_output_time_in_request, NnetComputation *computation)
This is the top-level function for optimizing a computation. 
 
void ConsolidateModelUpdate(const Nnet &nnet, NnetComputation *computation)
This optimization consolidates the model-update part of backprop commands, for components in (e...
 
int32 nnet_right_context_
 
double seconds_taken_indexes_
 
NnetOptimizeOptions opt_config_
 
Class ComputationCache is used inside class CachingOptimizingCompiler to cache previously computed co...
 
bool initialize_undefined
 
CachingOptimizingCompilerOptions config_