34 nnet_config_(nnet_config),
35 discriminative_config_(discriminative_config),
39 compiler_(nnet, nnet_config_.optimize_config),
41 num_minibatches_processed_(0) {
52 KALDI_ERR <<
"GetDeriv() called when no derivatives were requested.";
71 store_component_stats =
false;
73 use_xent_derivative =
false;
77 need_model_derivative,
78 store_component_stats,
79 use_xent_regularization, use_xent_derivative,
81 std::shared_ptr<const NnetComputation> computation =
compiler_.
Compile(request);
97 std::vector<NnetDiscriminativeSupervision>::const_iterator iter = eg.
outputs.begin(),
99 for (; iter != end; ++iter) {
102 if (node_index < 0 ||
109 std::string xent_name = sup.
name +
"-xent";
113 nnet_output_deriv.
Resize(nnet_output.NumRows(), nnet_output.NumCols(),
117 xent_deriv.
Resize(nnet_output.NumRows(), nnet_output.NumCols(),
131 &nnet_output_deriv : NULL),
132 (use_xent ? &xent_deriv : NULL));
159 unordered_map<std::string, discriminative::DiscriminativeObjectiveInfo, StringHasher>::const_iterator
163 for (; iter != end; ++iter) {
164 const std::string &name = iter->first;
176 <<
" objective for '" 178 << (tot_objective / tot_weight)
180 <<
"over " << tot_weight <<
" frames.";
183 <<
" objective for '" 185 << (tot_objective / tot_weight)
188 <<
"over " << tot_weight <<
" frames.";
198 const std::string &output_name)
const {
199 unordered_map<std::string, discriminative::DiscriminativeObjectiveInfo, StringHasher>::const_iterator
202 return &(iter->second);
This code computes Goodness of Pronunciation (GOP) and extracts phone-level pronunciation feature for...
bool PrintTotalStats() const
void ScaleNnet(BaseFloat scale, Nnet *nnet)
Scales the nnet parameters and stats by this scale.
unordered_map< std::string, discriminative::DiscriminativeObjectiveInfo, StringHasher > objf_info_
~NnetDiscriminativeComputeObjf()
const TransitionModel & tmodel_
void PrintAll(const std::string &criterion) const
This class represents a matrix that's stored on the GPU if we have one, and in memory if not...
This file contains some miscellaneous functions dealing with class Nnet.
void SetNnetAsGradient(Nnet *nnet)
Sets nnet as gradient by setting is_gradient_ to true and learning_rate_ to 1 for each UpdatableCompo...
const discriminative::DiscriminativeObjectiveInfo * GetObjective(const std::string &output_name) const
void AcceptInput(const std::string &node_name, CuMatrix< BaseFloat > *input)
e.g.
int32 num_minibatches_processed_
double TotalObjf(const std::string &criterion) const
const CuMatrixBase< BaseFloat > & GetOutput(const std::string &node_name)
bool IsOutputNode(int32 node) const
Returns true if this is an output node, meaning that it is of type kDescriptor and is not directly fo...
CuVector< BaseFloat > log_priors_
void AcceptInputs(const Nnet &nnet, const std::vector< NnetIo > &io)
This convenience function calls AcceptInput() in turn on all the inputs in the training example...
BaseFloat xent_regularize
NnetDiscriminativeComputeObjf(const NnetComputeProbOptions &nnet_config, const discriminative::DiscriminativeOptions &discriminative_config, const TransitionModel &tmodel, const VectorBase< BaseFloat > &priors, const Nnet &nnet)
void ComputeDiscriminativeObjfAndDeriv(const DiscriminativeOptions &opts, const TransitionModel &tmodel, const CuVectorBase< BaseFloat > &log_priors, const DiscriminativeSupervision &supervision, const CuMatrixBase< BaseFloat > &nnet_output, DiscriminativeObjectiveInfo *stats, CuMatrixBase< BaseFloat > *nnet_output_deriv, CuMatrixBase< BaseFloat > *xent_output_deriv)
This function does forward-backward on the numerator and denominator lattices and computes derivatives ...
discriminative::DiscriminativeOptions discriminative_config_
Real TraceMatMat(const MatrixBase< Real > &A, const MatrixBase< Real > &B, MatrixTransposeType trans)
We need to declare this here as it will be a friend function.
std::shared_ptr< const NnetComputation > Compile(const ComputationRequest &request)
Does the compilation and returns a const pointer to the result, which is owned by this class...
NnetComputeOptions compute_config
Matrix for CUDA computing.
NnetComputeProbOptions nnet_config_
class NnetComputer is responsible for executing the computation described in the "computation" object...
#define KALDI_ASSERT(cond)
std::vector< NnetIo > inputs
'inputs' contains the input to the network — normally it has just one element called "input"...
std::vector< NnetDiscriminativeSupervision > outputs
'outputs' contains the sequence output supervision.
void Compute(const NnetDiscriminativeExample &eg)
void GetDiscriminativeComputationRequest(const Nnet &nnet, const NnetDiscriminativeExample &eg, bool need_model_derivative, bool store_component_stats, bool use_xent_regularization, bool use_xent_derivative, ComputationRequest *request)
This function takes a NnetDiscriminativeExample and produces a ComputationRequest.
int32 GetNodeIndex(const std::string &node_name) const
returns index associated with this node name, or -1 if no such index.
Provides a vector abstraction class.
discriminative::DiscriminativeSupervision supervision
CachingOptimizingCompiler compiler_
const Nnet & GetDeriv() const
void Resize(MatrixIndexT rows, MatrixIndexT cols, MatrixResizeType resize_type=kSetZero, MatrixStrideType stride_type=kDefaultStride)
Allocate the memory.
NnetDiscriminativeExample is like NnetExample, but specialized for sequence training.
void ProcessOutputs(const NnetDiscriminativeExample &eg, NnetComputer *computer)
void Run()
This does either the forward or backward computation, depending when it is called (in a typical compu...