21 #ifndef KALDI_NNET_NNET_ACTIVATION_H_    22 #define KALDI_NNET_NNET_ACTIVATION_H_   103     diag_out_diff_out_.Resize(out.
NumRows());
   104     diag_out_diff_out_.AddDiagMatMat(1.0, out_diff, 
kNoTrans, out, 
kTrans, 0.0);
   129     while (is >> std::ws, !is.eof()) {
   131        if (token == 
"<BlockDims>") is >> dims_str;
   132       else KALDI_ERR << 
"Unknown token " << token << 
", a typo in config?"   137       KALDI_ERR << 
"Invalid block-dims " << dims_str;
   140     for (
int32 i = 0; 
i < block_dims.size(); 
i++) {
   141       sum += block_dims[
i];
   148     block_offset.resize(block_dims.size()+1, 0);
   149     for (
int32 i = 0; 
i < block_dims.size(); 
i++) {
   150       block_offset[
i+1] = block_offset[
i] + block_dims[
i];
   163     for (
int32 bl = 0; bl < block_dims.size(); bl++) {
   166         in.
ColRange(block_offset[bl], block_dims[bl]);
   168         out->
ColRange(block_offset[bl], block_dims[bl]);
   184     for (
int32 bl = 0; bl < block_dims.size(); bl++) {
   187         in_diff->
ColRange(block_offset[bl], block_dims[bl]);
   193       row_diff_mask.
Scale(-1.0);  
   194       row_diff_mask.
Add(1.0);  
   201     return "\n  softmax-dims " + 
ToString(block_dims);
   286     while (is >> std::ws, !is.eof()) {
   288        if (token == 
"<DropoutRate>") 
ReadBasicType(is, 
false, &dropout_rate_);
   289       else KALDI_ERR << 
"Unknown token " << token << 
", a typo in config?"   292     KALDI_ASSERT(dropout_rate_ >= 0.0 && dropout_rate_ < 1.0);
   297     bool finished = 
false;
   298     while (
'<' == 
Peek(is, binary) && !finished) {
   301       switch (first_char) {
   303            if (token == 
"<DropoutRate>") 
ReadBasicType(is, binary, &dropout_rate_);
   304           else if (token == 
"<DropoutRetention>") { 
   307             dropout_rate_ = 1.0 - dropout_retention;
   308           } 
else KALDI_ERR << 
"Unknown token: " << token;
   310         case '!': 
ExpectToken(is, binary, 
"<!EndOfComponent>");
   317     KALDI_ASSERT(dropout_rate_ >= 0.0 && dropout_rate_ < 1.0);
   326     return std::string(
"<DropoutRate> ") + 
ToString(dropout_rate_);
   334     rand_.RandUniform(&dropout_mask_);  
   335     dropout_mask_.Add(-dropout_rate_);  
   336     dropout_mask_.Heaviside(dropout_mask_); 
   339     out->
Scale(1.0 / (1.0 - dropout_rate_));
   350     in_diff->
Scale(1.0 / (1.0 - dropout_rate_));
   357     KALDI_ASSERT(dropout_rate_ >= 0.0 && dropout_rate_ < 1.0);
   372 #endif  // KALDI_NNET_NNET_ACTIVATION_H_ std::string ToString(const T &t)
Convert basic type to a string (please don't overuse). 
 
ComponentType GetType() const
Get Type Identification of the component. 
 
void CopyFromMat(const MatrixBase< OtherReal > &src, MatrixTransposeType trans=kNoTrans)
 
This code computes Goodness of Pronunciation (GOP) and extracts phone-level pronunciation features. 
 
std::string Info() const
Print some additional info (after <ComponentName> and the dims). 
 
void BackpropagateFnc(const CuMatrixBase< BaseFloat > &in, const CuMatrixBase< BaseFloat > &out, const CuMatrixBase< BaseFloat > &out_diff, CuMatrixBase< BaseFloat > *in_diff)
Backward pass transformation (to be implemented by descending class...) 
 
void WriteData(std::ostream &os, bool binary) const
Writes the component content. 
 
Component * Copy() const
Copy component (deep copy). 
 
Component * Copy() const
Copy component (deep copy). 
 
void InitData(std::istream &is)
Virtual interface for initialization and I/O. 
 
Component * Copy() const
Copy component (deep copy). 
 
Tanh(int32 dim_in, int32 dim_out)
 
BaseFloat GetDropoutRate()
 
ComponentType GetType() const
Get Type Identification of the component. 
 
void BackpropagateFnc(const CuMatrixBase< BaseFloat > &in, const CuMatrixBase< BaseFloat > &out, const CuMatrixBase< BaseFloat > &out_diff, CuMatrixBase< BaseFloat > *in_diff)
Backward pass transformation (to be implemented by descending class...) 
 
void ReadBasicType(std::istream &is, bool binary, T *t)
ReadBasicType is the name of the read function for bool, integer types, and floating-point types...
 
bool SplitStringToIntegers(const std::string &full, const char *delim, bool omit_empty_strings, std::vector< I > *out)
Split a string (e.g. "1:2:3", using the given delimiter) into a vector of integers. 
 
void InitData(std::istream &is)
Virtual interface for initialization and I/O. 
 
CuRand< BaseFloat > rand_
Generator of random numbers. 
 
Sigmoid(int32 dim_in, int32 dim_out)
 
void BackpropagateFnc(const CuMatrixBase< BaseFloat > &in, const CuMatrixBase< BaseFloat > &out, const CuMatrixBase< BaseFloat > &out_diff, CuMatrixBase< BaseFloat > *in_diff)
Backward pass transformation (to be implemented by descending class...) 
 
std::string Info() const
Print some additional info (after <ComponentName> and the dims). 
 
CuMatrix< BaseFloat > dropout_mask_
 
void ReadToken(std::istream &is, bool binary, std::string *str)
ReadToken gets the next token and puts it in str (exception on failure). 
 
This class represents a matrix that's stored on the GPU if we have one, and in memory if not...
 
std::vector< int32 > block_offset
 
Component * Copy() const
Copy component (deep copy). 
 
BlockSoftmax(int32 dim_in, int32 dim_out)
 
int Peek(std::istream &is, bool binary)
Peek consumes whitespace (if binary == false) and then returns the peek() value of the stream...
 
Component * Copy() const
Copy component (deep copy). 
 
ComponentType
Component type identification mechanism. 
 
void BackpropagateFnc(const CuMatrixBase< BaseFloat > &in, const CuMatrixBase< BaseFloat > &out, const CuMatrixBase< BaseFloat > &out_diff, CuMatrixBase< BaseFloat > *in_diff)
Backward pass transformation (to be implemented by descending class...) 
 
void PropagateFnc(const CuMatrixBase< BaseFloat > &in, CuMatrixBase< BaseFloat > *out)
Abstract interface for propagation/backpropagation. 
 
void SetDropoutRate(BaseFloat dr)
 
void PropagateFnc(const CuMatrixBase< BaseFloat > &in, CuMatrixBase< BaseFloat > *out)
Abstract interface for propagation/backpropagation. 
 
void ReadIntegerVector(std::istream &is, bool binary, std::vector< T > *v)
Function for reading STL vector of integer types. 
 
Softmax(int32 dim_in, int32 dim_out)
 
void AddColSumMat(Real alpha, const CuMatrixBase< Real > &mat, Real beta=1.0)
Sum the columns of the matrix, add to vector. 
 
void Sigmoid(const CuMatrixBase< Real > &src)
Set each element to the sigmoid of the corresponding element of "src": element by element...
 
BaseFloat dropout_rate_
Probability that a neuron is dropped. 
 
void ExpectToken(std::istream &is, bool binary, const char *token)
ExpectToken tries to read in the given token, and throws an exception on failure. ...
 
void SoftMaxPerRow(const CuMatrixBase< Real > &src)
Softmax nonlinearity Y = Softmax(X) : Yij = e^Xij / sum_k(e^Xik), done to each row, with attention to avoiding overflow or underflow. 
 
void MulElements(const CuMatrixBase< Real > &A)
Multiply two matrices elementwise: C = C .* A. 
 
ComponentType GetType() const
Get Type Identification of the component. 
 
void BackpropagateFnc(const CuMatrixBase< BaseFloat > &in, const CuMatrixBase< BaseFloat > &out, const CuMatrixBase< BaseFloat > &out_diff, CuMatrixBase< BaseFloat > *in_diff)
Backward pass transformation (to be implemented by descending class...) 
 
ComponentType GetType() const
Get Type Identification of the component. 
 
This class is used for a piece of a CuMatrix. 
 
void WriteData(std::ostream &os, bool binary) const
Writes the component content. 
 
void WriteToken(std::ostream &os, bool binary, const char *token)
The WriteToken functions are for writing nonempty sequences of non-space characters. 
 
void BackpropagateFnc(const CuMatrixBase< BaseFloat > &in, const CuMatrixBase< BaseFloat > &out, const CuMatrixBase< BaseFloat > &out_diff, CuMatrixBase< BaseFloat > *in_diff)
Backward pass transformation (to be implemented by descending class...) 
 
int PeekToken(std::istream &is, bool binary)
PeekToken will return the first character of the next token, or -1 if end of file. 
 
ComponentType GetType() const
Get Type Identification of the component. 
 
Component * Copy() const
Copy component (deep copy). 
 
ComponentType GetType() const
Get Type Identification of the component. 
 
void DiffTanh(const CuMatrixBase< Real > &value, const CuMatrixBase< Real > &diff)
Differentiate backward through the tanh function. 
 
std::vector< int32 > block_dims
 
void DiffSigmoid(const CuMatrixBase< Real > &value, const CuMatrixBase< Real > &diff)
Differentiate backward through the sigmoid function. 
 
CuSubMatrix< Real > ColRange(const MatrixIndexT col_offset, const MatrixIndexT num_cols) const
 
void PropagateFnc(const CuMatrixBase< BaseFloat > &in, CuMatrixBase< BaseFloat > *out)
Abstract interface for propagation/backpropagation. 
 
void PropagateFnc(const CuMatrixBase< BaseFloat > &in, CuMatrixBase< BaseFloat > *out)
Abstract interface for propagation/backpropagation. 
 
Matrix for CUDA computing. 
 
MatrixIndexT NumCols() const
 
Dropout(int32 dim_in, int32 dim_out)
 
void PropagateFnc(const CuMatrixBase< BaseFloat > &in, CuMatrixBase< BaseFloat > *out)
Abstract interface for propagation/backpropagation. 
 
#define KALDI_ASSERT(cond)
 
CuVector< BaseFloat > diag_out_diff_out_
Buffer for dot-products in BackpropagateFnc. 
 
void WriteIntegerVector(std::ostream &os, bool binary, const std::vector< T > &v)
Function for writing STL vectors of integer types. 
 
void ReadData(std::istream &is, bool binary)
Reads the component content. 
 
void WriteBasicType(std::ostream &os, bool binary, T t)
WriteBasicType is the name of the write function for bool, integer types, and floating-point types...
 
Abstract class, building block of the network. 
 
HiddenSoftmax(int32 dim_in, int32 dim_out)
 
int32 OutputDim() const
Get the dimension of the output. 
 
void PropagateFnc(const CuMatrixBase< BaseFloat > &in, CuMatrixBase< BaseFloat > *out)
Abstract interface for propagation/backpropagation. 
 
MatrixIndexT NumRows() const
Dimensions. 
 
void ReadData(std::istream &is, bool binary)
Reads the component content. 
 
void Tanh(const CuMatrixBase< Real > &src)
Compute the hyperbolic tangent (tanh) function; element by element, *this = tanh(src). 
 
void MulRowsVec(const CuVectorBase< Real > &scale)
scale i'th row by scale[i] 
 
void AddDiagVecMat(const Real alpha, const CuVectorBase< Real > &v, const CuMatrixBase< Real > &M, MatrixTransposeType transM, Real beta=1.0)
*this = beta * *this + alpha * diag(v) * M [or M^T].