gmm-global-init-from-feats.cc
// gmmbin/gmm-global-init-from-feats.cc

// Copyright 2013  Johns Hopkins University (author: Daniel Povey)

// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.


#include "base/kaldi-common.h"
#include "util/common-utils.h"
#include "gmm/model-common.h"
#include "gmm/full-gmm.h"
#include "gmm/diag-gmm.h"
#include "gmm/mle-full-gmm.h"

namespace kaldi {

// We initialize the GMM parameters by setting the variance to the global
// variance of the features, and the means to distinct randomly chosen frames.
void InitGmmFromRandomFrames(const Matrix<BaseFloat> &feats, DiagGmm *gmm) {
  int32 num_gauss = gmm->NumGauss(), num_frames = feats.NumRows(),
      dim = feats.NumCols();
  KALDI_ASSERT(num_frames >= 10 * num_gauss && "Too few frames to train on");
  Vector<double> mean(dim), var(dim);
  for (int32 i = 0; i < num_frames; i++) {
    mean.AddVec(1.0 / num_frames, feats.Row(i));
    var.AddVec2(1.0 / num_frames, feats.Row(i));
  }
  var.AddVec2(-1.0, mean);
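  // var now holds the global diagonal variance: E[x^2] - (E[x])^2.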
  if (var.Max() <= 0.0)
    KALDI_ERR << "Features do not have positive variance " << var;

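  // DiagGmmNormal stores the parameters as plain means and variances (rather
  // than the inverse-variance form used internally by DiagGmm), so it is
  // convenient to fill in directly and then copy back.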
  DiagGmmNormal gmm_normal(*gmm);

  std::set<int32> used_frames;
  for (int32 g = 0; g < num_gauss; g++) {
    int32 random_frame = RandInt(0, num_frames - 1);
    while (used_frames.count(random_frame) != 0)
      random_frame = RandInt(0, num_frames - 1);
    used_frames.insert(random_frame);
    gmm_normal.weights_(g) = 1.0 / num_gauss;
    gmm_normal.means_.Row(g).CopyFromVec(feats.Row(random_frame));
    gmm_normal.vars_.Row(g).CopyFromVec(var);
  }
  gmm->CopyFromNormal(gmm_normal);
  gmm->ComputeGconsts();
}

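// Runs a single EM iteration over the in-memory features: accumulates the
// occupancy, mean and variance statistics with multiple threads (the E-step),
// then re-estimates the GMM parameters with MleDiagGmmUpdate (the M-step).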
void TrainOneIter(const Matrix<BaseFloat> &feats,
                  const MleDiagGmmOptions &gmm_opts,
                  int32 iter,
                  int32 num_threads,
                  DiagGmm *gmm) {
  AccumDiagGmm gmm_acc(*gmm, kGmmAll);

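  // AccumulateFromDiagMultiThreaded expects per-frame weights; here every
  // frame gets the same weight of 1.0.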
  Vector<BaseFloat> frame_weights(feats.NumRows(), kUndefined);
  frame_weights.Set(1.0);

  double tot_like;
  tot_like = gmm_acc.AccumulateFromDiagMultiThreaded(*gmm, feats, frame_weights,
                                                     num_threads);

  KALDI_LOG << "Likelihood per frame on iteration " << iter
            << " was " << (tot_like / feats.NumRows()) << " over "
            << feats.NumRows() << " frames.";

  BaseFloat objf_change, count;
  MleDiagGmmUpdate(gmm_opts, gmm_acc, kGmmAll, gmm, &objf_change, &count);

  KALDI_LOG << "Objective-function change on iteration " << iter << " was "
            << (objf_change / count) << " over " << count << " frames.";
}

}  // namespace kaldi

int main(int argc, char *argv[]) {
  try {
    using namespace kaldi;

    const char *usage =
        "This program initializes a single diagonal GMM and does multiple iterations of\n"
        "training from features stored in memory.\n"
        "Usage: gmm-global-init-from-feats [options] <feature-rspecifier> <model-out>\n"
        "e.g.: gmm-global-init-from-feats scp:train.scp 1.mdl\n";

    ParseOptions po(usage);
    MleDiagGmmOptions gmm_opts;

    bool binary = true;
    int32 num_gauss = 100;
    int32 num_gauss_init = 0;
    int32 num_iters = 50;
    int32 num_frames = 200000;
    int32 srand_seed = 0;
    int32 num_threads = 4;

    po.Register("binary", &binary, "Write output in binary mode");
    po.Register("num-gauss", &num_gauss, "Number of Gaussians in the model");
    po.Register("num-gauss-init", &num_gauss_init, "Number of Gaussians in "
                "the model initially (if nonzero and less than num_gauss, "
                "we'll do mixture splitting)");
    po.Register("num-iters", &num_iters, "Number of iterations of training");
    po.Register("num-frames", &num_frames, "Number of feature vectors to store in "
                "memory and train on (randomly chosen from the input features)");
    po.Register("srand", &srand_seed, "Seed for random number generator ");
    po.Register("num-threads", &num_threads, "Number of threads used for "
                "statistics accumulation");

    gmm_opts.Register(&po);

    po.Read(argc, argv);

    srand(srand_seed);

    if (po.NumArgs() != 2) {
      po.PrintUsage();
      exit(1);
    }

    std::string feature_rspecifier = po.GetArg(1),
        model_wxfilename = po.GetArg(2);

    Matrix<BaseFloat> feats;

    SequentialBaseFloatMatrixReader feature_reader(feature_rspecifier);

    KALDI_ASSERT(num_frames > 0);

    int64 num_read = 0, dim = 0;

    KALDI_LOG << "Reading features (will keep " << num_frames << " frames.)";

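    // Read the features, keeping at most num_frames of them.  Once the buffer
    // is full, reservoir sampling is used: frame number n (for n > num_frames)
    // is kept with probability num_frames / n and overwrites a random slot, so
    // every input frame ends up in the buffer with equal probability.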
    for (; !feature_reader.Done(); feature_reader.Next()) {
      const Matrix<BaseFloat> &this_feats = feature_reader.Value();
      for (int32 t = 0; t < this_feats.NumRows(); t++) {
        num_read++;
        if (dim == 0) {
          dim = this_feats.NumCols();
          feats.Resize(num_frames, dim);
        } else if (this_feats.NumCols() != dim) {
          KALDI_ERR << "Features have inconsistent dims "
                    << this_feats.NumCols() << " vs. " << dim
                    << " (current utt is) " << feature_reader.Key();
        }
        if (num_read <= num_frames) {
          feats.Row(num_read - 1).CopyFromVec(this_feats.Row(t));
        } else {
          BaseFloat keep_prob = num_frames / static_cast<BaseFloat>(num_read);
          if (WithProb(keep_prob)) {  // With probability "keep_prob"
            feats.Row(RandInt(0, num_frames - 1)).CopyFromVec(this_feats.Row(t));
          }
        }
      }
    }

    if (num_read < num_frames) {
      KALDI_WARN << "Number of frames read " << num_read << " was less than "
                 << "target number " << num_frames << ", using all we read.";
      feats.Resize(num_read, dim, kCopyData);
    } else {
      BaseFloat percent = num_frames * 100.0 / num_read;
      KALDI_LOG << "Kept " << num_frames << " out of " << num_read
                << " input frames = " << percent << "%.";
    }

    if (num_gauss_init <= 0 || num_gauss_init > num_gauss)
      num_gauss_init = num_gauss;

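    // The model starts with num_gauss_init components and is grown to
    // num_gauss by mixture splitting during training (see below).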
    DiagGmm gmm(num_gauss_init, dim);

    KALDI_LOG << "Initializing GMM means from random frames to "
              << num_gauss_init << " Gaussians.";
    InitGmmFromRandomFrames(feats, &gmm);

    // We'll increase the number of Gaussians by splitting
    // until halfway through training.
    int32 cur_num_gauss = num_gauss_init,
        gauss_inc = (num_gauss - num_gauss_init) / (num_iters / 2);
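    // Note: (num_iters / 2) is integer division, so this assumes
    // num_iters >= 2; gauss_inc components are added after each of the
    // early iterations until num_gauss is reached.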

    for (int32 iter = 0; iter < num_iters; iter++) {
      TrainOneIter(feats, gmm_opts, iter, num_threads, &gmm);

      int32 next_num_gauss = std::min(num_gauss, cur_num_gauss + gauss_inc);
      if (next_num_gauss > gmm.NumGauss()) {
        KALDI_LOG << "Splitting to " << next_num_gauss << " Gaussians.";
        gmm.Split(next_num_gauss, 0.1);
        cur_num_gauss = next_num_gauss;
      }
    }

    WriteKaldiObject(gmm, model_wxfilename, binary);
    KALDI_LOG << "Wrote model to " << model_wxfilename;
    return 0;
  } catch(const std::exception &e) {
    std::cerr << e.what();
    return -1;
  }
}