nnet-compute-prob.cc
// nnet2bin/nnet-compute-prob.cc

// Copyright 2012 Johns Hopkins University (author: Daniel Povey)

// See ../../COPYING for clarification regarding multiple authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//  http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.

#include "base/kaldi-common.h"
#include "util/common-utils.h"
#include "hmm/transition-model.h"
#include "nnet2/train-nnet.h"
#include "nnet2/am-nnet.h"

int main(int argc, char *argv[]) {
  try {
    using namespace kaldi;
    using namespace kaldi::nnet2;
    typedef kaldi::int32 int32;
    typedef kaldi::int64 int64;

    const char *usage =
        "Computes and prints the average log-prob per frame of the given data with a\n"
        "neural net. The input of this is the output of e.g. nnet-get-egs\n"
        "Aside from the logging output, which goes to the standard error, this program\n"
        "prints the average log-prob per frame to the standard output.\n"
        "Also see nnet-logprob, which produces a matrix of log-probs for each utterance.\n"
        "\n"
        "Usage: nnet-compute-prob [options] <model-in> <training-examples-in>\n"
        "e.g.: nnet-compute-prob 1.nnet ark:valid.egs\n";

    ParseOptions po(usage);

    po.Read(argc, argv);

    if (po.NumArgs() != 2) {
      po.PrintUsage();
      exit(1);
    }

    std::string nnet_rxfilename = po.GetArg(1),
        examples_rspecifier = po.GetArg(2);

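    // The single model input file holds a TransitionModel followed by the
    // AmNnet; both are read from the same stream.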
    TransitionModel trans_model;
    AmNnet am_nnet;
    {
      bool binary_read;
      Input ki(nnet_rxfilename, &binary_read);
      trans_model.Read(ki.Stream(), binary_read);
      am_nnet.Read(ki.Stream(), binary_read);
    }

    std::vector<NnetExample> examples;
    double tot_weight = 0.0, tot_like = 0.0, tot_accuracy = 0.0;
    int64 num_examples = 0;
    SequentialNnetExampleReader example_reader(examples_rspecifier);
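    // Buffer the examples and evaluate them in minibatches of up to 1000,
    // accumulating the total objective, accuracy and frame weight.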
    for (; !example_reader.Done(); example_reader.Next(), num_examples++) {
      if (examples.size() == 1000) {
        double accuracy;
        tot_like += ComputeNnetObjf(am_nnet.GetNnet(), examples, &accuracy);
        tot_accuracy += accuracy;
        tot_weight += TotalNnetTrainingWeight(examples);
        examples.clear();
      }
      examples.push_back(example_reader.Value());
      if (num_examples % 5000 == 0 && num_examples > 0)
        KALDI_LOG << "Saw " << num_examples << " examples, average "
                  << "probability is " << (tot_like / tot_weight) << " with "
                  << "total weight " << tot_weight;
    }
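    // Flush any remaining examples that did not fill a complete minibatch.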
    if (!examples.empty()) {
      double accuracy;
      tot_like += ComputeNnetObjf(am_nnet.GetNnet(), examples, &accuracy);
      tot_accuracy += accuracy;
      tot_weight += TotalNnetTrainingWeight(examples);
    }

    KALDI_LOG << "Saw " << num_examples << " examples, average "
              << "probability is " << (tot_like / tot_weight)
              << " and accuracy is " << (tot_accuracy / tot_weight) << " with "
              << "total weight " << tot_weight;

    std::cout << (tot_like / tot_weight) << "\n";
    return (num_examples == 0 ? 1 : 0);
  } catch(const std::exception &e) {
    std::cerr << e.what() << '\n';
    return -1;
  }
}

Referenced symbols:

kaldi::nnet2::ComputeNnetObjf(const Nnet &nnet, const std::vector<NnetExample> &examples, double *tot_accuracy): computes the objective function over a minibatch (nnet-update.cc:258).
kaldi::nnet2::TotalNnetTrainingWeight(const std::vector<NnetExample> &egs): returns the total weight summed over all the examples (nnet-update.cc:248).
kaldi::nnet2::AmNnet::Read(std::istream &is, bool binary): am-nnet.cc:39.
kaldi::nnet2::AmNnet::GetNnet() const: am-nnet.h:61.
kaldi::TransitionModel::Read(std::istream &is, bool binary).
kaldi::Input::Stream(): kaldi-io.cc:826.
kaldi::ParseOptions: class for parsing command-line options (parse-options.h:36).
kaldi::ParseOptions::Read(int argc, const char *const *argv): parses the command-line options and fills the ParseOptions-registered variables.
kaldi::ParseOptions::NumArgs() const: number of positional parameters (c.f. argc - 1).
kaldi::ParseOptions::GetArg(int param) const: returns one of the positional parameters; 1-based indexing for argc/argv compatibility.
kaldi::ParseOptions::PrintUsage(bool print_command_line=false): prints the usage documentation provided in the constructor.
SequentialNnetExampleReader: a typedef of the templated class for reading objects sequentially from an archive or script file (kaldi-table.h:287).
KALDI_LOG: logging macro defined in kaldi-error.h:153.