forked from k2-fsa/icefall
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcompute_ppl.py
executable file
·109 lines (86 loc) · 2.8 KB
/
compute_ppl.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
#!/usr/bin/env python3
#
# Copyright 2023 Xiaomi Corp. (Author: Yifan Yang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
./pruned_transducer_stateless7/compute_ppl.py \
--ngram-lm-path ./download/lm/3gram_pruned_1e7.arpa
"""
import argparse
import logging
import math
from typing import Dict, List, Optional, Tuple
import kenlm
import torch
from asr_datamodule import GigaSpeechAsrDataModule
def get_parser():
    """Build the command-line argument parser for this script.

    Returns:
      An ``argparse.ArgumentParser`` that shows option defaults in its
      help output and accepts ``--ngram-lm-path``.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )

    parser.add_argument(
        "--ngram-lm-path",
        type=str,
        default="download/lm/3gram_pruned_1e7.arpa",
        # Original help text described a lang dir / LG graph, which belongs
        # to a different option — this flag is the ARPA n-gram LM file path.
        help="Path to the n-gram LM file in ARPA format",
    )

    return parser
def decode_dataset(
    dl: torch.utils.data.DataLoader,
    model: "kenlm.Model",
) -> float:
    """Compute the perplexity of an n-gram LM over a dataset.

    Args:
      dl:
        PyTorch dataloader yielding batches whose
        ``batch["supervisions"]["text"]`` is a list of transcript strings.
      model:
        A ``kenlm.Model`` n-gram language model. ``model.score`` is assumed
        to return the log10 probability of the whole sentence (kenlm's
        default, which includes the end-of-sentence token).

    Returns:
      The perplexity (base 10) of the given dataset.

    Raises:
      ValueError: if the dataset yields no tokens at all.
    """
    total_neg_log10 = 0.0
    total_tokens = 0
    for batch in dl:
        for text in batch["supervisions"]["text"]:
            # +1 accounts for the implicit end-of-sentence token that
            # kenlm's full-sentence score covers.
            total_tokens += len(text.split()) + 1
            total_neg_log10 += -model.score(text)
    if total_tokens == 0:
        # The original code raised ZeroDivisionError here; fail loudly
        # with a message that explains the actual problem instead.
        raise ValueError("Cannot compute perplexity on an empty dataset")
    return math.pow(10.0, total_neg_log10 / total_tokens)
def main():
    """Compute the n-gram LM perplexity on the GigaSpeech dev and test sets."""
    parser = get_parser()
    GigaSpeechAsrDataModule.add_arguments(parser)
    args = parser.parse_args()

    logging.info("About to load ngram LM")
    lm = kenlm.Model(args.ngram_lm_path)

    datamodule = GigaSpeechAsrDataModule(args)
    # Build one dataloader per evaluation split; dict preserves order.
    loaders = {
        "dev": datamodule.test_dataloaders(datamodule.dev_cuts()),
        "test": datamodule.test_dataloaders(datamodule.test_cuts()),
    }

    for split_name, loader in loaders.items():
        ppl = decode_dataset(dl=loader, model=lm)
        logging.info(f"{split_name} PPL: {ppl}")

    logging.info("Done!")
if __name__ == "__main__":
    # Configure the root logger with timestamp + source location, then run.
    log_format = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    logging.basicConfig(format=log_format, level=logging.INFO)
    main()