history.sh (executable file, 214 lines, 6.19 KB)
#!/usr/bin/env bash
#
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
# Extract historic data from the Jenkins metrics server.
# Useful when resetting the checkmetrics baseline data.
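#
# Example invocation (values here are purely illustrative; -n, -r and the
# KATA_HYPERVISOR variable are the knobs this script exposes):
#   KATA_HYPERVISOR=cloud-hypervisor ./history.sh -n 10 -r "kata-containers-2.0-metrics-ubuntu-20-04-PR"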
set -e
export KATA_HYPERVISOR="${KATA_HYPERVISOR:-qemu}"
# Base directory where we store the downloaded data
datadir=$(dirname "$0")/data
# How many recent builds do we evaluate
NUM_BUILDS=5
# What is the default set of repos (Jenkins jobs) we evaluate
default_repos=()
default_repos+=("kata-containers-2.0-metrics-ubuntu-20-04-PR")
default_repos+=("kata-containers-2.0-tests-metrics-ubuntu-20-04-PR")
repos=()
# What test results do we evaluate for each build
tests=()
test_queries=()
tests+=("boot-times")
test_queries+=(".\"boot-times\".Results | [.[] | .\"to-workload\".Result] | add / length")
tests+=("blogbench")
test_queries+=(".\"blogbench\".Results | .[] | .write.Result")
tests+=("blogbench")
test_queries+=(".\"blogbench\".Results | .[] | .read.Result")
tests+=("memory-footprint")
test_queries+=(".\"memory-footprint\".Results | .[] | .average.Result")
tests+=("memory-footprint-ksm")
test_queries+=(".\"memory-footprint-ksm\".Results | .[] | .average.Result")
tests+=("memory-footprint-inside-container")
test_queries+=(".\"memory-footprint-inside-container\".Results | .[] | .memtotal.Result")
if [ "${KATA_HYPERVISOR}" == "cloud-hypervisor" ]; then
tests+=("network-iperf3")
test_queries+=(".\"network-iperf3\".Results | .[] | .cpu.Result")
tests+=("latency")
test_queries+=(".\"latency\".Results | .[] | .latency.Result")
tests+=("network-iperf3")
test_queries+=(".\"network-iperf3\".Results | .[] | .parallel.Result")
tests+=("network-iperf3")
test_queries+=(".\"network-iperf3\".Results | .[] | .jitter.Result")
tests+=("network-iperf3")
test_queries+=(".\"network-iperf3\".Results | .[] | .bandwidth.Result")
fi
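# For reference, the jq queries above expect each per-test results file to be
# shaped roughly like the sketch below (illustrative only, not verbatim
# Jenkins output):
#   { "boot-times": { "Results": [ { "to-workload": { "Result": 1.23 } }, ... ] } }
#   { "blogbench":  { "Results": [ { "write": { "Result": 456 }, "read": { "Result": 789 } } ] } }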
# What is the base URL of the Jenkins server
url_base="http://jenkins.katacontainers.io/job"
# Where do we find the recent build number information
url_index="api/json"
# Where do we get the actual build results from
url_artifacts="artifact/go/src/github.com/kata-containers/tests/metrics/results/artifacts"
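# Composed together, a fully expanded per-test artifact URL therefore looks
# roughly like this (repo, build number and test name are illustrative):
#   http://jenkins.katacontainers.io/job/<repo>/<build>/artifact/go/src/github.com/kata-containers/tests/metrics/results/artifacts/qemu-boot-times.json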
# Gather up the results (json) files from all the defined repos for the last
# NUM_BUILDS builds.
gather_data() {
for repo in "${repos[@]}"; do
echo "Getting history for repo $repo"
local outpath="${indexdir}/${repo}"
local outname="${outpath}/index.json"
mkdir -p "${outpath}"
local url="${url_base}/${repo}/${url_index}"
# First, we need the index file for the job so we can get the list of the
# last 'n' jobs run.
curl -L -o "${outname}" "${url}"
builds=$(jq '.builds | .[] | .number' "${outname}" | head -n "${NUM_BUILDS}")
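# The fetched index.json is expected to contain, roughly (illustrative sketch):
#   { "builds": [ { "number": 123 }, { "number": 122 }, ... ] }
# from which the jq call above extracts the most recent build numbers.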
echo "Examining builds: $builds"
# For each build, for each test, pull down the json results file, if it
# exists
for build in $builds; do
echo "Get results for build $build"
local builddir="${resultsdir}/${repo}/${build}"
mkdir -p "${builddir}"
local build_url="${url_base}/${repo}/${build}/${url_artifacts}"
echo "Pulling result from $build_url"
for test in "${tests[@]}"; do
local testfile="${builddir}/${KATA_HYPERVISOR}-${test}.json"
local test_url="${build_url}/${KATA_HYPERVISOR}-${test}.json"
echo " $test_url"
# Can fail if the build failed to generate any results
curl -L -o "${testfile}" "${test_url}" || true
done
done
done
}
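# After gather_data completes, the download directory is laid out roughly as
# follows (illustrative paths, assuming the default data dir and hypervisor):
#   data/indexes/<repo>/index.json
#   data/results/<repo>/<build>/qemu-boot-times.json
#   data/results/<repo>/<build>/qemu-blogbench.json
#   ...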
# For each test type, process all the relevant data files in the results subdir.
# *NOTE*: this does *not* take into account the number or list of build numbers we
# pulled down - it will evaluate all files it finds. If you want to evaluate only
# the data you pulled, ensure the results directory is empty (or non-existent) before
# you run the script.
process_data() {
local count=0
for test in "${tests[@]}"; do
query="${test_queries[$count]}"
echo "Processing $test"
echo " Query '$query'"
count=$((count+1))
local allvalues=""
local found=0
local total=0
local min=$(printf "%u" -1)
local max=0
files=$(find "${resultsdir}" -name "${KATA_HYPERVISOR}-${test}.json" -print)
for file in ${files}; do
echo " Look at file $file"
value=$(jq "$query" "$file" || true)
echo " Result $value"
if [ -n "$value" ]; then
allvalues="$value $allvalues"
found=$((found+1))
total=$(echo $total+$value | bc)
(( $(echo "$value > $max" | bc) )) && max=${value}
(( $(echo "$value < $min" | bc) )) && min=${value}
fi
done
# Guard against a build set that produced no usable results for this test
[ "${found}" -eq 0 ] && { echo " No results found for ${test}"; continue; }
mean=$(echo "scale=2; $total/$found" | bc)
minpc=$(echo "scale=2; ($min/$mean)*100" | bc)
maxpc=$(echo "scale=2; ($max/$mean)*100" | bc)
pc_95=$(echo "scale=2; $mean*0.95" | bc)
pc_105=$(echo "scale=2; $mean*1.05" | bc)
echo "allvalues are [$allvalues]"
echo "${test}: mean $mean, 95% mean ${pc_95}, 105% mean ${pc_105}"
echo " min $min ($minpc% of mean), max $max ($maxpc% of mean)"
done
}
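# The per-test summary printed above looks roughly like this (numbers are
# illustrative only):
#   boot-times: mean 1.85, 95% mean 1.75, 105% mean 1.94
#    min 1.70 (91.89% of mean), max 2.01 (108.64% of mean)
# These mean/min/max figures are typically what you feed back into the
# checkmetrics baseline configuration.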
help() {
usage=$(cat << EOF
Usage: $0 [-h] [options]
Description:
Gather statistics from recent Jenkins CI metrics builds. The resulting
data is useful for configuring the metrics slave checkmetrics baselines.
To change which metrics tests are evaluated, edit the values in this
script directly. Default tests evaluated are:
"${tests[@]}"
Options:
-d <path>, Directory to store downloaded data (default: ${datadir})
-h, Print this help
-n <n>, Fetch last 'n' build data from Jenkins server (default: ${NUM_BUILDS})
Note: The statistics calculations include *all* data files in the
directory: ${resultsdir}. If previous data exists, it will be counted.
-r <remote>, Which Jenkins build jobs to gather data from.
(default: "${default_repos[@]}")
EOF
)
echo "$usage"
}
main() {
local OPTIND
while getopts "d:hn:r:" opt;do
case ${opt} in
d)
datadir="${OPTARG}"
;;
h)
help
exit 0;
;;
n)
NUM_BUILDS="${OPTARG}"
;;
r)
repos+=("${OPTARG}")
;;
?)
# parse failure
help
echo "Failed to parse arguments" >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
[ "${#repos[@]}" -eq 0 ] && repos=("${default_repos[@]}")
resultsdir="${datadir}/results"
indexdir="${datadir}/indexes"
gather_data
process_data
}
main "$@"