14 changed files with 799 additions and 218 deletions
@@ -0,0 +1,266 @@
#!/usr/bin/env bash
# shellcheck disable=1091,2016

analysis_list=()

analysis_list+=(analysis_cleanup)
analysis_cleanup() {
        local dir=${1:-.}

        rm -f  "$dir"/analysis.json
        rm -rf "$dir"/analysis
        mkdir -p "$dir"/analysis
}

analysis_list+=(analysis_block_arrivals)
analysis_block_arrivals() {
        local dir=${1:-.}
        cat "$dir"/logs/block-arrivals.gauge
        json_file_append "$dir"/analysis.json '
          { block_arrivals: $arrivals
          }' --rawfile arrivals "$dir"/logs/block-arrivals.gauge <<<0
}
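
## Note: json_file_append and json_file_prepend (used throughout this file)
## are helpers assumed to be defined elsewhere in the repo, outside this
## change.  A minimal sketch of the semantics implied by the call sites --
## evaluate a jq expression (the trailing '<<<0' feeds jq a dummy input) and
## merge the result into the file's top-level object, new keys winning on
## append and losing on prepend:
#
# json_file_append() {
#         local file=$1 expr=$2; shift 2
#         local new
#         new=$(jq "$expr" "$@") || return 1
#         test -f "$file" || echo '{}' > "$file"
#         jq --argjson new "$new" '. + $new' "$file" > "$file".tmp &&
#                 mv "$file".tmp "$file"
# }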

analysis_list+=(analysis_unpack)
analysis_unpack() {
        local dir=${1:-.}

        tar x -C "$dir"/analysis -af "$dir"/logs/logs-explorer.tar.xz
        tar x -C "$dir"/analysis -af "$dir"/logs/logs-nodes.tar.xz
}

analysis_list+=(analysis_log_inventory)
analysis_log_inventory()
{
        local dir=${1:-.}; shift
        local machines=("$@")

        collect_jsonlog_inventory "$dir"/analysis "${machines[@]}" \
          > "$dir"/analysis/log-inventory.json

        json_file_append "$dir"/analysis.json \
          '{ final_log_timestamp: ($logs | max_by(.latest) | .latest)
           , first_node_log_timestamp: ($logs
                                       | map (select(.name != "explorer" and
                                                     .name != "generator"))
                                       | min_by(.earliest) | .earliest)
           , final_node_log_timestamp: ($logs
                                       | map (select(.name != "explorer" and
                                                     .name != "generator"))
                                       | max_by(.latest) | .latest)
           , logs: $logs
           }' --slurpfile logs "$dir"/analysis/log-inventory.json <<<0
}

analysis_list+=(analysis_timetoblock)
analysis_timetoblock() {
        local dir=${1:-.}
        dir=$(realpath "$dir")

        pushd "$dir"/analysis >/dev/null || return 1

        "$dir"/tools/analyse.sh \
          logs-explorer/generator \
          logs-explorer/node \
          "$dir"/analysis

        cp -f analysis/*.{csv,json} .

        popd >/dev/null || return 1

        json_file_prepend "$dir"/analysis.json \
          '{ tx_stats: $txstats[0]
           }' --slurpfile txstats "$dir"/analysis/tx-stats.json <<<0
}

analysis_list+=(analysis_submission_threads)
analysis_submission_threads() {
        local dir=${1:-.} sub_tids tnum

        sub_tids="$("$dir"/tools/generator-logs.sh log-tids \
                      "$dir"/analysis/logs-explorer/generator.json || true)"
        json_file_append "$dir"/analysis.json \
          '{ submission_tids: '"$(jq --slurp <<<$sub_tids)"' }' <<<0

        for tnum in $(seq 0 $(($(echo "$sub_tids" | wc -w) - 1)))
        do "$dir"/tools/generator-logs.sh tid-trace "${tnum}" \
             "$dir"/analysis/logs-explorer/generator.json \
             > "$dir"/analysis/generator.submission-thread-trace."${tnum}".json
        done
}
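
## The submission-thread ids come back as a whitespace-separated list (e.g.
## "0 1 2 3" -- illustrative), so the loop above runs tid-trace once per
## index 0..N-1, writing one trace file per submission thread.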

analysis_list+=(analysis_from_benchmarking)
analysis_from_benchmarking() {
        local dir=${1:-.}
        local analysis aname files

        files=($(ls -- "$dir"/analysis/logs-node-*/node-*.json 2>/dev/null || true))
        if test ${#files[*]} -gt 0
        then for analysis in $(ls -- "$dir"/tools/node.*.sh 2>/dev/null || true)
             do aname=$(sed 's_^.*/node\.\(.*\)\.sh$_\1_' <<<$analysis)
                echo -n " $aname.node"
                "$dir"/tools/node."$aname".sh "${files[@]}" \
                  > "$dir"/analysis/node."$aname".json
                test -x "$dir"/tools/tocsv."$aname".sh &&
                        "$dir"/tools/tocsv."$aname".sh \
                          < "$dir"/analysis/node."$aname".json \
                          > "$dir"/analysis/node."$aname".csv; done; fi

        files=($(ls -- "$dir"/analysis/logs-explorer/node-*.json 2>/dev/null || true))
        if test ${#files[*]} -gt 0
        then for analysis in $(ls -- "$dir"/tools/explorer.*.sh 2>/dev/null || true)
             do aname=$(sed 's_^.*/explorer\.\(.*\)\.sh$_\1_' <<<$analysis)
                echo -n " $aname.explorer"
                "$dir"/tools/explorer."$aname".sh "${files[@]}" \
                  > "$dir"/analysis/explorer."$aname".json
                test -x "$dir"/tools/tocsv."$aname".sh &&
                        "$dir"/tools/tocsv."$aname".sh \
                          < "$dir"/analysis/explorer."$aname".json \
                          > "$dir"/analysis/explorer."$aname".csv; done; fi

        files=($(ls -- "$dir"/analysis/logs-explorer/generator*json 2>/dev/null || true))
        if test ${#files[*]} -gt 0
        then for analysis in $(ls -- "$dir"/tools/generator.*.sh 2>/dev/null || true)
             do aname=$(sed 's_^.*/generator\.\(.*\)\.sh$_\1_' <<<$analysis)
                echo -n " $aname.generator"
                "$dir"/tools/generator."$aname".sh "${files[@]}" \
                  > "$dir"/analysis/generator."$aname".json
                test -x "$dir"/tools/tocsv."$aname".sh &&
                        "$dir"/tools/tocsv."$aname".sh \
                          < "$dir"/analysis/generator."$aname".json \
                          > "$dir"/analysis/generator."$aname".csv; done; fi
}

analysis_list+=(analysis_TraceForgeInvalidBlock)
analysis_TraceForgeInvalidBlock() {
        local dir=${1:-.} msg files

        msg=$(echo ${FUNCNAME[0]} | cut -d_ -f2)
        files=($(ls -- "$dir"/analysis/logs-node-*/node-*.json 2>/dev/null || true))
        if test ${#files[*]} -eq 0
        then return; fi

        ## Collect matching log entries; tolerate zero matches ('|| true').
        { grep --no-filename -F "\"$msg\"" "${files[@]}" || true; } |
        jq 'def katip_timestamp_to_iso8601:
              .[:-4] + "Z";
            .
            | map
              ( (.at | katip_timestamp_to_iso8601)
                as $date_iso
              | { date_iso: $date_iso
                , timestamp: ($date_iso | fromdateiso8601)
                , reason: .data.reason
                , slot: .data.slot
                }
              )
            | sort_by (.timestamp)
            | .[]
            ' --slurp --compact-output > "$dir"/analysis/node."$msg".json
}
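
## katip_timestamp_to_iso8601 drops the last four characters of the ".at"
## timestamp (assumed to be a fractional-seconds suffix such as ".12Z") and
## re-appends "Z", since jq's fromdateiso8601 only accepts whole-second
## ISO-8601 timestamps.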

analysis_list+=(analysis_message_types)
analysis_message_types() {
        local dir=${1:-.} mach tnum sub_tids; shift
        local machines=("$@")

        for mach in ${machines[*]}
        do echo -n .$mach >&2
           local types key
           "$dir"/tools/msgtypes.sh \
             "$dir/analysis/logs-$mach"/node-*.json |
           while read -r ty
                 test -n "$ty"
           do key=$(jq .kind <<<$ty -r | sed 's_.*\.__g')
              jq '{ key: .kind, value: $count }' <<<$ty \
                --argjson count "$(grep -Fh "$key\"" \
                                     "$dir/analysis/logs-$mach"/node-*.json |
                                   wc -l)"
           done |
           jq '{ "\($name)": from_entries }
              ' --slurp --arg name "$mach"
           # jq '{ "\($name)": $types }
           #    ' --arg name "$mach" --null-input \
           #    --argjson types "$("$dir"/tools/msgtypes.sh \
           #                         "$dir/analysis/logs-$mach"/node-*.json |
           #                       jq . --slurp)"
        done | analysis_append "$dir" \
          '{ message_types: add
           }' --slurp
}

analysis_list+=(analysis_repackage_db)
analysis_repackage_db() {
        local dir=${1:-.}

        tar x -C "$dir"/analysis -af "$dir"/logs/db-analysis.tar.xz \
          --wildcards '*.csv' '*.txt'
}

# TODO: broken
# analysis_list+=(analysis_tx_losses)
analysis_tx_losses() {
        local dir=${1:-.}
        dir=$(realpath "$dir")

        pushd "$dir"/analysis >/dev/null || return 1
        if jqtest '(.tx_stats.tx_missing != 0)' "$dir"/analysis.json
        then echo -n " missing-txs"
             . "$dir"/tools/lib-loganalysis.sh
             op_analyse_losses; fi
        popd >/dev/null || return 1
}

analysis_list+=(analysis_derived)
analysis_derived() {
        local dir=${1:-.}
        local f="$dir"/analysis/node.TraceMempoolRejectedTx.json

        analysis_append "$dir" \
          '{ tx_stats:
             ($analysis.tx_stats
              + { tx_rejected: $rejected
                , tx_utxo_invalid: $utxo_invalid
                , tx_missing_input: $missing_input })}
          ' --argjson rejected "$(wc -l < "$f")" \
            --argjson utxo_invalid "$(grep -F "(UTxOValidationUTxOError " "$f" | wc -l)" \
            --argjson missing_input "$(grep -F "(UTxOMissingInput " "$f" | wc -l)" \
            <<<0
}

analysis_list+=(analysis_sanity)
analysis_sanity() {
        local dir=${1:-.} tag errors
        tag=$(run_tag "$dir")

        errors="$(sanity_check_run "$dir")"
        if test "$errors" != "[]"
        then echo
             oprint "sanity check failed for tag: $tag"
             echo "$errors" >&2
             mark_run_broken "$dir" "$errors"
             return 1; fi
}

###
### Aux
###
jsonlog_inventory() {
        local name=$1; shift
        local args fs=("$@")

        args=(--arg name "$name"
              --argjson earliest "$(head -n1 "${fs[0]}")"
              --argjson latest "$(tail -n1 "${fs[-1]}")"
              --argjson files "$(echo ${fs[*]} | shell_list_to_json)"
             )
        jq 'def katip_timestamp_to_iso8601:
              .[:-4] + "Z";
            .
            | { name: $name
              , earliest: ($earliest.at
                          | katip_timestamp_to_iso8601 | fromdateiso8601)
              , latest: ( $latest.at
                          | katip_timestamp_to_iso8601 | fromdateiso8601)
              , files: $files
              }' "${args[@]}" <<<0
}
@@ -0,0 +1,95 @@
#!/usr/bin/env bash
# shellcheck disable=1091,2016

collect_jsonlog_inventory() {
        local dir=$1; shift
        local constituents=("$@")
        local mach jsons

        for mach in ${constituents[*]}
        do jsons=($(ls -- "$dir"/logs-"$mach"/node-*.json))
           jsonlog_inventory "$mach" "${jsons[@]}"; done
        jsonlog_inventory "generator" "$dir"/logs-explorer/generator.json
}

analysis_append() {
        local dir=$1 expr=$2; shift 2
        json_file_append "$dir"/analysis.json '
          $meta[0] as $meta
        | $analysis[0] as $analysis
        | '"$expr
        " --slurpfile meta "$dir/meta.json" \
          --slurpfile analysis "$dir/analysis.json" \
          "$@"
}

analysis_prepend() {
        local dir=$1 expr=$2; shift 2
        json_file_prepend "$dir"/analysis.json '
          $meta[0] as $meta
        | $analysis[0] as $analysis
        | '"$expr
        " --slurpfile meta "$dir/meta.json" \
          --slurpfile analysis "$dir/analysis.json" \
          "$@"
}
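
## Usage sketch (illustrative): the wrappers above pre-bind $meta and
## $analysis, so an expression can refer to previously-computed results:
#
# analysis_append "$dir" \
#   '{ tx_sent_again: $analysis.tx_stats.tx_sent }' <<<0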

###
###

analyse_run() {
        while test $# -ge 1
        do case "$1" in
           --list ) echo ${analysis_list[*]}; return;;
           * ) break;; esac; shift; done

        local dir=${1:-.} tag meta
        dir=$(realpath "$dir")

        if test ! -d "$dir"
        then fail "run directory doesn't exist: $dir"; fi
        if test ! -f "$dir/meta.json"
        then fail "run directory has no metafile: $dir"; fi
        run_fetch_benchmarking "$dir/tools"

        machines=($(jq '.machine_info | keys | join(" ")
                       ' --raw-output <"$dir/deployment-explorer.json"))
        meta=$(jq .meta "$dir/meta.json")
        tag=$(jq .tag <<<$meta --raw-output)

        echo "--( processing logs in: $(basename "$dir")"

        for a in "${analysis_list[@]}"
        do echo -n " $a" | sed 's/analysis_//'
           $a "$dir" "${machines[@]}"; done

        patch_run "$dir"

        rm -rf "$dir"/analysis/{analysis,logs-node-*,logs-explorer,startup}

        oprint "analysed tag: ${tag}"
}

runs_in() {
        local dir=${1:-.}
        dir=$(realpath "$dir")
        find "$dir" -maxdepth 2 -mindepth 2 -name meta.json -type f |
                cut -d/ -f$(($(tr -cd / <<<$dir | wc -c) + 2))
}
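
## Illustrative: for dir=/home/me/bench (three slashes), the cut field is
## 3 + 2 = 5, so a find line like
##   /home/me/bench/2020-05-25.example-run/meta.json
## yields the run name "2020-05-25.example-run".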

mass_analyse() {
        local parallel=
        while test $# -ge 1
        do case "$1" in
           --parallel ) parallel=t;;
           * ) break;; esac; shift; done

        local dir=${1:-.} runs
        runs=($(runs_in "$dir"))

        oprint "analysing runs: ${runs[*]}"

        for run in "${runs[@]}"
        do if test -n "$parallel"
           then analyse_run "$dir/$run" &
           else analyse_run "$dir/$run"; fi; done
}
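
## Usage (illustrative), assuming runs live under ./runs:
#
#   mass_analyse --parallel ./runs
#
## Note that with --parallel the analyse_run jobs are left in the
## background; callers that need completion must wait on them.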
@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# shellcheck disable=1091,2016

tag_format_timetoblock_header="tx id,tx time,block time,block no,delta t"
patch_run() {
        local dir=${1:-.}
        dir=$(realpath "$dir")

        if test "$(head -n1 "$dir"/analysis/timetoblock.csv)" \
                != "${tag_format_timetoblock_header}"
        then echo "---| patching $dir/analysis/timetoblock.csv"
             sed -i "1 s_^_${tag_format_timetoblock_header}\n_; s_;_,_g" \
                 "$dir"/analysis/timetoblock.csv
        fi

        if test "$(head -n1 "$dir"/analysis/00-results-table.sql.csv)" \
                == "DROP TABLE"
        then echo "---| patching $dir/analysis/00-results-table.sql.csv"
             tail -n+3 "$dir"/analysis/00-results-table.sql.csv \
               > "$dir"/analysis/00-results-table.sql.csv.fixed
             mv "$dir"/analysis/00-results-table.sql.csv.fixed \
                "$dir"/analysis/00-results-table.sql.csv;
        fi
}
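
## The sed above makes two fixes in one pass: '1 s_^_..._' prepends the
## expected "tx id,tx time,block time,block no,delta t" header, and
## 's_;_,_g' converts the legacy semicolon-separated fields to commas.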

run_report_name() {
        local dir metafile meta prof date suffix=
        dir=${1:-.}
        metafile="$dir"/meta.json
        meta=$(jq .meta "$metafile" --raw-output)
        prof=$(jq .profile <<<$meta --raw-output)
        date=$(date +'%Y'-'%m'-'%d'-'%H.%M' --date=@"$(jq .timestamp <<<$meta)")

        test -n "$meta" -a -n "$prof" ||
                fail "Bad run meta.json format: $metafile"

        if is_run_broken "$dir"
        then suffix='broken'; fi

        echo "$date.$prof${suffix:+.$suffix}"
}

package_run() {
        local dir tag report_name package resultroot
        dir=${1:-.}
        tag=$(run_tag "$dir")
        report_name=$(run_report_name "$dir")

        if is_run_broken "$dir"
        then resultroot=$(realpath ../bench-results-bad)
        else resultroot=$(realpath ../bench-results); fi

        package=${resultroot}/$report_name.tar.xz

        oprint "Packaging $tag as: $package"
        ln -sf "./runs/$tag" "$report_name"
        tar cf "$package" "$report_name" --xz --dereference
        rm -f "$report_name"
}
@@ -0,0 +1,201 @@
#!/usr/bin/env bash
# shellcheck disable=1091,2016

sanity_check_list=()

sanity_check_list+=(sanity_check_start_log_spread)
sanity_check_start_log_spread() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $analysis.logs
        | map
          ( (.earliest - $meta.timestamp | fabs)
            as $delta
          | select ($delta > $allowed.start_log_spread_s)
          | . +
            { delta: $delta
            , start: $meta.timestamp })
        ' '.
        | map
          ({ kind: "start-log-spread"
           } + .)
        | .[]'
}

sanity_check_list+=(sanity_check_last_log_spread)
sanity_check_last_log_spread() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $analysis.logs
        | map ## Generator always finishes a bit early, and
              ## we have it analysed to death by other means..
          (select (.name != "generator"))
        | map
          ( (.latest - $analysis.final_log_timestamp | fabs)
            as $delta
          | select ($delta > $allowed.last_log_spread_s)
          | . +
            { delta: $delta
            , final_log_timestamp: $analysis.final_log_timestamp })
        ' '.
        | map
          ({ kind: "latest-log-spread"
           } + .)
        | .[]'
}

sanity_check_list+=(sanity_check_not_even_started)
sanity_check_not_even_started() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $blocks
        | length == 0
        ' '.
        | { kind: "blockchain-not-even-started"
          }' --slurpfile blocks "$dir"/analysis/explorer.MsgBlock.json
}

sanity_check_list+=(sanity_check_silence_since_last_block)
sanity_check_silence_since_last_block() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $blocks[-1] // { timestamp: $analysis.first_node_log_timestamp }
        | ($analysis.final_node_log_timestamp - .timestamp)
          as $delta
        | if $delta >= $allowed.silence_since_last_block_s
          then $delta else empty end
        ' '.
        | { kind: "blockchain-stopped"
          , silence_since_last_block_s: .
          , allowance: $allowed.silence_since_last_block_s
          }' --slurpfile blocks "$dir"/analysis/explorer.MsgBlock.json
}

sanity_check_list+=(sanity_check_no_txs_in_blocks)
sanity_check_no_txs_in_blocks() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $txstats.tx_seen_in_blocks == 0' '
          { kind: "no-txs-in-blocks"
          }'
}

sanity_check_list+=(sanity_check_announced_less_txs_than_specified)
sanity_check_announced_less_txs_than_specified() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          ## Guard against old logs, where tx_annced is 0:
          $txstats.tx_annced >= $txstats.tx_sent and
          $prof.generator.tx_count > $txstats.tx_annced' '
          { kind: "announced-less-txs-than-specified"
          , required: $prof.generator.tx_count
          , sent: $txstats.tx_sent
          }'
}

sanity_check_list+=(sanity_check_sent_less_txs_than_specified)
sanity_check_sent_less_txs_than_specified() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $prof.generator.tx_count > $txstats.tx_sent' '
          { kind: "sent-less-txs-than-specified"
          , required: $prof.generator.tx_count
          , sent: $txstats.tx_sent
          }'
}

sanity_check_list+=(sanity_check_tx_loss_over_threshold)
sanity_check_tx_loss_over_threshold() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          $txstats.tx_sent * (1.0 - $allowed.tx_loss_ratio)
          > $txstats.tx_seen_in_blocks' '
          { kind: "txs-loss-over-threshold"
          , sent: $txstats.tx_sent
          , threshold: ($txstats.tx_sent * (1.0 - $allowed.tx_loss_ratio))
          , received: $txstats.tx_seen_in_blocks
          }'
}

sanity_check_list+=(sanity_check_chain_density)
sanity_check_chain_density() {
        local dir=$1 t=${2:-${default_tolerances}}
        sanity_check "$t" "$dir" '
          ($blocks | length)
          as $block_count
        | ($analysis.final_node_log_timestamp
           - $analysis.first_node_log_timestamp)
          as $cluster_lifetime_s
        | ($cluster_lifetime_s * 1000 / $genesis.slot_duration | floor)
          as $cluster_lifetime_slots
        | ($block_count / $cluster_lifetime_slots)
          as $chain_density
        | ($cluster_lifetime_slots - $block_count)
          as $missed_slots
        | if $chain_density < $allowed.minimum_chain_density or
             $missed_slots > $allowed.maximum_missed_slots
          then { lifetime_s: $cluster_lifetime_s
               , lifetime_slots: $cluster_lifetime_slots
               , block_count: $block_count
               , missed_slots: $missed_slots
               , chain_density: $chain_density
               } else empty end' '
          { kind: "insufficient_overall_chain_density"
          , lifetime_s: .lifetime_s
          , lifetime_slots: .lifetime_slots
          , block_count: .block_count
          , missed_slots: .missed_slots
          , chain_density: .chain_density
          }' --slurpfile blocks "$dir"/analysis/explorer.MsgBlock.json
}

# sanity_check_list+=(sanity_check_)
# sanity_check_() {
#         local t=$1 dir=$2
# }

default_tolerances='
{ "tx_loss_ratio": 0.0
, "start_log_spread_s": 60
, "last_log_spread_s": 60
, "silence_since_last_block_s": 40
, "cluster_startup_overhead_s": 60
, "minimum_chain_density": 0.9
, "maximum_missed_slots": 5
}'
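
## Units: the *_s tolerances are in seconds; tx_loss_ratio is a fraction of
## sent transactions; minimum_chain_density is blocks produced per slot of
## cluster lifetime; maximum_missed_slots counts slots without a block.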

sanity_check_run() {
        local dir=${1:-.} metafile meta prof tolerances t

        for check in ${sanity_check_list[*]}
        do $check "$dir" "${default_tolerances}"
        done | jq --slurp '
          if length != 0
          then . +
               [{ kind: "tolerances" }
                + $tolerances] else . end
        ' --argjson tolerances "$default_tolerances"
}

sanity_check() {
        local tolerances=$1 dir=$2 test=$3 err=$4; shift 4
        sanity_checker "$tolerances" "$dir" \
          " ($test)"' as $test
          | if $test != {} and $test != [] and $test != "" and $test
            then ($test | '"$err"') else empty end
          ' "$@"
}
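
## How a check composes (illustrative): sanity_check splices its $test
## predicate and error constructor into sanity_checker's jq prelude, so a
## minimal hypothetical check looks like:
#
# sanity_check_list+=(sanity_check_example)
# sanity_check_example() {
#         local dir=$1 t=${2:-${default_tolerances}}
#         sanity_check "$t" "$dir" '
#           $txstats.tx_sent == 0' '
#           { kind: "example-no-txs-sent"
#           }'
# }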

sanity_checker() {
        local tolerances=$1 dir=$2 expr=$3; shift 3

        jq ' $meta[0].meta as $meta
           | $analysis[0] as $analysis
           | $txstats[0] as $txstats
           | ($meta.profile_content
              ## TODO: backward compat
              // $meta.generator_params)
             as $prof
           | ($prof.genesis
              ## TODO: backward compat
              // $prof.genesis_params)
             as $genesis
           | $prof.generator as $generator
           | '"$expr"'
           ' --slurpfile meta "$dir/meta.json" \
             --slurpfile analysis "$dir/analysis.json" \
             --slurpfile txstats "$dir/analysis/tx-stats.json" \
             --argjson allowed "$tolerances" \
             "$@" <<<0
}
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# shellcheck disable=1091,2016

sheet_list=()

sheet_list+=(sheet_message_types_summary)
sheet_message_types_summary() {
        local dir=${1:-.} name
        name=$(echo ${FUNCNAME[0]} | cut -d_ -f2-)

        mkdir -p "$dir"/report

        jq ' .message_types
           | to_entries
           | map ( .key as $mach
                 | .value
                 | to_entries
                 | map([ $mach, .key, .value | tostring]))
           | add
           | .[]
           | join(",")' < "$dir"/analysis.json --raw-output \
           > "$dir"/report/"$name".csv

        sed -i '1inode, message, occurrences' "$dir"/report/"$name".csv
}
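
## Resulting CSV shape (illustrative values): one row per
## (node, message type) pair, with the occurrence count from analysis.json:
##   node, message, occurrences
##   node-0,AddedToCurrentChain,123
##   explorer,TraceMempoolAddedTx,4567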
@@ -0,0 +1,61 @@
#!/usr/bin/env bash
# shellcheck disable=1091,2016

run_tag() {
        jq --raw-output .meta.tag "$(realpath "${1:-.}")/meta.json"
}

cluster_last_meta_tag() {
        local meta=./last-meta.json tag dir meta2
        jq . "${meta}" >/dev/null || fail "malformed run metadata: ${meta}"

        tag=$(jq --raw-output .meta.tag "${meta}")
        test -n "${tag}" || fail "bad tag in run metadata: ${meta}"

        dir="./runs/${tag}"
        test -d "${dir}" ||
                fail "bad tag in run metadata: ${meta} -- ${dir} is not a directory"
        meta2=${dir}/meta.json
        jq --exit-status . "${meta2}" >/dev/null ||
                fail "bad tag in run metadata: ${meta} -- ${meta2} is not valid JSON"

        test "$(realpath ./last-meta.json)" = "$(realpath "${meta2}")" ||
                fail "bad tag in run metadata: ${meta} -- ${meta2} is different from ${meta}"
        echo "${tag}"
}

fetch_tag() {
        local tag
        tag=${1:-$(cluster_last_meta_tag)}

        fetch_run "./runs/${tag}"
}

analyse_tag() {
        local tag
        tag=${1:-$(cluster_last_meta_tag)}

        analyse_run "${tagroot}/${tag}" || true
}

sanity_check_tag() {
        local tag
        tag=${1:-$(cluster_last_meta_tag)}

        sanity_check_run "${tagroot}/${tag}"
}

tag_report_name() {
        local tag
        tag=${1:-$(cluster_last_meta_tag)}

        run_report_name "${tagroot}/${tag}"
}

package_tag() {
        local tag
        tag=${1:-$(cluster_last_meta_tag)}

        package_run "${tagroot}/${tag}"
}
@@ -1,147 +0,0 @@
#!/usr/bin/env bash
# shellcheck disable=1091

tmjq() {
        jq .meta "${tagroot}/$1/"meta.json --raw-output
}

tag_report_name() {
        local tag metafile meta prof suffix=
        tag=${1:-$(cluster_last_meta_tag)}
        metafile=${tagroot}/$tag/meta.json
        meta=$(jq .meta "$metafile" --raw-output)
        prof=$(jq .profile --raw-output <<<$meta)
        date=$(date +'%Y'-'%m'-'%d'-'%H.%M' --date=@"$(jq .timestamp <<<$meta)")

        test -n "$meta" -a -n "$prof" ||
                fail "Bad tag meta.json format: $metafile"

        if is_run_broken "$tag"
        then suffix='broken'; fi

        echo "$date.$prof${suffix:+.$suffix}"
}

package_tag() {
        local tag package report_name
        tag=${1:-$(cluster_last_meta_tag)}
        report_name=$(tag_report_name "$tag")

        if is_run_broken "$tag"
        then resultroot=$(realpath ../bench-results-bad)
        else resultroot=$(realpath ../bench-results); fi

        package=${resultroot}/$report_name.tar.xz

        oprint "Packaging $tag as: $package"
        ln -sf "./runs/$tag" "$report_name"
        tar cf "$package" "$report_name" --xz --dereference
        rm -f "$report_name"
}

analyse_tag() {
        local tag dir meta
        tag=${1:-$(cluster_last_meta_tag)}
        dir="${tagroot}/${tag}"

        pushd "${dir}" >/dev/null || return 1
        rm -rf 'analysis'
        mkdir 'analysis'
        cd 'analysis'
        meta=$(tmjq "$tag" .)

        oprint "running log analyses: "
        tar xaf '../logs/logs-explorer.tar.xz'
        tar xaf '../logs/logs-nodes.tar.xz'

        echo " timetoblock.csv"
        ../tools/analyse.sh \
                'logs-explorer/generator' \
                'logs-explorer/node' \
                'last-run/analysis/'
        cp analysis/timetoblock.csv .

        local blocks
        echo -n "--( running log analyses: blocksizes"
        blocks="$(../tools/blocksizes.sh logs-explorer/node-*.json |
                  jq . --slurp)"

        declare -A msgtys
        local mach msgtys=() producers tnum sub_tids
        producers=($(jq '.machine_info | keys | join(" ")
                        ' --raw-output <'../deployment-explorer.json'))

        for mach in explorer ${producers[*]}
        do echo -n " msgtys:${mach}"
           msgtys[${mach}]="$(../tools/msgtypes.sh logs-explorer/node-*.json |
                              jq . --slurp)"; done
        ## NOTE: This is a bit too costly, and we know the generator pretty well.
        # echo -n " msgtys:generator"
        # msgtys_generator="$(../tools/msgtypes.sh logs-explorer/generator.json |
        #                     jq . --slurp)"
        msgtys_generator='[]'

        echo -n " node-to-node-submission-tids"
        sub_tids="$(../tools/generator-logs.sh log-tids \
                      logs-explorer/generator.json || true)"
        for tnum in $(seq 0 $(($(echo "$sub_tids" | wc -w) - 1)))
        do echo -n " node-to-node-submission:${tnum}"
           ../tools/generator-logs.sh tid-trace "${tnum}" \
             logs-explorer/generator.json \
             > generator.submission-thread-trace."${tnum}".json; done

        for p in ${producers[*]}
        do echo -n " added-to-current-chain:$p"
           ../tools/added-to-current-chain.sh logs-node-*/node-*.json \
             > $p.added-to-current-chain.csv; done

        jq '{ tx_stats: $txstats[0]
            , submission_tids: '"$(jq --slurp <<<$sub_tids)"'
            , MsgBlock: '"${blocks}"'
            , message_kinds:
              ({ generator: '"${msgtys_generator}"'
               }'"$(for mach in ${!msgtys[*]}
                    do echo " + { \"$mach\": $(jq --slurp <<<${msgtys[$mach]}) }"
                    done)"')
            }' --null-input \
           --slurpfile txstats 'analysis/tx-stats.json' \
           > ../analysis.json

        echo -n " adding db-analysis"
        tar xaf '../logs/db-analysis.tar.xz' --wildcards '*.csv' '*.txt'

        if jqtest '(.tx_stats.tx_missing != 0)' ../analysis.json
        then echo " missing-txs"
             . ../tools/lib-loganalysis.sh
             op_analyse_losses
        else echo
        fi
        patch_local_tag "$tag"

        rm -rf analysis/ logs-node-*/ logs-explorer/ startup/

        popd >/dev/null

        oprint "analysed tag: ${tag}"
}

tag_format_timetoblock_header="tx id,tx time,block time,block no,delta t"
patch_local_tag() {
        local tag=${1?missing tag} target
        target=${tagroot}/${tag}
        cd "${target}" >/dev/null || return 1

        if test "$(head -n1 analysis/timetoblock.csv)" != "${tag_format_timetoblock_header}"
        then echo "---| patching ${tag}/analysis/timetoblock.csv"
             sed -i "1 s_^_${tag_format_timetoblock_header}\n_; s_;_,_g" \
                 'analysis/timetoblock.csv'
        fi

        if test "$(head -n1 analysis/00-results-table.sql.csv)" == "DROP TABLE"
        then echo "---| patching ${tag}/analysis/00-results-table.sql.csv"
             tail -n+3 analysis/00-results-table.sql.csv > analysis/00-results-table.sql.csv.fixed
             mv analysis/00-results-table.sql.csv.fixed analysis/00-results-table.sql.csv;
        fi

        cd - >/dev/null || return 1
}