From f917cf1c0324de2816e73149a81c4c6dd43ce542 Mon Sep 17 00:00:00 2001
From: Alan Somers
Date: Fri, 14 Jul 2023 17:13:15 -0600
Subject: [PATCH] Fix the ZFS checksum error histograms with larger record
 sizes

My analysis in PR #14716 was incorrect. Each histogram bucket contains
the number of incorrect bits, by position in a 64-bit word, over the
entire record. 8-bit buckets can overflow for record sizes above 2k.
To forestall that, saturate each bucket at 255. That should still get
the point across: either all bits are equally wrong, or just a couple
are.

Reviewed-by: Brian Behlendorf
Signed-off-by: Alan Somers
Sponsored-by: Axcient
Closes #15049
---
 module/zfs/zfs_fm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/module/zfs/zfs_fm.c b/module/zfs/zfs_fm.c
index c42ef048d..2754ceec8 100644
--- a/module/zfs/zfs_fm.c
+++ b/module/zfs/zfs_fm.c
@@ -790,7 +790,7 @@ update_histogram(uint64_t value_arg, uint8_t *hist, uint32_t *count)
 	/* We store the bits in big-endian (largest-first) order */
 	for (i = 0; i < 64; i++) {
 		if (value & (1ull << i)) {
-			hist[63 - i]++;
+			hist[63 - i] = MAX(hist[63 - i], hist[63 - i] + 1);
 			++bits;
 		}
 	}
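
For illustration, below is a minimal standalone sketch of the saturating
bucket update that the hunk above introduces. It is not the upstream
function: bucket_inc_sat(), demo_histogram(), and the main() driver are
hypothetical names added for this example, the byte-order handling done
by update_histogram() is omitted, and the saturation is written as an
explicit bound check against UINT8_MAX. The explicit check sidesteps a
C subtlety in the MAX(h, h + 1) idiom: the usual arithmetic conversions
promote an 8-bit h to int before the addition, so h + 1 evaluates to
256 rather than wrapping within the expression, and only the final
store back into the uint8_t bucket truncates.

#include <stdint.h>
#include <stdio.h>

/* Increment an 8-bit histogram bucket, clamping at UINT8_MAX (255). */
static inline void
bucket_inc_sat(uint8_t *bucket)
{
	if (*bucket < UINT8_MAX)
		(*bucket)++;
}

/*
 * Tally the set bits of each 64-bit word of a buffer into 64
 * per-bit-position buckets, largest bit first, analogous to
 * update_histogram() (byte-order handling omitted).
 */
static void
demo_histogram(const uint64_t *words, size_t nwords, uint8_t hist[64])
{
	for (size_t w = 0; w < nwords; w++) {
		for (int i = 0; i < 64; i++) {
			if (words[w] & (1ull << i))
				bucket_inc_sat(&hist[63 - i]);
		}
	}
}

int
main(void)
{
	/*
	 * 512 all-ones words model a 4 KiB record in which every bit
	 * is wrong.  An unsaturated uint8_t bucket would wrap to 0
	 * (512 % 256 == 0); the clamped bucket stays at 255.
	 */
	uint64_t words[512];
	uint8_t hist[64] = { 0 };

	for (size_t w = 0; w < 512; w++)
		words[w] = ~0ull;
	demo_histogram(words, 512, hist);
	printf("bucket 0 = %u\n", (unsigned)hist[0]); /* prints 255 */
	return (0);
}

The 4 KiB example matches the commit message's arithmetic: a record
larger than 2 KiB contains more than 256 64-bit words, so a per-bit
bucket can be incremented past 255, which an 8-bit counter cannot hold
without either wrapping or, as here, clamping.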