From e506a0ce40bd777a84ba1de8ed40df2154f7afb1 Mon Sep 17 00:00:00 2001
From: Richard Yao
Date: Thu, 22 Sep 2022 14:28:33 -0400
Subject: [PATCH] Cleanup: Change 1 used in bitshifts to 1ULL

Coverity complains about this. It is not a bug as long as we never
shift by more than 31, but changing the constants from 1 to 1ULL is
harmless cleanup.

Reviewed-by: Ryan Moeller
Reviewed-by: Brian Behlendorf
Signed-off-by: Richard Yao
Closes #13914
---
 cmd/zdb/zdb.c         | 6 +++---
 module/zfs/dsl_scan.c | 2 +-
 module/zfs/metaslab.c | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index 92df3dd16..a3a363ca5 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -6203,10 +6203,10 @@ zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
 		 */
 		for (uint64_t inner_offset = 0;
 		    inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
-		    inner_offset += 1 << vd->vdev_ashift) {
+		    inner_offset += 1ULL << vd->vdev_ashift) {
 			if (range_tree_contains(msp->ms_allocatable,
-			    offset + inner_offset, 1 << vd->vdev_ashift)) {
-				obsolete_bytes += 1 << vd->vdev_ashift;
+			    offset + inner_offset, 1ULL << vd->vdev_ashift)) {
+				obsolete_bytes += 1ULL << vd->vdev_ashift;
 			}
 		}
 
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index 28afc3dea..5ad8ff1f3 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -3000,7 +3000,7 @@ scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
 	 * otherwise we leave shorter remnant every txg.
 	 */
 	uint64_t start;
-	uint64_t size = 1 << rt->rt_shift;
+	uint64_t size = 1ULL << rt->rt_shift;
 	range_seg_t *addr_rs;
 	if (queue->q_last_ext_addr != -1) {
 		start = queue->q_last_ext_addr;
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 02cf121d8..4234f8ebf 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1449,7 +1449,7 @@ metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
 	zfs_btree_t *size_tree = mrap->mra_bt;
 
 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
-	    (1 << mrap->mra_floor_shift))
+	    (1ULL << mrap->mra_floor_shift))
 		return;
 
 	zfs_btree_add(size_tree, rs);
@@ -1461,7 +1461,7 @@ metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
 	metaslab_rt_arg_t *mrap = arg;
 	zfs_btree_t *size_tree = mrap->mra_bt;
 
-	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
+	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
 	    mrap->mra_floor_shift))
 		return;
 
@@ -3552,7 +3552,7 @@ metaslab_should_condense(metaslab_t *msp)
 {
 	space_map_t *sm = msp->ms_sm;
 	vdev_t *vd = msp->ms_group->mg_vd;
-	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
+	uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
 
 	ASSERT(MUTEX_HELD(&msp->ms_lock));
 	ASSERT(msp->ms_loaded);
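
The hazard Coverity is pointing at is C's integer promotion: the
literal 1 has type int, so "1 << shift" is evaluated in 32 bits no
matter what 64-bit type the result is assigned to. Below is a minimal
user-space sketch of the failure mode, assuming an LP64 platform; the
program and its names are illustrative only and are not part of the
patch.

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		unsigned shift = 31;

		/*
		 * With a plain int 1, a shift count of 31 lands in the
		 * sign bit (formally undefined behavior); in practice
		 * the 32-bit result is INT_MIN, which sign-extends when
		 * widened to uint64_t. A count of 32 or more is
		 * undefined outright.
		 */
		uint64_t bad = 1 << shift;

		/* 1ULL makes the shift happen in 64 bits. */
		uint64_t good = 1ULL << shift;

		/* Typically prints: ffffffff80000000 0000000080000000 */
		printf("%016llx %016llx\n",
		    (unsigned long long)bad, (unsigned long long)good);
		return (0);
	}

In the code being patched, vdev_ashift, rt_shift, and mra_floor_shift
are small in practice (an ashift is typically 9-16), which is why the
old code was not an actual bug; the 1ULL form simply makes the 64-bit
intent explicit and silences the checker.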