Prefer VERIFY0(n) over VERIFY3U(n, ==, 0)

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <alexander.motin@TrueNAS.com>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Sponsored-by: https://despairlabs.com/sponsor/
Closes #17591
This commit is contained in:
Authored by Rob Norris on 2025-08-04 20:22:42 +10:00; committed by Brian Behlendorf
parent c39e076f23
commit 5c7df3bcac
17 changed files with 33 additions and 33 deletions

View File

@@ -1456,7 +1456,7 @@ get_obsolete_refcount(vdev_t *vd)
} }
} else { } else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL); ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0); ASSERT0(obsolete_sm_object);
} }
for (unsigned c = 0; c < vd->vdev_children; c++) { for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]); refcount += get_obsolete_refcount(vd->vdev_child[c]);

View File

@@ -4006,7 +4006,7 @@ raidz_scratch_verify(void)
* requested by user, but scratch object was not created. * requested by user, but scratch object was not created.
*/ */
case RRSS_SCRATCH_NOT_IN_USE: case RRSS_SCRATCH_NOT_IN_USE:
ASSERT3U(offset, ==, 0); ASSERT0(offset);
break; break;
/* /*

View File

@@ -156,7 +156,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (dbp[0]->db_offset != 0 || numbufs > 1) { if (dbp[0]->db_offset != 0 || numbufs > 1) {
for (i = 0; i < numbufs; i++) { for (i = 0; i < numbufs; i++) {
ASSERT(ISP2(dbp[i]->db_size)); ASSERT(ISP2(dbp[i]->db_size));
ASSERT3U((dbp[i]->db_offset % dbp[i]->db_size), ==, 0); ASSERT0((dbp[i]->db_offset % dbp[i]->db_size));
ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size); ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
} }
} }
@@ -175,7 +175,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m); vm_page_sunbusy(m);
break; break;
} }
ASSERT3U(m->dirty, ==, 0); ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m)); ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE); ASSERT3U(db->db_size, >, PAGE_SIZE);
@@ -201,7 +201,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
if (m != bogus_page) { if (m != bogus_page) {
vm_page_assert_xbusied(m); vm_page_assert_xbusied(m);
ASSERT(vm_page_none_valid(m)); ASSERT(vm_page_none_valid(m));
ASSERT3U(m->dirty, ==, 0); ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m)); ASSERT(!pmap_page_is_write_mapped(m));
va = zfs_map_page(m, &sf); va = zfs_map_page(m, &sf);
} }
@@ -295,7 +295,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
vm_page_sunbusy(m); vm_page_sunbusy(m);
break; break;
} }
ASSERT3U(m->dirty, ==, 0); ASSERT0(m->dirty);
ASSERT(!pmap_page_is_write_mapped(m)); ASSERT(!pmap_page_is_write_mapped(m));
ASSERT3U(db->db_size, >, PAGE_SIZE); ASSERT3U(db->db_size, >, PAGE_SIZE);

View File

@@ -273,7 +273,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
zfsvfs_t *zfsvfs = zp->z_zfsvfs; zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT(zp->z_unlinked); ASSERT(zp->z_unlinked);
ASSERT3U(zp->z_links, ==, 0); ASSERT0(zp->z_links);
VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx)); VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
@@ -437,7 +437,7 @@ zfs_rmnode(znode_t *zp)
uint64_t count; uint64_t count;
int error; int error;
ASSERT3U(zp->z_links, ==, 0); ASSERT0(zp->z_links);
if (zfsvfs->z_replay == B_FALSE) if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__); ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);

View File

@@ -863,11 +863,11 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
* Validate there are no objects in use and free all the * Validate there are no objects in use and free all the
* spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
*/ */
ASSERT3U(skc->skc_slab_alloc, ==, 0); ASSERT0(skc->skc_slab_alloc);
ASSERT3U(skc->skc_obj_alloc, ==, 0); ASSERT0(skc->skc_obj_alloc);
ASSERT3U(skc->skc_slab_total, ==, 0); ASSERT0(skc->skc_slab_total);
ASSERT3U(skc->skc_obj_total, ==, 0); ASSERT0(skc->skc_obj_total);
ASSERT3U(skc->skc_obj_emergency, ==, 0); ASSERT0(skc->skc_obj_emergency);
ASSERT(list_empty(&skc->skc_complete_list)); ASSERT(list_empty(&skc->skc_complete_list));
ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0); ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);

View File

@@ -986,7 +986,7 @@ zfs_drop_nlink_locked(znode_t *zp, dmu_tx_t *tx, boolean_t *unlinkedp)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
NULL, &links, sizeof (links)); NULL, &links, sizeof (links));
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx); error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT3U(error, ==, 0); ASSERT0(error);
if (unlinkedp != NULL) if (unlinkedp != NULL)
*unlinkedp = unlinked; *unlinkedp = unlinked;
@@ -1058,7 +1058,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
/* The only error is !zfs_dirempty() and we checked earlier. */ /* The only error is !zfs_dirempty() and we checked earlier. */
error = zfs_drop_nlink_locked(zp, tx, &unlinked); error = zfs_drop_nlink_locked(zp, tx, &unlinked);
ASSERT3U(error, ==, 0); ASSERT0(error);
mutex_exit(&zp->z_lock); mutex_exit(&zp->z_lock);
} else { } else {
error = zfs_dropname(dl, zp, dzp, tx, flag); error = zfs_dropname(dl, zp, dzp, tx, flag);

View File

@@ -563,7 +563,7 @@ abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
left -= csize; left -= csize;
off = 0; off = 0;
} }
ASSERT3U(left, ==, 0); ASSERT0(left);
} else { } else {
abd = abd_get_offset_scatter(abd, sabd, off, size); abd = abd_get_offset_scatter(abd, sabd, off, size);
} }

View File

@@ -5554,7 +5554,7 @@ static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp) arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{ {
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); ASSERT0(HDR_GET_PSIZE(hdr));
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF); ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else { } else {
if (HDR_COMPRESSION_ENABLED(hdr)) { if (HDR_COMPRESSION_ENABLED(hdr)) {
@@ -6973,7 +6973,7 @@ arc_write_done(zio_t *zio)
arc_buf_hdr_t *exists; arc_buf_hdr_t *exists;
kmutex_t *hash_lock; kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0); ASSERT0(zio->io_error);
arc_cksum_verify(buf); arc_cksum_verify(buf);

View File

@@ -4477,7 +4477,7 @@ dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
ASSERT(MUTEX_HELD(&db->db_mtx)); ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT); ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
ASSERT3U(db->db_level, ==, 0); ASSERT0(db->db_level);
if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) { if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb; zbookmark_phys_t zb;

View File

@@ -397,7 +397,7 @@ ddt_object_create(ddt_t *ddt, ddt_type_t type, ddt_class_t class,
ddt_object_name(ddt, type, class, name); ddt_object_name(ddt, type, class, name);
ASSERT3U(*objectp, ==, 0); ASSERT0(*objectp);
VERIFY0(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash)); VERIFY0(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash));
ASSERT3U(*objectp, !=, 0); ASSERT3U(*objectp, !=, 0);
@@ -1421,7 +1421,7 @@ ddt_key_compare(const void *x1, const void *x2)
static void static void
ddt_create_dir(ddt_t *ddt, dmu_tx_t *tx) ddt_create_dir(ddt_t *ddt, dmu_tx_t *tx)
{ {
ASSERT3U(ddt->ddt_dir_object, ==, 0); ASSERT0(ddt->ddt_dir_object);
ASSERT3U(ddt->ddt_version, ==, DDT_VERSION_FDT); ASSERT3U(ddt->ddt_version, ==, DDT_VERSION_FDT);
char name[DDT_NAMELEN]; char name[DDT_NAMELEN];

View File

@@ -116,7 +116,7 @@ static void
ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx) ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
{ {
ASSERT3U(ddt->ddt_dir_object, >, 0); ASSERT3U(ddt->ddt_dir_object, >, 0);
ASSERT3U(ddl->ddl_object, ==, 0); ASSERT0(ddl->ddl_object);
char name[DDT_NAMELEN]; char name[DDT_NAMELEN];
ddt_log_name(ddt, name, n); ddt_log_name(ddt, name, n);

View File

@@ -2013,7 +2013,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
* Only a variable-sized attribute can be * Only a variable-sized attribute can be
* replaced here, and its size must be changing. * replaced here, and its size must be changing.
*/ */
ASSERT3U(reg_length, ==, 0); ASSERT0(reg_length);
ASSERT3U(length, !=, buflen); ASSERT3U(length, !=, buflen);
SA_ADD_BULK_ATTR(attr_desc, j, attr, SA_ADD_BULK_ATTR(attr_desc, j, attr,
locator, datastart, buflen); locator, datastart, buflen);

View File

@@ -1295,7 +1295,7 @@ spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q]; spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) { if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0); ASSERT0(tqs->stqs_count);
return; return;
} }
@@ -2021,7 +2021,7 @@ spa_unload_log_sm_flush_all(spa_t *spa)
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND)); VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0); ASSERT0(spa->spa_log_flushall_txg);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx); spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx); dmu_tx_commit(tx);

View File

@@ -590,7 +590,7 @@ vdev_draid_psize_to_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift; uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
ASSERT3U(asize, !=, 0); ASSERT3U(asize, !=, 0);
ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0); ASSERT0(asize % (vdc->vdc_groupwidth));
return (asize); return (asize);
} }
@@ -1623,7 +1623,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
SPA_MAXBLOCKSIZE); SPA_MAXBLOCKSIZE);
ASSERT3U(vdev_draid_get_astart(vd, start), ==, start); ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0); ASSERT0(asize % (vdc->vdc_groupwidth << ashift));
/* Chunks must evenly span all data columns in the group. */ /* Chunks must evenly span all data columns in the group. */
psize = (((psize >> ashift) / ndata) * ndata) << ashift; psize = (((psize >> ashift) / ndata) * ndata) << ashift;
@@ -1634,7 +1634,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start; uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
chunk_size = MIN(chunk_size, left); chunk_size = MIN(chunk_size, left);
ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0); ASSERT0(chunk_size % (vdc->vdc_groupwidth << ashift));
ASSERT3U(vdev_draid_offset_to_group(vd, start), ==, ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
vdev_draid_offset_to_group(vd, start + chunk_size - 1)); vdev_draid_offset_to_group(vd, start + chunk_size - 1));
@@ -2272,7 +2272,7 @@ vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks); ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT); ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT); ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0); ASSERT0(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT);
ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) % ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
vdc->vdc_ndisks, ==, 0); vdc->vdc_ndisks, ==, 0);

View File

@@ -1546,7 +1546,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
boolean_t trunc = B_FALSE; boolean_t trunc = B_FALSE;
int err = 0; int err = 0;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0); ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift); ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock)); ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix); ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix);
@@ -1564,7 +1564,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len); uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len);
int slbit = prefix & 1; int slbit = prefix & 1;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0); ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
/* /*
* Check if there is a sibling by reading ptrtbl ptrs. * Check if there is a sibling by reading ptrtbl ptrs.

View File

@@ -666,7 +666,7 @@ zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
/* Ensure there are no other locks */ /* Ensure there are no other locks */
ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1); ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
ASSERT3U(lr->lr_offset, ==, 0); ASSERT0(lr->lr_offset);
ASSERT3U(lr->lr_type, ==, RL_WRITER); ASSERT3U(lr->lr_type, ==, RL_WRITER);
ASSERT(!lr->lr_proxy); ASSERT(!lr->lr_proxy);
ASSERT3U(lr->lr_length, ==, UINT64_MAX); ASSERT3U(lr->lr_length, ==, UINT64_MAX);

View File

@@ -2204,7 +2204,7 @@ zvol_fini_impl(void)
rw_destroy(&zvol_state_lock); rw_destroy(&zvol_state_lock);
if (ztqs->tqs_taskq == NULL) { if (ztqs->tqs_taskq == NULL) {
ASSERT3U(ztqs->tqs_cnt, ==, 0); ASSERT0(ztqs->tqs_cnt);
} else { } else {
for (uint_t i = 0; i < ztqs->tqs_cnt; i++) { for (uint_t i = 0; i < ztqs->tqs_cnt; i++) {
ASSERT3P(ztqs->tqs_taskq[i], !=, NULL); ASSERT3P(ztqs->tqs_taskq[i], !=, NULL);