diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index 5ad6a4395..9515a65ff 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -1456,7 +1456,7 @@ get_obsolete_refcount(vdev_t *vd)
 		}
 	} else {
 		ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
-		ASSERT3U(obsolete_sm_object, ==, 0);
+		ASSERT0(obsolete_sm_object);
 	}
 	for (unsigned c = 0; c < vd->vdev_children; c++) {
 		refcount += get_obsolete_refcount(vd->vdev_child[c]);
diff --git a/cmd/ztest.c b/cmd/ztest.c
index e6e2e78fc..9e94be54f 100644
--- a/cmd/ztest.c
+++ b/cmd/ztest.c
@@ -4006,7 +4006,7 @@ raidz_scratch_verify(void)
 	 * requested by user, but scratch object was not created.
 	 */
	case RRSS_SCRATCH_NOT_IN_USE:
-		ASSERT3U(offset, ==, 0);
+		ASSERT0(offset);
 		break;
 
 	/*
diff --git a/module/os/freebsd/zfs/dmu_os.c b/module/os/freebsd/zfs/dmu_os.c
index 364bbfc60..26cc7981b 100644
--- a/module/os/freebsd/zfs/dmu_os.c
+++ b/module/os/freebsd/zfs/dmu_os.c
@@ -156,7 +156,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 	if (dbp[0]->db_offset != 0 || numbufs > 1) {
 		for (i = 0; i < numbufs; i++) {
 			ASSERT(ISP2(dbp[i]->db_size));
-			ASSERT3U((dbp[i]->db_offset % dbp[i]->db_size), ==, 0);
+			ASSERT0((dbp[i]->db_offset % dbp[i]->db_size));
 			ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
 		}
 	}
@@ -175,7 +175,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 			vm_page_sunbusy(m);
 			break;
 		}
-		ASSERT3U(m->dirty, ==, 0);
+		ASSERT0(m->dirty);
 		ASSERT(!pmap_page_is_write_mapped(m));
 		ASSERT3U(db->db_size, >, PAGE_SIZE);
 
@@ -201,7 +201,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 		if (m != bogus_page) {
 			vm_page_assert_xbusied(m);
 			ASSERT(vm_page_none_valid(m));
-			ASSERT3U(m->dirty, ==, 0);
+			ASSERT0(m->dirty);
 			ASSERT(!pmap_page_is_write_mapped(m));
 			va = zfs_map_page(m, &sf);
 		}
@@ -295,7 +295,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 			vm_page_sunbusy(m);
 			break;
 		}
-		ASSERT3U(m->dirty, ==, 0);
+		ASSERT0(m->dirty);
 		ASSERT(!pmap_page_is_write_mapped(m));
 		ASSERT3U(db->db_size, >, PAGE_SIZE);
 
diff --git a/module/os/freebsd/zfs/zfs_dir.c b/module/os/freebsd/zfs/zfs_dir.c
index 191df832d..75ba2ea0c 100644
--- a/module/os/freebsd/zfs/zfs_dir.c
+++ b/module/os/freebsd/zfs/zfs_dir.c
@@ -273,7 +273,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
 	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
 
 	ASSERT(zp->z_unlinked);
-	ASSERT3U(zp->z_links, ==, 0);
+	ASSERT0(zp->z_links);
 
 	VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj,
 	    zp->z_id, tx));
@@ -437,7 +437,7 @@ zfs_rmnode(znode_t *zp)
 	uint64_t count;
 	int error;
 
-	ASSERT3U(zp->z_links, ==, 0);
+	ASSERT0(zp->z_links);
 	if (zfsvfs->z_replay == B_FALSE)
 		ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
 
diff --git a/module/os/linux/spl/spl-kmem-cache.c b/module/os/linux/spl/spl-kmem-cache.c
index b93b5dec9..0ae74cb70 100644
--- a/module/os/linux/spl/spl-kmem-cache.c
+++ b/module/os/linux/spl/spl-kmem-cache.c
@@ -863,11 +863,11 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
 	 * Validate there are no objects in use and free all the
 	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
 	 */
-	ASSERT3U(skc->skc_slab_alloc, ==, 0);
-	ASSERT3U(skc->skc_obj_alloc, ==, 0);
-	ASSERT3U(skc->skc_slab_total, ==, 0);
-	ASSERT3U(skc->skc_obj_total, ==, 0);
-	ASSERT3U(skc->skc_obj_emergency, ==, 0);
+	ASSERT0(skc->skc_slab_alloc);
+	ASSERT0(skc->skc_obj_alloc);
+	ASSERT0(skc->skc_slab_total);
+	ASSERT0(skc->skc_obj_total);
+	ASSERT0(skc->skc_obj_emergency);
 	ASSERT(list_empty(&skc->skc_complete_list));
 	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
 
diff --git a/module/os/linux/zfs/zfs_dir.c b/module/os/linux/zfs/zfs_dir.c
index 48b8e92e1..e8de53660 100644
--- a/module/os/linux/zfs/zfs_dir.c
+++ b/module/os/linux/zfs/zfs_dir.c
@@ -986,7 +986,7 @@ zfs_drop_nlink_locked(znode_t *zp, dmu_tx_t *tx, boolean_t *unlinkedp)
 	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
 	    &links, sizeof (links));
 	error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-	ASSERT3U(error, ==, 0);
+	ASSERT0(error);
 
 	if (unlinkedp != NULL)
 		*unlinkedp = unlinked;
@@ -1058,7 +1058,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
 
 		/* The only error is !zfs_dirempty() and we checked earlier. */
 		error = zfs_drop_nlink_locked(zp, tx, &unlinked);
-		ASSERT3U(error, ==, 0);
+		ASSERT0(error);
 		mutex_exit(&zp->z_lock);
 	} else {
 		error = zfs_dropname(dl, zp, dzp, tx, flag);
diff --git a/module/zfs/abd.c b/module/zfs/abd.c
index 826928e67..bf9b13c30 100644
--- a/module/zfs/abd.c
+++ b/module/zfs/abd.c
@@ -563,7 +563,7 @@ abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
 			left -= csize;
 			off = 0;
 		}
-		ASSERT3U(left, ==, 0);
+		ASSERT0(left);
 	} else {
 		abd = abd_get_offset_scatter(abd, sabd, off, size);
 	}
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index c724d0aaa..cbd00e869 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -5554,7 +5554,7 @@ static void
 arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
 {
 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
-		ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
+		ASSERT0(HDR_GET_PSIZE(hdr));
 		ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
 	} else {
 		if (HDR_COMPRESSION_ENABLED(hdr)) {
@@ -6973,7 +6973,7 @@ arc_write_done(zio_t *zio)
 	arc_buf_hdr_t *exists;
 	kmutex_t *hash_lock;
 
-	ASSERT3U(zio->io_error, ==, 0);
+	ASSERT0(zio->io_error);
 
 	arc_cksum_verify(buf);
 
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 192cc265b..93a284808 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -4477,7 +4477,7 @@ dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
 
 	ASSERT(MUTEX_HELD(&db->db_mtx));
 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
-	ASSERT3U(db->db_level, ==, 0);
+	ASSERT0(db->db_level);
 
 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
 		zbookmark_phys_t zb;
diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c
index e0b9fc395..8b29fc1e9 100644
--- a/module/zfs/ddt.c
+++ b/module/zfs/ddt.c
@@ -397,7 +397,7 @@ ddt_object_create(ddt_t *ddt, ddt_type_t type, ddt_class_t class,
 
 	ddt_object_name(ddt, type, class, name);
 
-	ASSERT3U(*objectp, ==, 0);
+	ASSERT0(*objectp);
 	VERIFY0(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash));
 	ASSERT3U(*objectp, !=, 0);
 
@@ -1421,7 +1421,7 @@ ddt_key_compare(const void *x1, const void *x2)
 static void
 ddt_create_dir(ddt_t *ddt, dmu_tx_t *tx)
 {
-	ASSERT3U(ddt->ddt_dir_object, ==, 0);
+	ASSERT0(ddt->ddt_dir_object);
 	ASSERT3U(ddt->ddt_version, ==, DDT_VERSION_FDT);
 
 	char name[DDT_NAMELEN];
diff --git a/module/zfs/ddt_log.c b/module/zfs/ddt_log.c
index dbd381aa9..2cf917105 100644
--- a/module/zfs/ddt_log.c
+++ b/module/zfs/ddt_log.c
@@ -116,7 +116,7 @@ static void
 ddt_log_create_one(ddt_t *ddt, ddt_log_t *ddl, uint_t n, dmu_tx_t *tx)
 {
 	ASSERT3U(ddt->ddt_dir_object, >, 0);
-	ASSERT3U(ddl->ddl_object, ==, 0);
+	ASSERT0(ddl->ddl_object);
 
 	char name[DDT_NAMELEN];
 	ddt_log_name(ddt, name, n);
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index c14d30334..7ad25d4d8 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -2013,7 +2013,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
 				 * Only a variable-sized attribute can be
 				 * replaced here, and its size must be changing.
 				 */
-				ASSERT3U(reg_length, ==, 0);
+				ASSERT0(reg_length);
 				ASSERT3U(length, !=, buflen);
 				SA_ADD_BULK_ATTR(attr_desc, j, attr,
 				    locator, datastart, buflen);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 946b1782a..a7271d2a3 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1295,7 +1295,7 @@ spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
 	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
 
 	if (tqs->stqs_taskq == NULL) {
-		ASSERT3U(tqs->stqs_count, ==, 0);
+		ASSERT0(tqs->stqs_count);
 		return;
 	}
 
@@ -2021,7 +2021,7 @@ spa_unload_log_sm_flush_all(spa_t *spa)
 	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT | DMU_TX_SUSPEND));
 
-	ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
+	ASSERT0(spa->spa_log_flushall_txg);
 	spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
 
 	dmu_tx_commit(tx);
diff --git a/module/zfs/vdev_draid.c b/module/zfs/vdev_draid.c
index feec5fd3c..86a0b0fb8 100644
--- a/module/zfs/vdev_draid.c
+++ b/module/zfs/vdev_draid.c
@@ -590,7 +590,7 @@ vdev_draid_psize_to_asize(vdev_t *vd, uint64_t psize, uint64_t txg)
 	uint64_t asize = (rows * vdc->vdc_groupwidth) << ashift;
 
 	ASSERT3U(asize, !=, 0);
-	ASSERT3U(asize % (vdc->vdc_groupwidth), ==, 0);
+	ASSERT0(asize % (vdc->vdc_groupwidth));
 
 	return (asize);
 }
@@ -1623,7 +1623,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
 	    SPA_MAXBLOCKSIZE);
 
 	ASSERT3U(vdev_draid_get_astart(vd, start), ==, start);
-	ASSERT3U(asize % (vdc->vdc_groupwidth << ashift), ==, 0);
+	ASSERT0(asize % (vdc->vdc_groupwidth << ashift));
 
 	/* Chunks must evenly span all data columns in the group. */
 	psize = (((psize >> ashift) / ndata) * ndata) << ashift;
@@ -1634,7 +1634,7 @@ vdev_draid_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
 	uint64_t left = vdev_draid_group_to_offset(vd, group + 1) - start;
 	chunk_size = MIN(chunk_size, left);
 
-	ASSERT3U(chunk_size % (vdc->vdc_groupwidth << ashift), ==, 0);
+	ASSERT0(chunk_size % (vdc->vdc_groupwidth << ashift));
 	ASSERT3U(vdev_draid_offset_to_group(vd, start), ==,
 	    vdev_draid_offset_to_group(vd, start + chunk_size - 1));
 
@@ -2272,7 +2272,7 @@ vdev_draid_init(spa_t *spa, nvlist_t *nv, void **tsd)
 	ASSERT3U(vdc->vdc_groupwidth, <=, vdc->vdc_ndisks);
 	ASSERT3U(vdc->vdc_groupsz, >=, 2 * VDEV_DRAID_ROWHEIGHT);
 	ASSERT3U(vdc->vdc_devslicesz, >=, VDEV_DRAID_ROWHEIGHT);
-	ASSERT3U(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT, ==, 0);
+	ASSERT0(vdc->vdc_devslicesz % VDEV_DRAID_ROWHEIGHT);
 	ASSERT3U((vdc->vdc_groupwidth * vdc->vdc_ngroups) %
 	    vdc->vdc_ndisks, ==, 0);
 
diff --git a/module/zfs/zap.c b/module/zfs/zap.c
index 8de79a056..3e4e99779 100644
--- a/module/zfs/zap.c
+++ b/module/zfs/zap.c
@@ -1546,7 +1546,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
 	boolean_t trunc = B_FALSE;
 	int err = 0;
 
-	ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
+	ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
 	ASSERT3U(prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
 	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
 	ASSERT3U(ZAP_HASH_IDX(hash, prefix_len), ==, prefix);
@@ -1564,7 +1564,7 @@ zap_shrink(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
 		uint64_t sl_hash = ZAP_PREFIX_HASH(sl_prefix, prefix_len);
 		int slbit = prefix & 1;
 
-		ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_nentries, ==, 0);
+		ASSERT0(zap_leaf_phys(l)->l_hdr.lh_nentries);
 
 		/*
 		 * Check if there is a sibling by reading ptrtbl ptrs.
diff --git a/module/zfs/zfs_rlock.c b/module/zfs/zfs_rlock.c
index 53eb3ef1b..4035baff7 100644
--- a/module/zfs/zfs_rlock.c
+++ b/module/zfs/zfs_rlock.c
@@ -666,7 +666,7 @@ zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
 
 	/* Ensure there are no other locks */
 	ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
-	ASSERT3U(lr->lr_offset, ==, 0);
+	ASSERT0(lr->lr_offset);
 	ASSERT3U(lr->lr_type, ==, RL_WRITER);
 	ASSERT(!lr->lr_proxy);
 	ASSERT3U(lr->lr_length, ==, UINT64_MAX);
diff --git a/module/zfs/zvol.c b/module/zfs/zvol.c
index 91c962f35..f1922f1d5 100644
--- a/module/zfs/zvol.c
+++ b/module/zfs/zvol.c
@@ -2204,7 +2204,7 @@ zvol_fini_impl(void)
 	rw_destroy(&zvol_state_lock);
 
 	if (ztqs->tqs_taskq == NULL) {
-		ASSERT3U(ztqs->tqs_cnt, ==, 0);
+		ASSERT0(ztqs->tqs_cnt);
 	} else {
 		for (uint_t i = 0; i < ztqs->tqs_cnt; i++) {
 			ASSERT3P(ztqs->tqs_taskq[i], !=, NULL);