Prefer VERIFY0P(n) over VERIFY(n == NULL)
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <alexander.motin@TrueNAS.com>
Signed-off-by: Rob Norris <robn@despairlabs.com>
Sponsored-by: https://despairlabs.com/sponsor/
Closes #17591
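For context, here is a minimal sketch of the substitution this commit applies throughout the tree: ASSERT0P()/VERIFY0P() assert that a pointer expression is NULL, replacing the open-coded "== NULL" comparison, with ASSERT0P compiled out in non-debug builds and VERIFY0P always checked. The function and variable names below are illustrative only and do not appear in the ZFS sources.

#include <sys/debug.h>  /* ASSERT0P()/VERIFY0P() as provided in the OpenZFS tree */

/* Hypothetical example: attaching a context pointer that must start out unset. */
static void
example_attach_ctx(void **slot, void *ctx)
{
        /* Old style being replaced: open-coded NULL comparison. */
        ASSERT(*slot == NULL);

        /* Preferred style: dedicated pointer-is-NULL macros. */
        ASSERT0P(*slot);        /* checked in debug builds only */
        VERIFY0P(*slot);        /* checked in all builds */

        *slot = ctx;
}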
This commit is contained in: commit f7bdd84328 (parent 611b95da18)
@@ -3109,7 +3109,7 @@ dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
 static int
 dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
 {
-        ASSERT(arg == NULL);
+        ASSERT0P(arg);
         if (dump_opt['d'] >= 5) {
                 char buf[128];
                 (void) snprintf(buf, sizeof (buf),
@@ -9743,7 +9743,7 @@ main(int argc, char **argv)
         if (error == 0) {
                 if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
                         ASSERT(checkpoint_pool != NULL);
-                        ASSERT(checkpoint_target == NULL);
+                        ASSERT0P(checkpoint_target);
 
                         error = spa_open(checkpoint_pool, &spa, FTAG);
                         if (error != 0) {
@@ -77,7 +77,7 @@ do_thread_create(caddr_t stk, size_t stksize, void (*proc)(void *), void *arg,
         /*
          * Be sure there are no surprises.
          */
-        ASSERT(stk == NULL);
+        ASSERT0P(stk);
         ASSERT0(len);
         ASSERT(state == TS_RUN);
 
@@ -742,8 +742,8 @@ dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
     dmu_buf_evict_func_t *evict_func_async,
     dmu_buf_t **clear_on_evict_dbufp __maybe_unused)
 {
-        ASSERT(dbu->dbu_evict_func_sync == NULL);
-        ASSERT(dbu->dbu_evict_func_async == NULL);
+        ASSERT0P(dbu->dbu_evict_func_sync);
+        ASSERT0P(dbu->dbu_evict_func_async);
 
         /* must have at least one evict func */
         IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
@@ -5560,7 +5560,7 @@ zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
                 if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
                         *cp = '\0';
                 sendfs = nonpackage_sendfs;
-                VERIFY(finalsnap == NULL);
+                VERIFY0P(finalsnap);
         }
         return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
             &drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
@@ -225,7 +225,7 @@ avl_nearest(avl_tree_t *tree, avl_index_t where, int direction)
         size_t off = tree->avl_offset;
 
         if (node == NULL) {
-                ASSERT(tree->avl_root == NULL);
+                ASSERT0P(tree->avl_root);
                 return (NULL);
         }
         data = AVL_NODE2DATA(node, off);
@@ -495,10 +495,10 @@ avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where)
         AVL_SETBALANCE(node, 0);
         AVL_SETPARENT(node, parent);
         if (parent != NULL) {
-                ASSERT(parent->avl_child[which_child] == NULL);
+                ASSERT0P(parent->avl_child[which_child]);
                 parent->avl_child[which_child] = node;
         } else {
-                ASSERT(tree->avl_root == NULL);
+                ASSERT0P(tree->avl_root);
                 tree->avl_root = node;
         }
         /*
@@ -608,7 +608,7 @@ avl_insert_here(
                 ASSERT(diff > 0 ? child == 1 : child == 0);
 #endif
         }
-        ASSERT(node->avl_child[child] == NULL);
+        ASSERT0P(node->avl_child[child]);
 
         avl_insert(tree, new_data, AVL_MKINDEX(node, child));
 }
@@ -898,7 +898,7 @@ avl_destroy(avl_tree_t *tree)
 {
         ASSERT(tree);
         ASSERT0(tree->avl_numnodes);
-        ASSERT(tree->avl_root == NULL);
+        ASSERT0P(tree->avl_root);
 }
 
 
@@ -66,9 +66,9 @@ void
 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
 {
         ASSERT(cvp);
-        ASSERT(name == NULL);
+        ASSERT0P(name);
         ASSERT(type == CV_DEFAULT);
-        ASSERT(arg == NULL);
+        ASSERT0P(arg);
 
         cvp->cv_magic = CV_MAGIC;
         init_waitqueue_head(&cvp->cv_event);
@@ -83,7 +83,7 @@ static int
 cv_destroy_wakeup(kcondvar_t *cvp)
 {
         if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
-                ASSERT(cvp->cv_mutex == NULL);
+                ASSERT0P(cvp->cv_mutex);
                 ASSERT(!waitqueue_active(&cvp->cv_event));
                 return (1);
         }
@@ -679,8 +679,8 @@ spl_kmem_cache_create(const char *name, size_t size, size_t align,
         /*
          * Unsupported flags
         */
-        ASSERT(vmp == NULL);
-        ASSERT(reclaim == NULL);
+        ASSERT0P(vmp);
+        ASSERT0P(reclaim);
 
         might_sleep();
 
@@ -80,7 +80,7 @@ __thread_create(caddr_t stk, size_t stksize, thread_func_t func,
 
         /* Option pp is simply ignored */
         /* Variable stack size unsupported */
-        ASSERT(stk == NULL);
+        ASSERT0P(stk);
 
         tp = kmem_alloc(sizeof (thread_priv_t), KM_PUSHPAGE);
         if (tp == NULL)
@@ -2205,7 +2205,7 @@ top:
 
         error = zfs_aclset_common(zp, aclp, cr, tx);
         ASSERT0(error);
-        ASSERT(zp->z_acl_cached == NULL);
+        ASSERT0P(zp->z_acl_cached);
         zp->z_acl_cached = aclp;
 
         if (fuid_dirtied)
@@ -590,7 +590,7 @@ zfsctl_inode_lookup(zfsvfs_t *zfsvfs, uint64_t id,
 int
 zfsctl_create(zfsvfs_t *zfsvfs)
 {
-        ASSERT(zfsvfs->z_ctldir == NULL);
+        ASSERT0P(zfsvfs->z_ctldir);
 
         zfsvfs->z_ctldir = zfsctl_inode_alloc(zfsvfs, ZFSCTL_INO_ROOT,
             &zpl_fops_root, &zpl_ops_root, 0);
@@ -223,7 +223,7 @@ zfs_kobj_add(zfs_mod_kobj_t *zkobj, struct kobject *parent, const char *name)
 {
         /* zko_default_group.attrs must be NULL terminated */
         ASSERT(zkobj->zko_default_group.attrs != NULL);
-        ASSERT(zkobj->zko_default_group.attrs[zkobj->zko_attr_count] == NULL);
+        ASSERT0P(zkobj->zko_default_group.attrs[zkobj->zko_attr_count]);
 
         kobject_init(&zkobj->zko_kobj, &zkobj->zko_kobj_type);
         return (kobject_add(&zkobj->zko_kobj, parent, name));
@@ -178,13 +178,13 @@ zfs_znode_init(void)
          * backed by kmalloc() when on the Linux slab in order that any
          * wait_on_bit() operations on the related inode operate properly.
          */
-        ASSERT(znode_cache == NULL);
+        ASSERT0P(znode_cache);
         znode_cache = kmem_cache_create("zfs_znode_cache",
             sizeof (znode_t), 0, zfs_znode_cache_constructor,
             zfs_znode_cache_destructor, NULL, NULL, NULL,
             KMC_SLAB | KMC_RECLAIMABLE);
 
-        ASSERT(znode_hold_cache == NULL);
+        ASSERT0P(znode_hold_cache);
         znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
             sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
             zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
@@ -327,8 +327,8 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
 
         mutex_enter(&zp->z_lock);
 
-        ASSERT(zp->z_sa_hdl == NULL);
-        ASSERT(zp->z_acl_cached == NULL);
+        ASSERT0P(zp->z_sa_hdl);
+        ASSERT0P(zp->z_acl_cached);
         if (sa_hdl == NULL) {
                 VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
                     SA_HDL_SHARED, &zp->z_sa_hdl));
@@ -530,7 +530,7 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
                 return (NULL);
 
         zp = ITOZ(ip);
-        ASSERT(zp->z_dirlocks == NULL);
+        ASSERT0P(zp->z_dirlocks);
         ASSERT3P(zp->z_acl_cached, ==, NULL);
         ASSERT3P(zp->z_xattr_cached, ==, NULL);
         zp->z_unlinked = B_FALSE;
@@ -1200,7 +1200,7 @@ zfs_rezget(znode_t *zp)
         }
         rw_exit(&zp->z_xattr_lock);
 
-        ASSERT(zp->z_sa_hdl == NULL);
+        ASSERT0P(zp->z_sa_hdl);
         err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
         if (err) {
                 zfs_znode_hold_exit(zfsvfs, zh);
@@ -3796,7 +3796,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, uint64_t *real_evicted)
                 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
 
                 if (HDR_HAS_L2HDR(hdr)) {
-                        ASSERT(hdr->b_l1hdr.b_pabd == NULL);
+                        ASSERT0P(hdr->b_l1hdr.b_pabd);
                         ASSERT(!HDR_HAS_RABD(hdr));
                         /*
                          * This buffer is cached on the 2nd Level ARC;
@@ -160,8 +160,8 @@ bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
         memset(bpo, 0, sizeof (*bpo));
         mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
 
-        ASSERT(bpo->bpo_dbuf == NULL);
-        ASSERT(bpo->bpo_phys == NULL);
+        ASSERT0P(bpo->bpo_dbuf);
+        ASSERT0P(bpo->bpo_phys);
         ASSERT(object != 0);
         ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
         ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
@@ -1128,8 +1128,8 @@ dbuf_verify(dmu_buf_impl_t *db)
         DB_DNODE_ENTER(db);
         dn = DB_DNODE(db);
         if (dn == NULL) {
-                ASSERT(db->db_parent == NULL);
-                ASSERT(db->db_blkptr == NULL);
+                ASSERT0P(db->db_parent);
+                ASSERT0P(db->db_blkptr);
         } else {
                 ASSERT3U(db->db.db_object, ==, dn->dn_object);
                 ASSERT3P(db->db_objset, ==, dn->dn_objset);
@@ -1180,7 +1180,7 @@ dbuf_verify(dmu_buf_impl_t *db)
                 /* db is pointed to by the dnode */
                 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
                 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
-                        ASSERT(db->db_parent == NULL);
+                        ASSERT0P(db->db_parent);
                 else
                         ASSERT(db->db_parent != NULL);
         if (db->db_blkid != DMU_SPILL_BLKID)
@@ -1384,8 +1384,8 @@ dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
          * All reads are synchronous, so we must have a hold on the dbuf
          */
         ASSERT(zfs_refcount_count(&db->db_holds) > 0);
-        ASSERT(db->db_buf == NULL);
-        ASSERT(db->db.db_data == NULL);
+        ASSERT0P(db->db_buf);
+        ASSERT0P(db->db.db_data);
         if (buf == NULL) {
                 /* i/o error */
                 ASSERT(zio == NULL || zio->io_error != 0);
@@ -1584,7 +1584,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, dnode_t *dn, zio_t *zio, dmu_flags_t flags,
         ASSERT(!zfs_refcount_is_zero(&db->db_holds));
         ASSERT(MUTEX_HELD(&db->db_mtx));
         ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
-        ASSERT(db->db_buf == NULL);
+        ASSERT0P(db->db_buf);
         ASSERT(db->db_parent == NULL ||
             RW_LOCK_HELD(&db->db_parent->db_rwlock));
 
@@ -1901,8 +1901,8 @@ dbuf_noread(dmu_buf_impl_t *db, dmu_flags_t flags)
         while (db->db_state == DB_READ || db->db_state == DB_FILL)
                 cv_wait(&db->db_changed, &db->db_mtx);
         if (db->db_state == DB_UNCACHED) {
-                ASSERT(db->db_buf == NULL);
-                ASSERT(db->db.db_data == NULL);
+                ASSERT0P(db->db_buf);
+                ASSERT0P(db->db.db_data);
                 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
                 db->db_state = DB_FILL;
                 DTRACE_SET_STATE(db, "assigning filled buffer");
@@ -2017,7 +2017,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
                 if (db->db_state == DB_UNCACHED ||
                     db->db_state == DB_NOFILL ||
                     db->db_state == DB_EVICTING) {
-                        ASSERT(db->db.db_data == NULL);
+                        ASSERT0P(db->db.db_data);
                         mutex_exit(&db->db_mtx);
                         continue;
                 }
@@ -3209,7 +3209,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
                 VERIFY(!dbuf_undirty(db, tx));
                 db->db_state = DB_UNCACHED;
         }
-        ASSERT(db->db_buf == NULL);
+        ASSERT0P(db->db_buf);
         dbuf_set_data(db, buf);
         db->db_state = DB_FILL;
         DTRACE_SET_STATE(db, "filling assigned arcbuf");
@@ -3269,7 +3269,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
         }
 
         ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
-        ASSERT(db->db_data_pending == NULL);
+        ASSERT0P(db->db_data_pending);
         ASSERT(list_is_empty(&db->db_dirty_records));
 
         db->db_state = DB_EVICTING;
@@ -3321,11 +3321,11 @@ dbuf_destroy(dmu_buf_impl_t *db)
 
         db->db_parent = NULL;
 
-        ASSERT(db->db_buf == NULL);
-        ASSERT(db->db.db_data == NULL);
-        ASSERT(db->db_hash_next == NULL);
-        ASSERT(db->db_blkptr == NULL);
-        ASSERT(db->db_data_pending == NULL);
+        ASSERT0P(db->db_buf);
+        ASSERT0P(db->db.db_data);
+        ASSERT0P(db->db_hash_next);
+        ASSERT0P(db->db_blkptr);
+        ASSERT0P(db->db_data_pending);
         ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
         ASSERT(!multilist_link_active(&db->db_cache_link));
 
@@ -4064,7 +4064,7 @@ dbuf_create_bonus(dnode_t *dn)
 {
         ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
 
-        ASSERT(dn->dn_bonus == NULL);
+        ASSERT0P(dn->dn_bonus);
         dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
             dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
         dn->dn_bonus->db_pending_evict = FALSE;
@@ -4416,7 +4416,7 @@ dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
                  * inappropriate to hook it in (i.e., nlevels mismatch).
                  */
                 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
-                ASSERT(db->db_parent == NULL);
+                ASSERT0P(db->db_parent);
                 db->db_parent = dn->dn_dbuf;
                 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
                 DBUF_VERIFY(db);
@@ -724,7 +724,7 @@ dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
 
         if (err == 0) {
                 mutex_enter(&ds->ds_lock);
-                ASSERT(ds->ds_objset == NULL);
+                ASSERT0P(ds->ds_objset);
                 ds->ds_objset = os;
                 mutex_exit(&ds->ds_lock);
         }
@@ -318,7 +318,7 @@ dnode_kstats_update(kstat_t *ksp, int rw)
 void
 dnode_init(void)
 {
-        ASSERT(dnode_cache == NULL);
+        ASSERT0P(dnode_cache);
         dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
             0, dnode_cons, dnode_dest, NULL, NULL, NULL, KMC_RECLAIMABLE);
         kmem_cache_set_move(dnode_cache, dnode_move);
@@ -450,7 +450,7 @@ dsl_dataset_evict_sync(void *dbu)
 {
         dsl_dataset_t *ds = dbu;
 
-        ASSERT(ds->ds_owner == NULL);
+        ASSERT0P(ds->ds_owner);
 
         unique_remove(ds->ds_fsid_guid);
 }
@@ -460,7 +460,7 @@ dsl_dataset_evict_async(void *dbu)
 {
         dsl_dataset_t *ds = dbu;
 
-        ASSERT(ds->ds_owner == NULL);
+        ASSERT0P(ds->ds_owner);
 
         ds->ds_dbuf = NULL;
 
@@ -1037,7 +1037,7 @@ dsl_livelist_iterate(void *arg, const blkptr_t *bp, boolean_t bp_freed,
         avl_tree_t *avl = lia->avl;
         bplist_t *to_free = lia->to_free;
         zthr_t *t = lia->t;
-        ASSERT(tx == NULL);
+        ASSERT0P(tx);
 
         if ((t != NULL) && (zthr_has_waiters(t) || zthr_iscancelled(t)))
                 return (SET_ERROR(EINTR));
@@ -728,7 +728,7 @@ kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
                  */
                 dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
         } else {
-                ASSERT(zilog == NULL);
+                ASSERT0P(zilog);
                 ASSERT3U(BP_GET_BIRTH(bp), >,
                     dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
                 (void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
@@ -1077,7 +1077,7 @@ upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
                 dsl_dataset_phys(prev)->ds_num_children++;
 
                 if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
-                        ASSERT(ds->ds_prev == NULL);
+                        ASSERT0P(ds->ds_prev);
                         VERIFY0(dsl_dataset_hold_obj(dp,
                             dsl_dataset_phys(ds)->ds_prev_snap_obj,
                             ds, &ds->ds_prev));
@@ -1173,7 +1173,7 @@ dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
         dsl_dataset_t *ds;
 
         ASSERT(dmu_tx_is_syncing(tx));
-        ASSERT(dp->dp_origin_snap == NULL);
+        ASSERT0P(dp->dp_origin_snap);
         ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
 
         /* create the origin dir, ds, & snap-ds */
@@ -391,7 +391,7 @@ static kstat_t *metaslab_ksp;
 void
 metaslab_stat_init(void)
 {
-        ASSERT(metaslab_alloc_trace_cache == NULL);
+        ASSERT0P(metaslab_alloc_trace_cache);
         metaslab_alloc_trace_cache = kmem_cache_create(
             "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
             0, NULL, NULL, NULL, NULL, NULL, 0);
@@ -465,7 +465,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
                 metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
                 avl_destroy(&mca->mca_tree);
                 mutex_destroy(&mca->mca_lock);
-                ASSERT(mca->mca_rotor == NULL);
+                ASSERT0P(mca->mca_rotor);
                 ASSERT0(mca->mca_reserved);
         }
         mutex_destroy(&mc->mc_lock);
@@ -1087,8 +1087,8 @@ metaslab_group_destroy(metaslab_group_t *mg)
 {
         spa_t *spa = mg->mg_class->mc_spa;
 
-        ASSERT(mg->mg_prev == NULL);
-        ASSERT(mg->mg_next == NULL);
+        ASSERT0P(mg->mg_prev);
+        ASSERT0P(mg->mg_next);
         /*
          * We may have gone below zero with the activation count
          * either because we never activated in the first place or
@@ -1118,8 +1118,8 @@ metaslab_group_activate(metaslab_group_t *mg)
 
         ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
 
-        ASSERT(mg->mg_prev == NULL);
-        ASSERT(mg->mg_next == NULL);
+        ASSERT0P(mg->mg_prev);
+        ASSERT0P(mg->mg_next);
         ASSERT(mg->mg_activation_count <= 0);
 
         if (++mg->mg_activation_count <= 0)
@@ -1164,8 +1164,8 @@ metaslab_group_passivate(metaslab_group_t *mg)
         if (--mg->mg_activation_count != 0) {
                 for (int i = 0; i < spa->spa_alloc_count; i++)
                         ASSERT(mc->mc_allocator[i].mca_rotor != mg);
-                ASSERT(mg->mg_prev == NULL);
-                ASSERT(mg->mg_next == NULL);
+                ASSERT0P(mg->mg_prev);
+                ASSERT0P(mg->mg_next);
                 ASSERT(mg->mg_activation_count < 0);
                 return;
         }
@@ -1345,7 +1345,7 @@ metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
 static void
 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
 {
-        ASSERT(msp->ms_group == NULL);
+        ASSERT0P(msp->ms_group);
         mutex_enter(&mg->mg_lock);
         msp->ms_group = mg;
         msp->ms_weight = 0;
@@ -3017,7 +3017,7 @@ metaslab_fini(metaslab_t *msp)
         metaslab_group_remove(mg, msp);
 
         mutex_enter(&msp->ms_lock);
-        VERIFY(msp->ms_group == NULL);
+        VERIFY0P(msp->ms_group);
 
         /*
          * If this metaslab hasn't been through metaslab_sync_done() yet its
@@ -260,7 +260,7 @@ mmp_thread_stop(spa_t *spa)
         zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
             spa_name(spa), gethrtime());
 
-        ASSERT(mmp->mmp_thread == NULL);
+        ASSERT0P(mmp->mmp_thread);
         mmp->mmp_thread_exiting = 0;
 }
 
@@ -155,7 +155,7 @@ rrw_destroy(rrwlock_t *rrl)
 {
         mutex_destroy(&rrl->rr_lock);
         cv_destroy(&rrl->rr_cv);
-        ASSERT(rrl->rr_writer == NULL);
+        ASSERT0P(rrl->rr_writer);
         zfs_refcount_destroy(&rrl->rr_anon_rcount);
         zfs_refcount_destroy(&rrl->rr_linked_rcount);
 }
@@ -188,7 +188,7 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, const void *tag)
         } else {
                 (void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
         }
-        ASSERT(rrl->rr_writer == NULL);
+        ASSERT0P(rrl->rr_writer);
         mutex_exit(&rrl->rr_lock);
 }
 
@@ -1836,9 +1836,9 @@ static void
 spa_deactivate(spa_t *spa)
 {
         ASSERT(spa->spa_sync_on == B_FALSE);
-        ASSERT(spa->spa_dsl_pool == NULL);
-        ASSERT(spa->spa_root_vdev == NULL);
-        ASSERT(spa->spa_async_zio_root == NULL);
+        ASSERT0P(spa->spa_dsl_pool);
+        ASSERT0P(spa->spa_root_vdev);
+        ASSERT0P(spa->spa_async_zio_root);
         ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
 
         spa_evicting_os_wait(spa);
@@ -2280,7 +2280,7 @@ spa_unload(spa_t *spa)
          */
         if (spa->spa_root_vdev)
                 vdev_free(spa->spa_root_vdev);
-        ASSERT(spa->spa_root_vdev == NULL);
+        ASSERT0P(spa->spa_root_vdev);
 
         /*
          * Close the dsl pool.
@@ -3275,7 +3275,7 @@ static int
 livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
     dmu_tx_t *tx)
 {
-        ASSERT(tx == NULL);
+        ASSERT0P(tx);
         livelist_new_arg_t *lna = arg;
         if (bp_freed) {
                 bplist_append(lna->frees, bp);
@@ -4091,11 +4091,11 @@ spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
         nvlist_free(spa->spa_load_info);
         spa->spa_load_info = fnvlist_alloc();
 
-        ASSERT(spa->spa_comment == NULL);
+        ASSERT0P(spa->spa_comment);
         if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
                 spa->spa_comment = spa_strdup(comment);
 
-        ASSERT(spa->spa_compatibility == NULL);
+        ASSERT0P(spa->spa_compatibility);
         if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
             &compatibility) == 0)
                 spa->spa_compatibility = spa_strdup(compatibility);
@@ -10541,7 +10541,7 @@ spa_sync_tq_create(spa_t *spa, const char *name)
 {
         kthread_t **kthreads;
 
-        ASSERT(spa->spa_sync_tq == NULL);
+        ASSERT0P(spa->spa_sync_tq);
         ASSERT3S(spa->spa_alloc_count, <=, boot_ncpus);
 
         /*
@@ -471,7 +471,7 @@ spa_config_lock_destroy(spa_t *spa)
                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
                 mutex_destroy(&scl->scl_lock);
                 cv_destroy(&scl->scl_cv);
-                ASSERT(scl->scl_writer == NULL);
+                ASSERT0P(scl->scl_writer);
                 ASSERT0(scl->scl_write_wanted);
                 ASSERT0(scl->scl_count);
         }
@@ -817,7 +817,7 @@ space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
         space_map_t *sm;
         int error;
 
-        ASSERT(*smp == NULL);
+        ASSERT0P(*smp);
         ASSERT(os != NULL);
         ASSERT(object != 0);
 
@@ -554,7 +554,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
         vdev_t **newchild;
 
         ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
-        ASSERT(cvd->vdev_parent == NULL);
+        ASSERT0P(cvd->vdev_parent);
 
         cvd->vdev_parent = pvd;
 
@@ -578,7 +578,7 @@ vdev_add_child(vdev_t *pvd, vdev_t *cvd)
         pvd->vdev_nonrot &= cvd->vdev_nonrot;
 
         cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
-        ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
+        ASSERT0P(cvd->vdev_top->vdev_parent->vdev_parent);
 
         /*
          * Walk up all ancestors to update guid sum.
@@ -1133,7 +1133,7 @@ vdev_free(vdev_t *vd)
         for (int c = 0; c < vd->vdev_children; c++)
                 vdev_free(vd->vdev_child[c]);
 
-        ASSERT(vd->vdev_child == NULL);
+        ASSERT0P(vd->vdev_child);
         ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
 
         if (vd->vdev_ops->vdev_op_fini != NULL)
@@ -1162,7 +1162,7 @@ vdev_free(vdev_t *vd)
          */
         vdev_remove_child(vd->vdev_parent, vd);
 
-        ASSERT(vd->vdev_parent == NULL);
+        ASSERT0P(vd->vdev_parent);
         ASSERT(!list_link_active(&vd->vdev_leaf_node));
 
         /*
@@ -8143,7 +8143,7 @@ zfsdev_ioctl_common(uint_t vecnum, zfs_cmd_t *zc, int flag)
                 spa_t *spa;
                 nvlist_t *lognv = NULL;
 
-                ASSERT(vec->zvec_legacy_func == NULL);
+                ASSERT0P(vec->zvec_legacy_func);
 
                 /*
                  * Add the innvl to the lognv before calling the func,
@@ -1250,7 +1250,7 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
         blkptr_t *bp;
         int error;
 
-        ASSERT(tx == NULL);
+        ASSERT0P(tx);
 
         error = dmu_objset_from_ds(ds, &os);
         if (error != 0) {
@@ -821,7 +821,7 @@ zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
         boolean_t waiting = B_FALSE;
 
         mutex_enter(&zio->io_lock);
-        ASSERT(zio->io_stall == NULL);
+        ASSERT0P(zio->io_stall);
         for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
                 if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
                         continue;
@@ -1941,7 +1941,7 @@ zio_write_compress(zio_t *zio)
         }
 
         ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
-        ASSERT(zio->io_bp_override == NULL);
+        ASSERT0P(zio->io_bp_override);
 
         if (!BP_IS_HOLE(bp) && BP_GET_BIRTH(bp) == zio->io_txg) {
                 /*
@@ -2436,7 +2436,7 @@ __zio_execute(zio_t *zio)
 
         ASSERT(!MUTEX_HELD(&zio->io_lock));
         ASSERT(ISP2(stage));
-        ASSERT(zio->io_stall == NULL);
+        ASSERT0P(zio->io_stall);
 
         do {
                 stage <<= 1;
@@ -2590,8 +2590,8 @@ zio_reexecute(void *arg)
 
         ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
         ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
-        ASSERT(pio->io_gang_leader == NULL);
-        ASSERT(pio->io_gang_tree == NULL);
+        ASSERT0P(pio->io_gang_leader);
+        ASSERT0P(pio->io_gang_tree);
 
         mutex_enter(&pio->io_lock);
         pio->io_flags = pio->io_orig_flags;
@@ -2689,7 +2689,7 @@ zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
                 ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
                 ASSERT(zio != spa->spa_suspend_zio_root);
                 ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
-                ASSERT(zio_unique_parent(zio) == NULL);
+                ASSERT0P(zio_unique_parent(zio));
                 ASSERT(zio->io_stage == ZIO_STAGE_DONE);
                 zio_add_child(spa->spa_suspend_zio_root, zio);
         }
@@ -2908,7 +2908,7 @@ zio_gang_node_alloc(zio_gang_node_t **gnpp, uint64_t gangblocksize)
 {
         zio_gang_node_t *gn;
 
-        ASSERT(*gnpp == NULL);
+        ASSERT0P(*gnpp);
 
         gn = kmem_zalloc(sizeof (*gn) +
             (gbh_nblkptrs(gangblocksize) * sizeof (gn)), KM_SLEEP);
@@ -2925,7 +2925,7 @@ zio_gang_node_free(zio_gang_node_t **gnpp)
         zio_gang_node_t *gn = *gnpp;
 
         for (int g = 0; g < gbh_nblkptrs(gn->gn_allocsize); g++)
-                ASSERT(gn->gn_child[g] == NULL);
+                ASSERT0P(gn->gn_child[g]);
 
         zio_buf_free(gn->gn_gbh, gn->gn_allocsize);
         kmem_free(gn, sizeof (*gn) +
@@ -3366,7 +3366,7 @@ zio_nop_write(zio_t *zio)
         ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
         ASSERT(zp->zp_nopwrite);
         ASSERT(!zp->zp_dedup);
-        ASSERT(zio->io_bp_override == NULL);
+        ASSERT0P(zio->io_bp_override);
         ASSERT(IO_IS_ALLOCATING(zio));
 
         /*
@@ -3495,7 +3495,7 @@ zio_ddt_read_start(zio_t *zio)
                 ddt_univ_phys_t *ddp = dde->dde_phys;
                 blkptr_t blk;
 
-                ASSERT(zio->io_vsd == NULL);
+                ASSERT0P(zio->io_vsd);
                 zio->io_vsd = dde;
 
                 if (v_self == DDT_PHYS_NONE)
@@ -3560,7 +3560,7 @@ zio_ddt_read_done(zio_t *zio)
                 zio->io_vsd = NULL;
         }
 
-        ASSERT(zio->io_vsd == NULL);
+        ASSERT0P(zio->io_vsd);
 
         return (zio);
 }
@@ -4415,7 +4415,7 @@ static void
 zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
 {
         ASSERT(BP_GET_BIRTH(bp) == zio->io_txg || BP_IS_HOLE(bp));
-        ASSERT(zio->io_bp_override == NULL);
+        ASSERT0P(zio->io_bp_override);
 
         if (!BP_IS_HOLE(bp)) {
                 metaslab_free(zio->io_spa, bp, BP_GET_BIRTH(bp), B_TRUE);
@@ -4751,7 +4751,7 @@ zio_vdev_io_done(zio_t *zio)
                 ops->vdev_op_io_done(zio);
 
         if (unexpected_error && vd->vdev_remove_wanted == B_FALSE)
-                VERIFY(vdev_probe(vd, zio) == NULL);
+                VERIFY0P(vdev_probe(vd, zio));
 
         return (zio);
 }