mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2026-01-14 17:22:05 +03:00
Prefer VERIFY0(n) over VERIFY(n == 0)
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Reviewed-by: Alexander Motin <alexander.motin@TrueNAS.com> Signed-off-by: Rob Norris <robn@despairlabs.com> Sponsored-by: https://despairlabs.com/sponsor/ Closes #17591
This commit is contained in:
parent
2564308cb2
commit
c39e076f23
@ -892,9 +892,9 @@ dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
|
||||
size_t nvsize = *(uint64_t *)data;
|
||||
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
|
||||
|
||||
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
|
||||
VERIFY0(dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
|
||||
|
||||
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
|
||||
VERIFY0(nvlist_unpack(packed, nvsize, &nv, 0));
|
||||
|
||||
umem_free(packed, nvsize);
|
||||
|
||||
@ -2043,10 +2043,10 @@ dump_ddt_object(ddt_t *ddt, ddt_type_t type, ddt_class_t class)
|
||||
|
||||
if (error == ENOENT)
|
||||
return;
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
error = ddt_object_count(ddt, type, class, &count);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
@ -3520,8 +3520,8 @@ dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
|
||||
uint64_t fuid_obj;
|
||||
|
||||
/* first find the fuid object. It lives in the master node */
|
||||
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
|
||||
8, 1, &fuid_obj) == 0);
|
||||
VERIFY0(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
|
||||
8, 1, &fuid_obj));
|
||||
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
|
||||
(void) zfs_fuid_table_load(os, fuid_obj,
|
||||
&idx_tree, &domain_tree);
|
||||
|
||||
10
cmd/zhack.c
10
cmd/zhack.c
@ -162,9 +162,9 @@ zhack_import(char *target, boolean_t readonly)
|
||||
|
||||
props = NULL;
|
||||
if (readonly) {
|
||||
VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
|
||||
VERIFY(nvlist_add_uint64(props,
|
||||
zpool_prop_to_name(ZPOOL_PROP_READONLY), 1) == 0);
|
||||
VERIFY0(nvlist_alloc(&props, NV_UNIQUE_NAME, 0));
|
||||
VERIFY0(nvlist_add_uint64(props,
|
||||
zpool_prop_to_name(ZPOOL_PROP_READONLY), 1));
|
||||
}
|
||||
|
||||
zfeature_checks_disable = B_TRUE;
|
||||
@ -218,8 +218,8 @@ dump_obj(objset_t *os, uint64_t obj, const char *name)
|
||||
} else {
|
||||
ASSERT(za->za_integer_length == 1);
|
||||
char val[1024];
|
||||
VERIFY(zap_lookup(os, obj, za->za_name,
|
||||
1, sizeof (val), val) == 0);
|
||||
VERIFY0(zap_lookup(os, obj, za->za_name,
|
||||
1, sizeof (val), val));
|
||||
(void) printf("\t%s = %s\n", za->za_name, val);
|
||||
}
|
||||
}
|
||||
|
||||
@ -12377,7 +12377,7 @@ zpool_do_events_next(ev_opts_t *opts)
|
||||
nvlist_free(nvl);
|
||||
}
|
||||
|
||||
VERIFY(0 == close(zevent_fd));
|
||||
VERIFY0(close(zevent_fd));
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
@ -2277,8 +2277,8 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
|
||||
|
||||
ztest_block_tag_t rbt;
|
||||
|
||||
VERIFY(dmu_read(os, lr->lr_foid, offset,
|
||||
sizeof (rbt), &rbt, flags) == 0);
|
||||
VERIFY0(dmu_read(os, lr->lr_foid, offset,
|
||||
sizeof (rbt), &rbt, flags));
|
||||
if (rbt.bt_magic == BT_MAGIC) {
|
||||
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
|
||||
offset, gen, txg, crtxg);
|
||||
@ -5536,8 +5536,8 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
|
||||
}
|
||||
|
||||
if (i == 1) {
|
||||
VERIFY(dmu_buf_hold(os, bigobj, off,
|
||||
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
|
||||
VERIFY0(dmu_buf_hold(os, bigobj, off,
|
||||
FTAG, &dbt, DMU_READ_NO_PREFETCH));
|
||||
}
|
||||
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
|
||||
VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
|
||||
|
||||
@ -78,7 +78,7 @@ do_thread_create(caddr_t stk, size_t stksize, void (*proc)(void *), void *arg,
|
||||
* Be sure there are no surprises.
|
||||
*/
|
||||
ASSERT(stk == NULL);
|
||||
ASSERT(len == 0);
|
||||
ASSERT0(len);
|
||||
ASSERT(state == TS_RUN);
|
||||
|
||||
if (pp == &p0)
|
||||
|
||||
@ -73,7 +73,7 @@ extern "C" {
|
||||
pflags |= attr; \
|
||||
else \
|
||||
pflags &= ~attr; \
|
||||
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(ZTOZSB(zp)), \
|
||||
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(ZTOZSB(zp)), \
|
||||
&pflags, sizeof (pflags), tx)); \
|
||||
}
|
||||
|
||||
|
||||
@ -81,7 +81,7 @@ get_stats_for_obj(differ_info_t *di, const char *dsname, uint64_t obj,
|
||||
/* we can get stats even if we failed to get a path */
|
||||
(void) memcpy(sb, &zc.zc_stat, sizeof (zfs_stat_t));
|
||||
if (error == 0) {
|
||||
ASSERT(di->zerr == 0);
|
||||
ASSERT0(di->zerr);
|
||||
(void) strlcpy(pn, zc.zc_value, maxlen);
|
||||
return (0);
|
||||
}
|
||||
@ -404,7 +404,7 @@ write_free_diffs(FILE *fp, differ_info_t *di, dmu_diff_record_t *dr)
|
||||
(void) strlcpy(zc.zc_name, di->fromsnap, sizeof (zc.zc_name));
|
||||
zc.zc_obj = dr->ddr_first - 1;
|
||||
|
||||
ASSERT(di->zerr == 0);
|
||||
ASSERT0(di->zerr);
|
||||
|
||||
while (zc.zc_obj < dr->ddr_last) {
|
||||
int err;
|
||||
|
||||
@ -122,7 +122,7 @@ const pool_config_ops_t libzfs_config_ops = {
|
||||
static uint64_t
|
||||
label_offset(uint64_t size, int l)
|
||||
{
|
||||
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
|
||||
ASSERT0(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t));
|
||||
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
|
||||
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
|
||||
}
|
||||
|
||||
@ -516,7 +516,7 @@ zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
|
||||
} else if (rc == ENOTSUP) {
|
||||
int spa_version;
|
||||
|
||||
VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
|
||||
VERIFY0(zfs_spa_version(zhp, &spa_version));
|
||||
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
||||
"Can't mount a version %llu "
|
||||
"file system on a version %d pool. Pool must be"
|
||||
|
||||
@ -2505,7 +2505,7 @@ zfs_send_cb_impl(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
|
||||
err = ENOENT;
|
||||
|
||||
if (sdd.cleanup_fd != -1) {
|
||||
VERIFY(0 == close(sdd.cleanup_fd));
|
||||
VERIFY0(close(sdd.cleanup_fd));
|
||||
sdd.cleanup_fd = -1;
|
||||
}
|
||||
|
||||
@ -2531,7 +2531,7 @@ err_out:
|
||||
fnvlist_free(sdd.snapholds);
|
||||
|
||||
if (sdd.cleanup_fd != -1)
|
||||
VERIFY(0 == close(sdd.cleanup_fd));
|
||||
VERIFY0(close(sdd.cleanup_fd));
|
||||
return (err);
|
||||
}
|
||||
|
||||
@ -5108,7 +5108,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
|
||||
nvlist_t *holds, *errors = NULL;
|
||||
int cleanup_fd = -1;
|
||||
|
||||
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
|
||||
VERIFY0(nvlist_alloc(&holds, 0, KM_SLEEP));
|
||||
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
|
||||
pair != NULL;
|
||||
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
|
||||
|
||||
@ -369,7 +369,7 @@ cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime)
|
||||
if (delta <= 0)
|
||||
return (-1);
|
||||
|
||||
VERIFY(gettimeofday(&tv, NULL) == 0);
|
||||
VERIFY0(gettimeofday(&tv, NULL));
|
||||
|
||||
ts.tv_sec = tv.tv_sec + delta / hz;
|
||||
ts.tv_nsec = tv.tv_usec * NSEC_PER_USEC + (delta % hz) * (NANOSEC / hz);
|
||||
|
||||
@ -137,12 +137,10 @@ show_pool_stats(spa_t *spa)
|
||||
nvlist_t *config, *nvroot;
|
||||
const char *name;
|
||||
|
||||
VERIFY(spa_get_stats(spa_name(spa), &config, NULL, 0) == 0);
|
||||
VERIFY0(spa_get_stats(spa_name(spa), &config, NULL, 0));
|
||||
|
||||
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
|
||||
&nvroot) == 0);
|
||||
VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
|
||||
&name) == 0);
|
||||
VERIFY0(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot));
|
||||
VERIFY0(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, &name));
|
||||
|
||||
show_vdev_stats(name, ZPOOL_CONFIG_CHILDREN, nvroot, 0);
|
||||
show_vdev_stats(NULL, ZPOOL_CONFIG_L2CACHE, nvroot, 0);
|
||||
|
||||
@ -917,7 +917,7 @@ error:
|
||||
static uint64_t
|
||||
label_offset(uint64_t size, int l)
|
||||
{
|
||||
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
|
||||
ASSERT0(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t));
|
||||
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
|
||||
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
|
||||
}
|
||||
|
||||
@ -478,7 +478,7 @@ avl_insert(avl_tree_t *tree, void *new_data, avl_index_t where)
|
||||
size_t off = tree->avl_offset;
|
||||
|
||||
#ifdef _LP64
|
||||
ASSERT(((uintptr_t)new_data & 0x7) == 0);
|
||||
ASSERT0(((uintptr_t)new_data & 0x7));
|
||||
#endif
|
||||
|
||||
node = AVL_DATA2NODE(new_data, off);
|
||||
@ -881,7 +881,7 @@ avl_create(avl_tree_t *tree, int (*compar) (const void *, const void *),
|
||||
ASSERT(size > 0);
|
||||
ASSERT(size >= offset + sizeof (avl_node_t));
|
||||
#ifdef _LP64
|
||||
ASSERT((offset & 0x7) == 0);
|
||||
ASSERT0((offset & 0x7));
|
||||
#endif
|
||||
|
||||
tree->avl_compar = compar;
|
||||
@ -897,7 +897,7 @@ void
|
||||
avl_destroy(avl_tree_t *tree)
|
||||
{
|
||||
ASSERT(tree);
|
||||
ASSERT(tree->avl_numnodes == 0);
|
||||
ASSERT0(tree->avl_numnodes);
|
||||
ASSERT(tree->avl_root == NULL);
|
||||
}
|
||||
|
||||
|
||||
@ -124,7 +124,7 @@ kcf_context_cache_destructor(void *buf, void *cdrarg)
|
||||
(void) cdrarg;
|
||||
kcf_context_t *kctx = (kcf_context_t *)buf;
|
||||
|
||||
ASSERT(kctx->kc_refcnt == 0);
|
||||
ASSERT0(kctx->kc_refcnt);
|
||||
}
|
||||
|
||||
void
|
||||
|
||||
@ -236,16 +236,16 @@ aes_encrypt_atomic(crypto_mechanism_t *mechanism,
|
||||
aes_xor_block);
|
||||
if (ret != CRYPTO_SUCCESS)
|
||||
goto out;
|
||||
ASSERT(aes_ctx.ac_remainder_len == 0);
|
||||
ASSERT0(aes_ctx.ac_remainder_len);
|
||||
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE) {
|
||||
ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
|
||||
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
|
||||
aes_copy_block, aes_xor_block);
|
||||
if (ret != CRYPTO_SUCCESS)
|
||||
goto out;
|
||||
ASSERT(aes_ctx.ac_remainder_len == 0);
|
||||
ASSERT0(aes_ctx.ac_remainder_len);
|
||||
} else {
|
||||
ASSERT(aes_ctx.ac_remainder_len == 0);
|
||||
ASSERT0(aes_ctx.ac_remainder_len);
|
||||
}
|
||||
|
||||
if (plaintext != ciphertext) {
|
||||
@ -337,7 +337,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
|
||||
ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
|
||||
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
|
||||
aes_copy_block, aes_xor_block);
|
||||
ASSERT(aes_ctx.ac_remainder_len == 0);
|
||||
ASSERT0(aes_ctx.ac_remainder_len);
|
||||
if ((ret == CRYPTO_SUCCESS) &&
|
||||
(ciphertext != plaintext)) {
|
||||
plaintext->cd_length =
|
||||
@ -349,7 +349,7 @@ aes_decrypt_atomic(crypto_mechanism_t *mechanism,
|
||||
ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
|
||||
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
|
||||
aes_xor_block);
|
||||
ASSERT(aes_ctx.ac_remainder_len == 0);
|
||||
ASSERT0(aes_ctx.ac_remainder_len);
|
||||
if ((ret == CRYPTO_SUCCESS) &&
|
||||
(ciphertext != plaintext)) {
|
||||
plaintext->cd_length =
|
||||
|
||||
@ -296,7 +296,7 @@ spl_slab_free(spl_kmem_slab_t *sks,
|
||||
spl_kmem_cache_t *skc;
|
||||
|
||||
ASSERT(sks->sks_magic == SKS_MAGIC);
|
||||
ASSERT(sks->sks_ref == 0);
|
||||
ASSERT0(sks->sks_ref);
|
||||
|
||||
skc = sks->sks_cache;
|
||||
ASSERT(skc->skc_magic == SKC_MAGIC);
|
||||
@ -598,7 +598,7 @@ static void
|
||||
spl_magazine_free(spl_kmem_magazine_t *skm)
|
||||
{
|
||||
ASSERT(skm->skm_magic == SKM_MAGIC);
|
||||
ASSERT(skm->skm_avail == 0);
|
||||
ASSERT0(skm->skm_avail);
|
||||
kfree(skm);
|
||||
}
|
||||
|
||||
@ -610,7 +610,7 @@ spl_magazine_create(spl_kmem_cache_t *skc)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
|
||||
ASSERT0((skc->skc_flags & KMC_SLAB));
|
||||
|
||||
skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
|
||||
num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
|
||||
@ -640,7 +640,7 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
|
||||
spl_kmem_magazine_t *skm;
|
||||
int i = 0;
|
||||
|
||||
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
|
||||
ASSERT0((skc->skc_flags & KMC_SLAB));
|
||||
|
||||
for_each_possible_cpu(i) {
|
||||
skm = skc->skc_mag[i];
|
||||
@ -986,7 +986,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
|
||||
|
||||
ASSERT0(flags & ~KM_PUBLIC_MASK);
|
||||
ASSERT(skc->skc_magic == SKC_MAGIC);
|
||||
ASSERT((skc->skc_flags & KMC_SLAB) == 0);
|
||||
ASSERT0((skc->skc_flags & KMC_SLAB));
|
||||
|
||||
*obj = NULL;
|
||||
|
||||
|
||||
@ -541,7 +541,7 @@ __kstat_create(const char *ks_module, int ks_instance, const char *ks_name,
|
||||
kstat_t *ksp;
|
||||
|
||||
ASSERT(ks_module);
|
||||
ASSERT(ks_instance == 0);
|
||||
ASSERT0(ks_instance);
|
||||
ASSERT(ks_name);
|
||||
|
||||
if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
|
||||
|
||||
@ -1900,7 +1900,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
|
||||
if (!(flag & IS_ROOT_NODE) &&
|
||||
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
|
||||
!(dzp->z_pflags & ZFS_XATTR)) {
|
||||
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
|
||||
VERIFY0(zfs_acl_node_read(dzp, B_TRUE,
|
||||
&paclp, B_FALSE));
|
||||
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
|
||||
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
|
||||
@ -2204,7 +2204,7 @@ top:
|
||||
}
|
||||
|
||||
error = zfs_aclset_common(zp, aclp, cr, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
ASSERT(zp->z_acl_cached == NULL);
|
||||
zp->z_acl_cached = aclp;
|
||||
|
||||
|
||||
@ -463,7 +463,7 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
|
||||
zfsvfs_t *zfsvfs = ZTOZSB(zp);
|
||||
|
||||
ASSERT(zp->z_unlinked);
|
||||
ASSERT(ZTOI(zp)->i_nlink == 0);
|
||||
ASSERT0(ZTOI(zp)->i_nlink);
|
||||
|
||||
VERIFY3U(0, ==,
|
||||
zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
|
||||
@ -662,8 +662,8 @@ zfs_rmnode(znode_t *zp)
|
||||
uint64_t links;
|
||||
int error;
|
||||
|
||||
ASSERT(ZTOI(zp)->i_nlink == 0);
|
||||
ASSERT(atomic_read(&ZTOI(zp)->i_count) == 0);
|
||||
ASSERT0(ZTOI(zp)->i_nlink);
|
||||
ASSERT0(atomic_read(&ZTOI(zp)->i_count));
|
||||
|
||||
/*
|
||||
* If this is an attribute directory, purge its contents.
|
||||
@ -710,7 +710,7 @@ zfs_rmnode(znode_t *zp)
|
||||
&xattr_obj, sizeof (xattr_obj));
|
||||
if (error == 0 && xattr_obj) {
|
||||
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
}
|
||||
|
||||
acl_obj = zfs_external_acl(zp);
|
||||
@ -744,12 +744,12 @@ zfs_rmnode(znode_t *zp)
|
||||
}
|
||||
|
||||
if (xzp) {
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
mutex_enter(&xzp->z_lock);
|
||||
xzp->z_unlinked = B_TRUE; /* mark xzp for deletion */
|
||||
clear_nlink(ZTOI(xzp)); /* no more links to it */
|
||||
links = 0;
|
||||
VERIFY(0 == sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
|
||||
VERIFY0(sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
|
||||
&links, sizeof (links), tx));
|
||||
mutex_exit(&xzp->z_lock);
|
||||
zfs_unlinked_add(xzp, tx);
|
||||
@ -872,7 +872,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
|
||||
ctime);
|
||||
}
|
||||
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
mutex_exit(&zp->z_lock);
|
||||
|
||||
@ -894,7 +894,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
|
||||
&dzp->z_pflags, sizeof (dzp->z_pflags));
|
||||
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
|
||||
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
mutex_exit(&dzp->z_lock);
|
||||
|
||||
return (0);
|
||||
@ -1083,7 +1083,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
|
||||
NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
|
||||
zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime);
|
||||
error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
mutex_exit(&dzp->z_lock);
|
||||
|
||||
if (unlinkedp != NULL)
|
||||
@ -1167,7 +1167,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
|
||||
ASSERT(error == 0 && parent == zp->z_id);
|
||||
#endif
|
||||
|
||||
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
|
||||
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
|
||||
sizeof (xzp->z_id), tx));
|
||||
|
||||
if (!zp->z_unlinked)
|
||||
|
||||
@ -1676,7 +1676,7 @@ zfs_umount(struct super_block *sb)
|
||||
|
||||
if (zfsvfs->z_arc_prune != NULL)
|
||||
arc_remove_prune_callback(zfsvfs->z_arc_prune);
|
||||
VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
|
||||
VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
|
||||
os = zfsvfs->z_os;
|
||||
|
||||
/*
|
||||
@ -1802,8 +1802,8 @@ zfs_vget(struct super_block *sb, struct inode **ipp, fid_t *fidp)
|
||||
ASSERT(*ipp != NULL);
|
||||
|
||||
if (object == ZFSCTL_INO_SNAPDIR) {
|
||||
VERIFY(zfsctl_root_lookup(*ipp, "snapshot", ipp,
|
||||
0, kcred, NULL, NULL) == 0);
|
||||
VERIFY0(zfsctl_root_lookup(*ipp, "snapshot", ipp,
|
||||
0, kcred, NULL, NULL));
|
||||
} else {
|
||||
/*
|
||||
* Must have an existing ref, so igrab()
|
||||
@ -1905,7 +1905,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
|
||||
goto bail;
|
||||
|
||||
ds->ds_dir->dd_activity_cancelled = B_FALSE;
|
||||
VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
|
||||
VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
|
||||
|
||||
zfs_set_fuid_feature(zfsvfs);
|
||||
zfsvfs->z_rollback_time = jiffies;
|
||||
@ -2078,7 +2078,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
|
||||
ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
|
||||
ASSERT0(error);
|
||||
|
||||
VERIFY(0 == sa_set_sa_object(os, sa_obj));
|
||||
VERIFY0(sa_set_sa_object(os, sa_obj));
|
||||
sa_register_update_callback(os, zfs_sa_upgrade);
|
||||
}
|
||||
|
||||
|
||||
@ -2483,10 +2483,10 @@ top:
|
||||
new_mode = zp->z_mode;
|
||||
}
|
||||
err = zfs_acl_chown_setattr(zp);
|
||||
ASSERT(err == 0);
|
||||
ASSERT0(err);
|
||||
if (attrzp) {
|
||||
err = zfs_acl_chown_setattr(attrzp);
|
||||
ASSERT(err == 0);
|
||||
ASSERT0(err);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2600,7 +2600,7 @@ out:
|
||||
if (err == 0 && xattr_count > 0) {
|
||||
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
|
||||
xattr_count, tx);
|
||||
ASSERT(err2 == 0);
|
||||
ASSERT0(err2);
|
||||
}
|
||||
|
||||
if (aclp)
|
||||
@ -3654,8 +3654,8 @@ top:
|
||||
* operation are sync safe.
|
||||
*/
|
||||
if (is_tmpfile) {
|
||||
VERIFY(zap_remove_int(zfsvfs->z_os,
|
||||
zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
|
||||
VERIFY0(zap_remove_int(zfsvfs->z_os,
|
||||
zfsvfs->z_unlinkedobj, szp->z_id, tx));
|
||||
} else {
|
||||
if (flags & FIGNORECASE)
|
||||
txtype |= TX_CI;
|
||||
|
||||
@ -330,7 +330,7 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
|
||||
ASSERT(zp->z_sa_hdl == NULL);
|
||||
ASSERT(zp->z_acl_cached == NULL);
|
||||
if (sa_hdl == NULL) {
|
||||
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
|
||||
VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
|
||||
SA_HDL_SHARED, &zp->z_sa_hdl));
|
||||
} else {
|
||||
zp->z_sa_hdl = sa_hdl;
|
||||
@ -811,7 +811,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
|
||||
}
|
||||
|
||||
/* Now add in all of the "SA" attributes */
|
||||
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
|
||||
VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
|
||||
&sa_hdl));
|
||||
|
||||
/*
|
||||
@ -901,7 +901,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
|
||||
acl_ids->z_fuid, acl_ids->z_fgid);
|
||||
}
|
||||
|
||||
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
|
||||
VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));
|
||||
|
||||
if (!(flag & IS_ROOT_NODE)) {
|
||||
/*
|
||||
@ -1314,9 +1314,9 @@ zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
|
||||
zh = zfs_znode_hold_enter(zfsvfs, obj);
|
||||
if (acl_obj) {
|
||||
VERIFY(!zp->z_is_sa);
|
||||
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
|
||||
VERIFY0(dmu_object_free(os, acl_obj, tx));
|
||||
}
|
||||
VERIFY(0 == dmu_object_free(os, obj, tx));
|
||||
VERIFY0(dmu_object_free(os, obj, tx));
|
||||
zfs_znode_dmu_fini(zp);
|
||||
zfs_znode_hold_exit(zfsvfs, zh);
|
||||
}
|
||||
@ -1536,7 +1536,7 @@ zfs_extend(znode_t *zp, uint64_t end)
|
||||
|
||||
zp->z_size = end;
|
||||
|
||||
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
|
||||
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
|
||||
&zp->z_size, sizeof (zp->z_size), tx));
|
||||
|
||||
zfs_rangelock_exit(lr);
|
||||
@ -1726,7 +1726,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
|
||||
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
|
||||
NULL, &zp->z_pflags, 8);
|
||||
}
|
||||
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
|
||||
VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
|
||||
|
||||
dmu_tx_commit(tx);
|
||||
zfs_rangelock_exit(lr);
|
||||
@ -1793,7 +1793,7 @@ log:
|
||||
NULL, &zp->z_pflags, 8);
|
||||
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
|
||||
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
|
||||
|
||||
@ -1840,7 +1840,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
moid = MASTER_NODE_OBJ;
|
||||
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
|
||||
DMU_OT_NONE, 0, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
/*
|
||||
* Set starting attributes.
|
||||
@ -1853,7 +1853,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
const char *name;
|
||||
|
||||
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
|
||||
VERIFY(nvpair_value_uint64(elem, &val) == 0);
|
||||
VERIFY0(nvpair_value_uint64(elem, &val));
|
||||
name = nvpair_name(elem);
|
||||
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
|
||||
if (val < version)
|
||||
@ -1861,7 +1861,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
} else {
|
||||
error = zap_update(os, moid, name, 8, 1, &val, tx);
|
||||
}
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
|
||||
norm = val;
|
||||
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
|
||||
@ -1869,7 +1869,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
}
|
||||
ASSERT(version != 0);
|
||||
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
/*
|
||||
* Create zap object used for SA attribute registration
|
||||
@ -1879,7 +1879,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
|
||||
DMU_OT_NONE, 0, tx);
|
||||
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
} else {
|
||||
sa_obj = 0;
|
||||
}
|
||||
@ -1889,7 +1889,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
|
||||
|
||||
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
/*
|
||||
* Create root znode. Create minimal znode/inode/zfsvfs/sb
|
||||
@ -1922,7 +1922,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
|
||||
&zfsvfs->z_attr_table);
|
||||
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
|
||||
/*
|
||||
* Fold case on file systems that are always or sometimes case
|
||||
@ -1946,12 +1946,12 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
|
||||
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
|
||||
}
|
||||
|
||||
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
|
||||
VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
|
||||
cr, NULL, &acl_ids, zfs_init_idmap));
|
||||
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
|
||||
ASSERT3P(zp, ==, rootzp);
|
||||
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
zfs_acl_ids_free(&acl_ids);
|
||||
|
||||
atomic_set(&ZTOI(rootzp)->i_count, 0);
|
||||
|
||||
@ -247,7 +247,7 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
|
||||
* and fifos, but we want to know if this behavior ever changes.
|
||||
*/
|
||||
if (S_ISSOCK(mode) || S_ISFIFO(mode))
|
||||
ASSERT(rdev == 0);
|
||||
ASSERT0(rdev);
|
||||
|
||||
crhold(cr);
|
||||
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
|
||||
|
||||
@ -49,7 +49,7 @@ zpl_inode_alloc(struct super_block *sb)
|
||||
static void
|
||||
zpl_inode_free(struct inode *ip)
|
||||
{
|
||||
ASSERT(atomic_read(&ip->i_count) == 0);
|
||||
ASSERT0(atomic_read(&ip->i_count));
|
||||
zfs_inode_free(ip);
|
||||
}
|
||||
#endif
|
||||
@ -57,7 +57,7 @@ zpl_inode_free(struct inode *ip)
|
||||
static void
|
||||
zpl_inode_destroy(struct inode *ip)
|
||||
{
|
||||
ASSERT(atomic_read(&ip->i_count) == 0);
|
||||
ASSERT0(atomic_read(&ip->i_count));
|
||||
zfs_inode_destroy(ip);
|
||||
}
|
||||
|
||||
|
||||
@ -7007,7 +7007,7 @@ arc_write_done(zio_t *zio)
|
||||
ASSERT(ARC_BUF_LAST(hdr->b_l1hdr.b_buf));
|
||||
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
|
||||
ASSERT(BP_GET_DEDUP(zio->io_bp));
|
||||
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
|
||||
ASSERT0(BP_GET_LEVEL(zio->io_bp));
|
||||
}
|
||||
}
|
||||
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
|
||||
|
||||
@ -523,7 +523,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
|
||||
return;
|
||||
|
||||
/* Only data blocks support the attachment of user data. */
|
||||
ASSERT(db->db_level == 0);
|
||||
ASSERT0(db->db_level);
|
||||
|
||||
/* Clients must resolve a dbuf before attaching user data. */
|
||||
ASSERT(db->db.db_data != NULL);
|
||||
@ -1219,7 +1219,7 @@ dbuf_verify(dmu_buf_impl_t *db)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < db->db.db_size >> 3; i++) {
|
||||
ASSERT(buf[i] == 0);
|
||||
ASSERT0(buf[i]);
|
||||
}
|
||||
} else {
|
||||
blkptr_t *bps = db->db.db_data;
|
||||
@ -1682,7 +1682,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
|
||||
|
||||
ASSERT(MUTEX_HELD(&db->db_mtx));
|
||||
ASSERT(db->db.db_data != NULL);
|
||||
ASSERT(db->db_level == 0);
|
||||
ASSERT0(db->db_level);
|
||||
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
|
||||
|
||||
if (dr == NULL ||
|
||||
@ -1929,7 +1929,7 @@ dbuf_unoverride(dbuf_dirty_record_t *dr)
|
||||
* comes from dbuf_dirty() callers who must also hold a range lock.
|
||||
*/
|
||||
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
|
||||
ASSERT(db->db_level == 0);
|
||||
ASSERT0(db->db_level);
|
||||
|
||||
if (db->db_blkid == DMU_BONUS_BLKID ||
|
||||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
|
||||
@ -2932,7 +2932,7 @@ dmu_buf_will_fill_flags(dmu_buf_t *db_fake, dmu_tx_t *tx, boolean_t canfail,
|
||||
|
||||
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
|
||||
ASSERT(tx->tx_txg != 0);
|
||||
ASSERT(db->db_level == 0);
|
||||
ASSERT0(db->db_level);
|
||||
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
|
||||
|
||||
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
|
||||
@ -3144,7 +3144,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx,
|
||||
{
|
||||
ASSERT(!zfs_refcount_is_zero(&db->db_holds));
|
||||
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
|
||||
ASSERT(db->db_level == 0);
|
||||
ASSERT0(db->db_level);
|
||||
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
|
||||
ASSERT(buf != NULL);
|
||||
ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
|
||||
@ -4588,7 +4588,7 @@ dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
|
||||
|
||||
/* ensure that everything is zero after our data */
|
||||
for (; datap_end < datap_max; datap_end++)
|
||||
ASSERT(*datap_end == 0);
|
||||
ASSERT0(*datap_end);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
@ -1343,7 +1343,7 @@ dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
|
||||
VERIFY0(dmu_buf_hold_array(os, object, offset, size,
|
||||
FALSE, FTAG, &numbufs, &dbp));
|
||||
|
||||
for (i = 0; i < numbufs; i++) {
|
||||
@ -1872,7 +1872,7 @@ dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
|
||||
*/
|
||||
BP_SET_LSIZE(bp, db->db_size);
|
||||
} else if (!BP_IS_EMBEDDED(bp)) {
|
||||
ASSERT(BP_GET_LEVEL(bp) == 0);
|
||||
ASSERT0(BP_GET_LEVEL(bp));
|
||||
BP_SET_FILL(bp, 1);
|
||||
}
|
||||
}
|
||||
@ -2405,7 +2405,7 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
|
||||
}
|
||||
}
|
||||
} else if (wp & WP_NOFILL) {
|
||||
ASSERT(level == 0);
|
||||
ASSERT0(level);
|
||||
|
||||
/*
|
||||
* If we're writing preallocated blocks, we aren't actually
|
||||
@ -2865,7 +2865,7 @@ byteswap_uint64_array(void *vbuf, size_t size)
|
||||
size_t count = size >> 3;
|
||||
int i;
|
||||
|
||||
ASSERT((size & 7) == 0);
|
||||
ASSERT0((size & 7));
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
buf[i] = BSWAP_64(buf[i]);
|
||||
@ -2878,7 +2878,7 @@ byteswap_uint32_array(void *vbuf, size_t size)
|
||||
size_t count = size >> 2;
|
||||
int i;
|
||||
|
||||
ASSERT((size & 3) == 0);
|
||||
ASSERT0((size & 3));
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
buf[i] = BSWAP_32(buf[i]);
|
||||
@ -2891,7 +2891,7 @@ byteswap_uint16_array(void *vbuf, size_t size)
|
||||
size_t count = size >> 1;
|
||||
int i;
|
||||
|
||||
ASSERT((size & 1) == 0);
|
||||
ASSERT0((size & 1));
|
||||
|
||||
for (i = 0; i < count; i++)
|
||||
buf[i] = BSWAP_16(buf[i]);
|
||||
|
||||
@ -2226,7 +2226,7 @@ dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
|
||||
rf |= DB_RF_HAVESTRUCT;
|
||||
error = dmu_spill_hold_by_dnode(dn, rf,
|
||||
FTAG, (dmu_buf_t **)&db);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
mutex_enter(&db->db_mtx);
|
||||
data = (before) ? db->db.db_data :
|
||||
dmu_objset_userquota_find_data(db, tx);
|
||||
|
||||
@ -126,7 +126,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
|
||||
* problem, but there's no way for it to happen (for
|
||||
* now, at least).
|
||||
*/
|
||||
ASSERT(dn->dn_assigned_txg == 0);
|
||||
ASSERT0(dn->dn_assigned_txg);
|
||||
dn->dn_assigned_txg = tx->tx_txg;
|
||||
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
|
||||
mutex_exit(&dn->dn_mtx);
|
||||
@ -443,7 +443,7 @@ dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
|
||||
dnode_t *dn = txh->txh_dnode;
|
||||
int err;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
|
||||
if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
|
||||
return;
|
||||
@ -607,7 +607,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
|
||||
dnode_t *dn = txh->txh_dnode;
|
||||
int err;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
|
||||
dmu_tx_count_dnode(txh);
|
||||
|
||||
@ -681,7 +681,7 @@ dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
|
||||
{
|
||||
dmu_tx_hold_t *txh;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
|
||||
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
|
||||
object, THT_BONUS, 0, 0);
|
||||
@ -706,7 +706,7 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
|
||||
{
|
||||
dmu_tx_hold_t *txh;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
|
||||
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
|
||||
DMU_NEW_OBJECT, THT_SPACE, space, 0);
|
||||
@ -1232,7 +1232,7 @@ dmu_tx_assign(dmu_tx_t *tx, dmu_tx_flag_t flags)
|
||||
{
|
||||
int err;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
ASSERT0(flags & ~(DMU_TX_WAIT | DMU_TX_NOTHROTTLE | DMU_TX_SUSPEND));
|
||||
IMPLY(flags & DMU_TX_SUSPEND, flags & DMU_TX_WAIT);
|
||||
ASSERT(!dsl_pool_sync_context(tx->tx_pool));
|
||||
@ -1328,7 +1328,7 @@ dmu_tx_wait(dmu_tx_t *tx)
|
||||
dsl_pool_t *dp = tx->tx_pool;
|
||||
hrtime_t before;
|
||||
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
ASSERT(!dsl_pool_config_held(tx->tx_pool));
|
||||
|
||||
/*
|
||||
@ -1644,12 +1644,12 @@ dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
|
||||
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
|
||||
|
||||
if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
dmu_tx_hold_spill(tx, object);
|
||||
} else {
|
||||
DB_DNODE_ENTER(db);
|
||||
if (DB_DNODE(db)->dn_have_spill) {
|
||||
ASSERT(tx->tx_txg == 0);
|
||||
ASSERT0(tx->tx_txg);
|
||||
dmu_tx_hold_spill(tx, object);
|
||||
}
|
||||
DB_DNODE_EXIT(db);
|
||||
|
||||
@ -509,7 +509,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
|
||||
int i = 0;
|
||||
|
||||
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
|
||||
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
|
||||
ASSERT0((size & (sizeof (dnode_phys_t)-1)));
|
||||
|
||||
while (i < size) {
|
||||
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
|
||||
@ -673,7 +673,7 @@ dnode_destroy(dnode_t *dn)
|
||||
objset_t *os = dn->dn_objset;
|
||||
boolean_t complete_os_eviction = B_FALSE;
|
||||
|
||||
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
|
||||
ASSERT0((dn->dn_id_flags & DN_ID_NEW_EXIST));
|
||||
|
||||
mutex_enter(&os->os_lock);
|
||||
POINTER_INVALIDATE(&dn->dn_objset);
|
||||
@ -958,7 +958,7 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
|
||||
ndn->dn_dirty_txg = odn->dn_dirty_txg;
|
||||
ndn->dn_dirtyctx = odn->dn_dirtyctx;
|
||||
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
|
||||
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
|
||||
ASSERT0(zfs_refcount_count(&odn->dn_tx_holds));
|
||||
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
|
||||
ASSERT(avl_is_empty(&ndn->dn_dbufs));
|
||||
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
|
||||
@ -2304,7 +2304,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
|
||||
if ((off >> blkshift) > dn->dn_maxblkid)
|
||||
return;
|
||||
} else {
|
||||
ASSERT(dn->dn_maxblkid == 0);
|
||||
ASSERT0(dn->dn_maxblkid);
|
||||
if (off == 0 && len >= blksz) {
|
||||
/*
|
||||
* Freeing the whole block; fast-track this request.
|
||||
@ -2524,7 +2524,7 @@ dnode_diduse_space(dnode_t *dn, int64_t delta)
|
||||
}
|
||||
space += delta;
|
||||
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
|
||||
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
|
||||
ASSERT0((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES));
|
||||
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
|
||||
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
|
||||
} else {
|
||||
|
||||
@ -209,8 +209,8 @@ free_verify(dmu_buf_impl_t *db, uint64_t start, uint64_t end, dmu_tx_t *tx)
|
||||
rw_exit(&dn->dn_struct_rwlock);
|
||||
if (err == ENOENT)
|
||||
continue;
|
||||
ASSERT(err == 0);
|
||||
ASSERT(child->db_level == 0);
|
||||
ASSERT0(err);
|
||||
ASSERT0(child->db_level);
|
||||
dr = dbuf_find_dirty_eq(child, txg);
|
||||
|
||||
/* data_old better be zeroed */
|
||||
|
||||
@ -534,7 +534,7 @@ out:
|
||||
static void
|
||||
dsl_crypto_key_free(dsl_crypto_key_t *dck)
|
||||
{
|
||||
ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);
|
||||
ASSERT0(zfs_refcount_count(&dck->dck_holds));
|
||||
|
||||
/* destroy the zio_crypt_key_t */
|
||||
zio_crypt_key_destroy(&dck->dck_key);
|
||||
|
||||
@ -1187,7 +1187,7 @@ dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
|
||||
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
|
||||
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
|
||||
ASSERT(dmu_tx_is_syncing(tx));
|
||||
ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
|
||||
ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
|
||||
|
||||
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
|
||||
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
|
||||
@ -2112,7 +2112,7 @@ dsl_dataset_sync(dsl_dataset_t *ds, zio_t *rio, dmu_tx_t *tx)
|
||||
{
|
||||
ASSERT(dmu_tx_is_syncing(tx));
|
||||
ASSERT(ds->ds_objset != NULL);
|
||||
ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
|
||||
ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
|
||||
|
||||
/*
|
||||
* in case we had to change ds_fsid_guid when we opened it,
|
||||
@ -4180,7 +4180,7 @@ dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
|
||||
dsl_pool_t *dp = dmu_tx_pool(tx);
|
||||
int64_t unused_refres_delta;
|
||||
|
||||
ASSERT(clone->ds_reserved == 0);
|
||||
ASSERT0(clone->ds_reserved);
|
||||
/*
|
||||
* NOTE: On DEBUG kernels there could be a race between this and
|
||||
* the check function if spa_asize_inflation is adjusted...
|
||||
|
||||
@ -102,7 +102,7 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
|
||||
nvlist_t *perms;
|
||||
nvpair_t *permpair = NULL;
|
||||
|
||||
VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);
|
||||
VERIFY0(nvpair_value_nvlist(whopair, &perms));
|
||||
|
||||
while ((permpair = nvlist_next_nvpair(perms, permpair))) {
|
||||
const char *perm = nvpair_name(permpair);
|
||||
@ -189,8 +189,7 @@ dsl_deleg_set_sync(void *arg, dmu_tx_t *tx)
|
||||
const char *perm = nvpair_name(permpair);
|
||||
uint64_t n = 0;
|
||||
|
||||
VERIFY(zap_update(mos, jumpobj,
|
||||
perm, 8, 1, &n, tx) == 0);
|
||||
VERIFY0(zap_update(mos, jumpobj, perm, 8, 1, &n, tx));
|
||||
spa_history_log_internal_dd(dd, "permission update", tx,
|
||||
"%s %s", whokey, perm);
|
||||
}
|
||||
@ -225,7 +224,7 @@ dsl_deleg_unset_sync(void *arg, dmu_tx_t *tx)
|
||||
if (zap_lookup(mos, zapobj, whokey, 8,
|
||||
1, &jumpobj) == 0) {
|
||||
(void) zap_remove(mos, zapobj, whokey, tx);
|
||||
VERIFY(0 == zap_destroy(mos, jumpobj, tx));
|
||||
VERIFY0(zap_destroy(mos, jumpobj, tx));
|
||||
}
|
||||
spa_history_log_internal_dd(dd, "permission who remove",
|
||||
tx, "%s", whokey);
|
||||
@ -243,7 +242,7 @@ dsl_deleg_unset_sync(void *arg, dmu_tx_t *tx)
|
||||
if (zap_count(mos, jumpobj, &n) == 0 && n == 0) {
|
||||
(void) zap_remove(mos, zapobj,
|
||||
whokey, tx);
|
||||
VERIFY(0 == zap_destroy(mos,
|
||||
VERIFY0(zap_destroy(mos,
|
||||
jumpobj, tx));
|
||||
}
|
||||
spa_history_log_internal_dd(dd, "permission remove", tx,
|
||||
@ -332,7 +331,7 @@ dsl_deleg_get(const char *ddname, nvlist_t **nvp)
|
||||
basezc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
|
||||
baseza = zap_attribute_alloc();
|
||||
source = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
|
||||
VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
for (dd = startdd; dd != NULL; dd = dd->dd_parent) {
|
||||
nvlist_t *sp_nvp;
|
||||
@ -706,7 +705,7 @@ copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
|
||||
ZFS_DELEG_LOCAL, &uid);
|
||||
if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) == ENOENT) {
|
||||
jumpobj = zap_create(mos, DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
|
||||
VERIFY(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx) == 0);
|
||||
VERIFY0(zap_add(mos, zapobj, whokey, 8, 1, &jumpobj, tx));
|
||||
}
|
||||
|
||||
za = zap_attribute_alloc();
|
||||
@ -716,8 +715,7 @@ copy_create_perms(dsl_dir_t *dd, uint64_t pzapobj,
|
||||
uint64_t zero = 0;
|
||||
ASSERT(za->za_integer_length == 8 && za->za_num_integers == 1);
|
||||
|
||||
VERIFY(zap_update(mos, jumpobj, za->za_name,
|
||||
8, 1, &zero, tx) == 0);
|
||||
VERIFY0(zap_update(mos, jumpobj, za->za_name, 8, 1, &zero, tx));
|
||||
}
|
||||
zap_cursor_fini(&zc);
|
||||
zap_attribute_free(za);
|
||||
@ -761,10 +759,10 @@ dsl_deleg_destroy(objset_t *mos, uint64_t zapobj, dmu_tx_t *tx)
|
||||
zap_cursor_retrieve(&zc, za) == 0;
|
||||
zap_cursor_advance(&zc)) {
|
||||
ASSERT(za->za_integer_length == 8 && za->za_num_integers == 1);
|
||||
VERIFY(0 == zap_destroy(mos, za->za_first_integer, tx));
|
||||
VERIFY0(zap_destroy(mos, za->za_first_integer, tx));
|
||||
}
|
||||
zap_cursor_fini(&zc);
|
||||
VERIFY(0 == zap_destroy(mos, zapobj, tx));
|
||||
VERIFY0(zap_destroy(mos, zapobj, tx));
|
||||
zap_attribute_free(za);
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -525,7 +525,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
|
||||
|
||||
/* remove from snapshot namespace */
|
||||
dsl_dataset_t *ds_head;
|
||||
ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
|
||||
ASSERT0(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
|
||||
VERIFY0(dsl_dataset_hold_obj(dp,
|
||||
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
|
||||
VERIFY0(dsl_dataset_get_snapname(ds));
|
||||
|
||||
@ -151,8 +151,8 @@ dsl_dir_evict_async(void *dbu)
|
||||
|
||||
for (t = 0; t < TXG_SIZE; t++) {
|
||||
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
|
||||
ASSERT(dd->dd_tempreserved[t] == 0);
|
||||
ASSERT(dd->dd_space_towrite[t] == 0);
|
||||
ASSERT0(dd->dd_tempreserved[t]);
|
||||
ASSERT0(dd->dd_space_towrite[t]);
|
||||
}
|
||||
|
||||
if (dd->dd_parent)
|
||||
|
||||
@ -522,8 +522,8 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops __attribute__((unused)),
|
||||
|
||||
/* create and open the free_bplist */
|
||||
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
|
||||
VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
|
||||
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
|
||||
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
|
||||
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
|
||||
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
|
||||
dp->dp_meta_objset, obj));
|
||||
}
|
||||
@ -1250,7 +1250,7 @@ dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
|
||||
{
|
||||
objset_t *mos = dp->dp_meta_objset;
|
||||
|
||||
ASSERT(dp->dp_tmp_userrefs_obj == 0);
|
||||
ASSERT0(dp->dp_tmp_userrefs_obj);
|
||||
ASSERT(dmu_tx_is_syncing(tx));
|
||||
|
||||
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
|
||||
|
||||
@ -815,7 +815,7 @@ dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
|
||||
*/
|
||||
err = zap_update(mos, zapobj, recvdstr,
|
||||
intsz, numints, value, tx);
|
||||
ASSERT(err == 0);
|
||||
ASSERT0(err);
|
||||
break;
|
||||
case (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED):
|
||||
/*
|
||||
@ -1166,7 +1166,7 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
|
||||
if (nvlist_exists(nv, propname))
|
||||
continue;
|
||||
|
||||
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
if (za->za_integer_length == 1) {
|
||||
/*
|
||||
* String property
|
||||
@ -1179,8 +1179,7 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
|
||||
kmem_free(tmp, za->za_num_integers);
|
||||
break;
|
||||
}
|
||||
VERIFY(nvlist_add_string(propval, ZPROP_VALUE,
|
||||
tmp) == 0);
|
||||
VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, tmp));
|
||||
kmem_free(tmp, za->za_num_integers);
|
||||
} else {
|
||||
/*
|
||||
@ -1191,8 +1190,8 @@ dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
|
||||
za->za_first_integer);
|
||||
}
|
||||
|
||||
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, source) == 0);
|
||||
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
|
||||
VERIFY0(nvlist_add_string(propval, ZPROP_SOURCE, source));
|
||||
VERIFY0(nvlist_add_nvlist(nv, propname, propval));
|
||||
nvlist_free(propval);
|
||||
}
|
||||
zap_cursor_fini(&zc);
|
||||
@ -1215,7 +1214,7 @@ dsl_prop_get_all_ds(dsl_dataset_t *ds, nvlist_t **nvp,
|
||||
int err = 0;
|
||||
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
|
||||
|
||||
VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
if (ds->ds_is_snapshot)
|
||||
flags |= DSL_PROP_GET_SNAPSHOT;
|
||||
@ -1333,18 +1332,18 @@ dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value)
|
||||
uint64_t default_value;
|
||||
|
||||
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
|
||||
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
|
||||
VERIFY0(nvlist_add_uint64(propval, ZPROP_VALUE, value));
|
||||
return;
|
||||
}
|
||||
|
||||
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
|
||||
VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
VERIFY0(nvlist_add_uint64(propval, ZPROP_VALUE, value));
|
||||
/* Indicate the default source if we can. */
|
||||
if (dodefault(prop, 8, 1, &default_value) == 0 &&
|
||||
value == default_value) {
|
||||
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, "") == 0);
|
||||
VERIFY0(nvlist_add_string(propval, ZPROP_SOURCE, ""));
|
||||
}
|
||||
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
|
||||
VERIFY0(nvlist_add_nvlist(nv, propname, propval));
|
||||
nvlist_free(propval);
|
||||
}
|
||||
|
||||
@ -1355,13 +1354,13 @@ dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value)
|
||||
const char *propname = zfs_prop_to_name(prop);
|
||||
|
||||
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
|
||||
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
|
||||
VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, value));
|
||||
return;
|
||||
}
|
||||
|
||||
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
|
||||
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
|
||||
VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, value));
|
||||
VERIFY0(nvlist_add_nvlist(nv, propname, propval));
|
||||
nvlist_free(propval);
|
||||
}
|
||||
|
||||
|
||||
@ -1784,7 +1784,7 @@ dsl_scan_zil_block(zilog_t *zilog, const blkptr_t *bp, void *arg,
|
||||
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
|
||||
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
|
||||
|
||||
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
|
||||
VERIFY0(scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -1820,7 +1820,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
|
||||
lr->lr_foid, ZB_ZIL_LEVEL,
|
||||
lr->lr_offset / BP_GET_LSIZE(bp));
|
||||
|
||||
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
|
||||
VERIFY0(scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -335,7 +335,7 @@ dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
|
||||
|
||||
dduha.dduha_holds = holds;
|
||||
/* chkholds can have non-unique name */
|
||||
VERIFY(0 == nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
|
||||
VERIFY0(nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
|
||||
dduha.dduha_errlist = errlist;
|
||||
dduha.dduha_minor = cleanup_minor;
|
||||
|
||||
|
||||
@ -337,7 +337,7 @@ zfs_zevent_next(zfs_zevent_t *ze, nvlist_t **event, uint64_t *event_size,
|
||||
}
|
||||
}
|
||||
|
||||
VERIFY(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE) == 0);
|
||||
VERIFY0(nvlist_size(ev->ev_nvl, &size, NV_ENCODE_NATIVE));
|
||||
if (size > *event_size) {
|
||||
*event_size = size;
|
||||
error = ENOMEM;
|
||||
|
||||
@ -456,10 +456,10 @@ metaslab_class_destroy(metaslab_class_t *mc)
|
||||
{
|
||||
spa_t *spa = mc->mc_spa;
|
||||
|
||||
ASSERT(mc->mc_alloc == 0);
|
||||
ASSERT(mc->mc_deferred == 0);
|
||||
ASSERT(mc->mc_space == 0);
|
||||
ASSERT(mc->mc_dspace == 0);
|
||||
ASSERT0(mc->mc_alloc);
|
||||
ASSERT0(mc->mc_deferred);
|
||||
ASSERT0(mc->mc_space);
|
||||
ASSERT0(mc->mc_dspace);
|
||||
|
||||
for (int i = 0; i < spa->spa_alloc_count; i++) {
|
||||
metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
|
||||
@ -5997,7 +5997,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
|
||||
}
|
||||
|
||||
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
|
||||
ASSERT(BP_GET_NDVAS(bp) == 0);
|
||||
ASSERT0(BP_GET_NDVAS(bp));
|
||||
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
|
||||
ASSERT3P(zal, !=, NULL);
|
||||
|
||||
@ -6029,7 +6029,7 @@ metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
|
||||
smallest_psize = MIN(cur_psize, smallest_psize);
|
||||
}
|
||||
}
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
ASSERT(BP_GET_NDVAS(bp) == ndvas);
|
||||
if (actual_psize)
|
||||
*actual_psize = smallest_psize;
|
||||
|
||||
@ -108,7 +108,7 @@ rrn_add(rrwlock_t *rrl, const void *tag)
|
||||
rn->rn_rrl = rrl;
|
||||
rn->rn_next = tsd_get(rrw_tsd_key);
|
||||
rn->rn_tag = tag;
|
||||
VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
|
||||
VERIFY0(tsd_set(rrw_tsd_key, rn));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -129,7 +129,7 @@ rrn_find_and_remove(rrwlock_t *rrl, const void *tag)
|
||||
if (prev)
|
||||
prev->rn_next = rn->rn_next;
|
||||
else
|
||||
VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
|
||||
VERIFY0(tsd_set(rrw_tsd_key, rn->rn_next));
|
||||
kmem_free(rn, sizeof (*rn));
|
||||
return (B_TRUE);
|
||||
}
|
||||
|
||||
@ -304,7 +304,7 @@ sa_get_spill(sa_handle_t *hdl)
|
||||
if (hdl->sa_spill == NULL) {
|
||||
if ((rc = dmu_spill_hold_existing(hdl->sa_bonus, NULL,
|
||||
&hdl->sa_spill)) == 0)
|
||||
VERIFY(0 == sa_build_index(hdl, SA_SPILL));
|
||||
VERIFY0(sa_build_index(hdl, SA_SPILL));
|
||||
} else {
|
||||
rc = 0;
|
||||
}
|
||||
@ -432,7 +432,7 @@ sa_add_layout_entry(objset_t *os, const sa_attr_type_t *attrs, int attr_count,
|
||||
|
||||
(void) snprintf(attr_name, sizeof (attr_name),
|
||||
"%d", (int)lot_num);
|
||||
VERIFY(0 == zap_update(os, os->os_sa->sa_layout_attr_obj,
|
||||
VERIFY0(zap_update(os, os->os_sa->sa_layout_attr_obj,
|
||||
attr_name, 2, attr_count, attrs, tx));
|
||||
}
|
||||
|
||||
@ -505,7 +505,7 @@ sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
|
||||
}
|
||||
|
||||
error = dbuf_spill_set_blksz(hdl->sa_spill, blocksize, tx);
|
||||
ASSERT(error == 0);
|
||||
ASSERT0(error);
|
||||
return (error);
|
||||
}
|
||||
|
||||
@ -717,7 +717,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
|
||||
|
||||
if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
|
||||
hdl->sa_spill->db_size)
|
||||
VERIFY(0 == sa_resize_spill(hdl,
|
||||
VERIFY0(sa_resize_spill(hdl,
|
||||
BUF_SPACE_NEEDED(spill_used, spillhdrsize), tx));
|
||||
}
|
||||
|
||||
@ -791,7 +791,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
|
||||
hdl->sa_bonus_tab = NULL;
|
||||
}
|
||||
if (!sa->sa_force_spill)
|
||||
VERIFY(0 == sa_build_index(hdl, SA_BONUS));
|
||||
VERIFY0(sa_build_index(hdl, SA_BONUS));
|
||||
if (hdl->sa_spill) {
|
||||
sa_idx_tab_rele(hdl->sa_os, hdl->sa_spill_tab);
|
||||
if (!spilling) {
|
||||
@ -801,10 +801,10 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
|
||||
dmu_buf_rele(hdl->sa_spill, NULL);
|
||||
hdl->sa_spill = NULL;
|
||||
hdl->sa_spill_tab = NULL;
|
||||
VERIFY(0 == dmu_rm_spill(hdl->sa_os,
|
||||
VERIFY0(dmu_rm_spill(hdl->sa_os,
|
||||
sa_handle_object(hdl), tx));
|
||||
} else {
|
||||
VERIFY(0 == sa_build_index(hdl, SA_SPILL));
|
||||
VERIFY0(sa_build_index(hdl, SA_SPILL));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1733,10 +1733,10 @@ sa_add_projid(sa_handle_t *hdl, dmu_tx_t *tx, uint64_t projid)
|
||||
NULL, dxattr_obj, dxattr_size);
|
||||
}
|
||||
|
||||
VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
|
||||
VERIFY(sa_replace_all_by_template_locked(hdl, attrs, count, tx) == 0);
|
||||
VERIFY0(dmu_set_bonustype(db, DMU_OT_SA, tx));
|
||||
VERIFY0(sa_replace_all_by_template_locked(hdl, attrs, count, tx));
|
||||
if (znode_acl.z_acl_extern_obj) {
|
||||
VERIFY(0 == dmu_object_free(zfsvfs->z_os,
|
||||
VERIFY0(dmu_object_free(zfsvfs->z_os,
|
||||
znode_acl.z_acl_extern_obj, tx));
|
||||
}
|
||||
|
||||
@ -1858,7 +1858,7 @@ sa_attr_register_sync(sa_handle_t *hdl, dmu_tx_t *tx)
|
||||
continue;
|
||||
ATTR_ENCODE(attr_value, tb[i].sa_attr, tb[i].sa_length,
|
||||
tb[i].sa_byteswap);
|
||||
VERIFY(0 == zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
|
||||
VERIFY0(zap_update(hdl->sa_os, sa->sa_reg_attr_obj,
|
||||
tb[i].sa_name, 8, 1, &attr_value, tx));
|
||||
tb[i].sa_registered = B_TRUE;
|
||||
}
|
||||
|
||||
@ -426,10 +426,10 @@ spa_prop_add_user(nvlist_t *nvl, const char *propname, char *strval,
|
||||
{
|
||||
nvlist_t *propval;
|
||||
|
||||
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
|
||||
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
|
||||
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
|
||||
VERIFY0(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
VERIFY0(nvlist_add_uint64(propval, ZPROP_SOURCE, src));
|
||||
VERIFY0(nvlist_add_string(propval, ZPROP_VALUE, strval));
|
||||
VERIFY0(nvlist_add_nvlist(nvl, propname, propval));
|
||||
nvlist_free(propval);
|
||||
}
|
||||
|
||||
@ -965,7 +965,7 @@ spa_prop_set(spa_t *spa, nvlist_t *nvp)
|
||||
uint64_t ver = 0;
|
||||
|
||||
if (prop == ZPOOL_PROP_VERSION) {
|
||||
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
|
||||
VERIFY0(nvpair_value_uint64(elem, &ver));
|
||||
} else {
|
||||
ASSERT(zpool_prop_feature(nvpair_name(elem)));
|
||||
ver = SPA_VERSION_FEATURES;
|
||||
@ -2418,8 +2418,8 @@ spa_load_spares(spa_t *spa)
|
||||
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
|
||||
KM_SLEEP);
|
||||
for (i = 0; i < spa->spa_spares.sav_count; i++) {
|
||||
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
|
||||
VDEV_ALLOC_SPARE) == 0);
|
||||
VERIFY0(spa_config_parse(spa, &vd, spares[i], NULL, 0,
|
||||
VDEV_ALLOC_SPARE));
|
||||
ASSERT(vd != NULL);
|
||||
|
||||
spa->spa_spares.sav_vdevs[i] = vd;
|
||||
@ -2546,8 +2546,8 @@ spa_load_l2cache(spa_t *spa)
|
||||
/*
|
||||
* Create new vdev
|
||||
*/
|
||||
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
|
||||
VDEV_ALLOC_L2CACHE) == 0);
|
||||
VERIFY0(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
|
||||
VDEV_ALLOC_L2CACHE));
|
||||
ASSERT(vd != NULL);
|
||||
newvdevs[i] = vd;
|
||||
|
||||
@ -9091,7 +9091,7 @@ spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
|
||||
int
|
||||
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
|
||||
{
|
||||
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
|
||||
ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
|
||||
|
||||
if (dsl_scan_resilvering(spa->spa_dsl_pool))
|
||||
return (SET_ERROR(EBUSY));
|
||||
@ -9102,7 +9102,7 @@ spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
|
||||
int
|
||||
spa_scan_stop(spa_t *spa)
|
||||
{
|
||||
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
|
||||
ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
|
||||
if (dsl_scan_resilvering(spa->spa_dsl_pool))
|
||||
return (SET_ERROR(EBUSY));
|
||||
|
||||
@ -9119,7 +9119,7 @@ int
|
||||
spa_scan_range(spa_t *spa, pool_scan_func_t func, uint64_t txgstart,
|
||||
uint64_t txgend)
|
||||
{
|
||||
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
|
||||
ASSERT0(spa_config_held(spa, SCL_ALL, RW_WRITER));
|
||||
|
||||
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
|
||||
return (SET_ERROR(ENOTSUP));
|
||||
@ -9548,7 +9548,7 @@ spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
|
||||
{
|
||||
zio_t *zio = zio_root(spa, NULL, NULL, 0);
|
||||
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
|
||||
VERIFY(zio_wait(zio) == 0);
|
||||
VERIFY0(zio_wait(zio));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -9587,7 +9587,7 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
|
||||
size_t nvsize = 0;
|
||||
dmu_buf_t *db;
|
||||
|
||||
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
|
||||
VERIFY0(nvlist_size(nv, &nvsize, NV_ENCODE_XDR));
|
||||
|
||||
/*
|
||||
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
|
||||
@ -9597,15 +9597,15 @@ spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
|
||||
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
|
||||
packed = vmem_alloc(bufsize, KM_SLEEP);
|
||||
|
||||
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
|
||||
KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
|
||||
KM_SLEEP));
|
||||
memset(packed + nvsize, 0, bufsize - nvsize);
|
||||
|
||||
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
|
||||
|
||||
vmem_free(packed, bufsize);
|
||||
|
||||
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
|
||||
VERIFY0(dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
|
||||
dmu_buf_will_dirty(db, tx);
|
||||
*(uint64_t *)db->db_data = nvsize;
|
||||
dmu_buf_rele(db, FTAG);
|
||||
|
||||
@ -472,8 +472,8 @@ spa_config_lock_destroy(spa_t *spa)
|
||||
mutex_destroy(&scl->scl_lock);
|
||||
cv_destroy(&scl->scl_cv);
|
||||
ASSERT(scl->scl_writer == NULL);
|
||||
ASSERT(scl->scl_write_wanted == 0);
|
||||
ASSERT(scl->scl_count == 0);
|
||||
ASSERT0(scl->scl_write_wanted);
|
||||
ASSERT0(scl->scl_count);
|
||||
}
|
||||
}
|
||||
|
||||
@ -784,24 +784,23 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
|
||||
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
|
||||
list_insert_head(&spa->spa_config_list, dp);
|
||||
|
||||
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
|
||||
KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
if (config != NULL) {
|
||||
nvlist_t *features;
|
||||
|
||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
|
||||
&features) == 0) {
|
||||
VERIFY(nvlist_dup(features, &spa->spa_label_features,
|
||||
0) == 0);
|
||||
VERIFY0(nvlist_dup(features,
|
||||
&spa->spa_label_features, 0));
|
||||
}
|
||||
|
||||
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
|
||||
VERIFY0(nvlist_dup(config, &spa->spa_config, 0));
|
||||
}
|
||||
|
||||
if (spa->spa_label_features == NULL) {
|
||||
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
|
||||
KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
|
||||
KM_SLEEP));
|
||||
}
|
||||
|
||||
spa->spa_min_ashift = INT_MAX;
|
||||
|
||||
@ -718,7 +718,7 @@ spa_mmp_history_set(spa_t *spa, uint64_t mmp_node_id, int io_error,
|
||||
for (smh = list_tail(&shl->procfs_list.pl_list); smh != NULL;
|
||||
smh = list_prev(&shl->procfs_list.pl_list, smh)) {
|
||||
if (smh->mmp_node_id == mmp_node_id) {
|
||||
ASSERT(smh->io_error == 0);
|
||||
ASSERT0(smh->io_error);
|
||||
smh->io_error = io_error;
|
||||
smh->duration = duration;
|
||||
error = 0;
|
||||
|
||||
@ -149,6 +149,6 @@ space_reftree_generate_map(avl_tree_t *t, zfs_range_tree_t *rt, int64_t minref)
|
||||
}
|
||||
}
|
||||
}
|
||||
ASSERT(refcnt == 0);
|
||||
ASSERT0(refcnt);
|
||||
ASSERT(start == -1ULL);
|
||||
}
|
||||
|
||||
@ -1464,7 +1464,7 @@ vdev_remove_parent(vdev_t *cvd)
|
||||
if (cvd == cvd->vdev_top)
|
||||
vdev_top_transfer(mvd, cvd);
|
||||
|
||||
ASSERT(mvd->vdev_children == 0);
|
||||
ASSERT0(mvd->vdev_children);
|
||||
vdev_free(mvd);
|
||||
}
|
||||
|
||||
@ -2134,14 +2134,14 @@ vdev_open(vdev_t *vd)
|
||||
* faulted, bail out of the open.
|
||||
*/
|
||||
if (!vd->vdev_removed && vd->vdev_faulted) {
|
||||
ASSERT(vd->vdev_children == 0);
|
||||
ASSERT0(vd->vdev_children);
|
||||
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
|
||||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
|
||||
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
|
||||
vd->vdev_label_aux);
|
||||
return (SET_ERROR(ENXIO));
|
||||
} else if (vd->vdev_offline) {
|
||||
ASSERT(vd->vdev_children == 0);
|
||||
ASSERT0(vd->vdev_children);
|
||||
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
|
||||
return (SET_ERROR(ENXIO));
|
||||
}
|
||||
@ -2197,7 +2197,7 @@ vdev_open(vdev_t *vd)
|
||||
* the vdev is accessible. If we're faulted, bail.
|
||||
*/
|
||||
if (vd->vdev_faulted) {
|
||||
ASSERT(vd->vdev_children == 0);
|
||||
ASSERT0(vd->vdev_children);
|
||||
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
|
||||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
|
||||
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
|
||||
@ -2206,7 +2206,7 @@ vdev_open(vdev_t *vd)
|
||||
}
|
||||
|
||||
if (vd->vdev_degraded) {
|
||||
ASSERT(vd->vdev_children == 0);
|
||||
ASSERT0(vd->vdev_children);
|
||||
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
|
||||
VDEV_AUX_ERR_EXCEEDED);
|
||||
} else {
|
||||
@ -5194,7 +5194,7 @@ vdev_stat_update(zio_t *zio, uint64_t psize)
|
||||
int64_t
|
||||
vdev_deflated_space(vdev_t *vd, int64_t space)
|
||||
{
|
||||
ASSERT((space & (SPA_MINBLOCKSIZE-1)) == 0);
|
||||
ASSERT0((space & (SPA_MINBLOCKSIZE-1)));
|
||||
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
|
||||
|
||||
return ((space >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio);
|
||||
@ -5286,8 +5286,8 @@ vdev_config_dirty(vdev_t *vd)
|
||||
|
||||
if (nvlist_lookup_nvlist_array(sav->sav_config,
|
||||
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
|
||||
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
|
||||
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
|
||||
VERIFY0(nvlist_lookup_nvlist_array(sav->sav_config,
|
||||
ZPOOL_CONFIG_SPARES, &aux, &naux));
|
||||
}
|
||||
|
||||
ASSERT(c < naux);
|
||||
@ -5675,7 +5675,7 @@ vdev_expand(vdev_t *vd, uint64_t txg)
|
||||
(vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
|
||||
vdev_is_concrete(vd)) {
|
||||
vdev_metaslab_group_create(vd);
|
||||
VERIFY(vdev_metaslab_init(vd, txg) == 0);
|
||||
VERIFY0(vdev_metaslab_init(vd, txg));
|
||||
vdev_config_dirty(vd);
|
||||
}
|
||||
}
|
||||
|
||||
@ -163,7 +163,7 @@ uint64_t
|
||||
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
|
||||
{
|
||||
ASSERT(offset < sizeof (vdev_label_t));
|
||||
ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
|
||||
ASSERT0(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t));
|
||||
|
||||
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
|
||||
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
|
||||
@ -768,12 +768,12 @@ vdev_top_config_generate(spa_t *spa, nvlist_t *config)
|
||||
}
|
||||
|
||||
if (idx) {
|
||||
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
|
||||
array, idx) == 0);
|
||||
VERIFY0(nvlist_add_uint64_array(config,
|
||||
ZPOOL_CONFIG_HOLE_ARRAY, array, idx));
|
||||
}
|
||||
|
||||
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
|
||||
rvd->vdev_children) == 0);
|
||||
VERIFY0(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
|
||||
rvd->vdev_children));
|
||||
|
||||
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
|
||||
}
|
||||
@ -1189,8 +1189,8 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
|
||||
* vdev uses as described above, and automatically expires if we
|
||||
* fail.
|
||||
*/
|
||||
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
|
||||
crtxg) == 0);
|
||||
VERIFY0(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
|
||||
crtxg));
|
||||
}
|
||||
|
||||
buf = vp->vp_nvlist;
|
||||
|
||||
@ -3363,7 +3363,7 @@ vdev_raidz_io_done_reconstruct_known_missing(zio_t *zio, raidz_map_t *rm,
|
||||
* also have been fewer parity errors than parity
|
||||
* columns or, again, we wouldn't be in this code path.
|
||||
*/
|
||||
ASSERT(parity_untried == 0);
|
||||
ASSERT0(parity_untried);
|
||||
ASSERT(parity_errors < rr->rr_firstdatacol);
|
||||
|
||||
/*
|
||||
|
||||
@ -344,10 +344,10 @@ spa_vdev_remove_aux(nvlist_t *config, const char *name, nvlist_t **dev,
|
||||
for (int i = 0, j = 0; i < count; i++) {
|
||||
if (dev[i] == dev_to_remove)
|
||||
continue;
|
||||
VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP));
|
||||
}
|
||||
|
||||
VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
|
||||
VERIFY0(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY));
|
||||
fnvlist_add_nvlist_array(config, name, (const nvlist_t * const *)newdev,
|
||||
count - 1);
|
||||
|
||||
|
||||
@ -921,7 +921,7 @@ fzap_add_cd(zap_name_t *zn,
|
||||
|
||||
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
|
||||
ASSERT(!zap->zap_ismicro);
|
||||
ASSERT(fzap_check(zn, integer_size, num_integers) == 0);
|
||||
ASSERT0(fzap_check(zn, integer_size, num_integers));
|
||||
|
||||
err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
|
||||
if (err != 0)
|
||||
@ -1386,7 +1386,7 @@ again:
|
||||
}
|
||||
err = zap_entry_read_name(zap, &zeh,
|
||||
za->za_name_len, za->za_name);
|
||||
ASSERT(err == 0);
|
||||
ASSERT0(err);
|
||||
|
||||
za->za_normalization_conflict =
|
||||
zap_entry_normalization_conflict(&zeh,
|
||||
|
||||
@ -346,7 +346,7 @@ zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
|
||||
{
|
||||
zap_name_t *zn = kmem_cache_alloc(zap_name_cache, KM_SLEEP);
|
||||
|
||||
ASSERT(zap->zap_normflags == 0);
|
||||
ASSERT0(zap->zap_normflags);
|
||||
zn->zn_zap = zap;
|
||||
zn->zn_key_intlen = sizeof (*key);
|
||||
zn->zn_key_orig = zn->zn_key_norm = key;
|
||||
@ -1876,7 +1876,7 @@ zap_cursor_serialize(zap_cursor_t *zc)
|
||||
return (-1ULL);
|
||||
if (zc->zc_zap == NULL)
|
||||
return (zc->zc_serialized);
|
||||
ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
|
||||
ASSERT0((zc->zc_hash & zap_maxcd(zc->zc_zap)));
|
||||
ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
|
||||
|
||||
/*
|
||||
@ -1911,7 +1911,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
|
||||
* we must add to the existing zc_cd, which may already
|
||||
* be 1 due to the zap_cursor_advance.
|
||||
*/
|
||||
ASSERT(zc->zc_hash == 0);
|
||||
ASSERT0(zc->zc_hash);
|
||||
hb = zap_hashbits(zc->zc_zap);
|
||||
zc->zc_hash = zc->zc_serialized << (64 - hb);
|
||||
zc->zc_cd += zc->zc_serialized >> hb;
|
||||
|
||||
@ -210,8 +210,8 @@ spa_features_check(spa_t *spa, boolean_t for_write,
|
||||
za->za_name, 1, MAXPATHLEN, buf) == 0)
|
||||
desc = buf;
|
||||
|
||||
VERIFY(nvlist_add_string(unsup_feat,
|
||||
za->za_name, desc) == 0);
|
||||
VERIFY0(nvlist_add_string(unsup_feat,
|
||||
za->za_name, desc));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,8 +112,7 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
|
||||
uint64_t fuid_size;
|
||||
|
||||
ASSERT(fuid_obj != 0);
|
||||
VERIFY(0 == dmu_bonus_hold(os, fuid_obj,
|
||||
FTAG, &db));
|
||||
VERIFY0(dmu_bonus_hold(os, fuid_obj, FTAG, &db));
|
||||
fuid_size = *(uint64_t *)db->db_data;
|
||||
dmu_buf_rele(db, FTAG);
|
||||
|
||||
@ -125,22 +124,21 @@ zfs_fuid_table_load(objset_t *os, uint64_t fuid_obj, avl_tree_t *idx_tree,
|
||||
int i;
|
||||
|
||||
packed = kmem_alloc(fuid_size, KM_SLEEP);
|
||||
VERIFY(dmu_read(os, fuid_obj, 0,
|
||||
fuid_size, packed, DMU_READ_PREFETCH) == 0);
|
||||
VERIFY(nvlist_unpack(packed, fuid_size,
|
||||
&nvp, 0) == 0);
|
||||
VERIFY(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
|
||||
&fuidnvp, &count) == 0);
|
||||
VERIFY0(dmu_read(os, fuid_obj, 0,
|
||||
fuid_size, packed, DMU_READ_PREFETCH));
|
||||
VERIFY0(nvlist_unpack(packed, fuid_size, &nvp, 0));
|
||||
VERIFY0(nvlist_lookup_nvlist_array(nvp, FUID_NVP_ARRAY,
|
||||
&fuidnvp, &count));
|
||||
|
||||
for (i = 0; i != count; i++) {
|
||||
fuid_domain_t *domnode;
|
||||
const char *domain;
|
||||
uint64_t idx;
|
||||
|
||||
VERIFY(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
|
||||
&domain) == 0);
|
||||
VERIFY(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
|
||||
&idx) == 0);
|
||||
VERIFY0(nvlist_lookup_string(fuidnvp[i], FUID_DOMAIN,
|
||||
&domain));
|
||||
VERIFY0(nvlist_lookup_uint64(fuidnvp[i], FUID_IDX,
|
||||
&idx));
|
||||
|
||||
domnode = kmem_alloc(sizeof (fuid_domain_t), KM_SLEEP);
|
||||
|
||||
@ -246,35 +244,33 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
|
||||
&zfsvfs->z_fuid_obj, tx) == 0);
|
||||
}
|
||||
|
||||
VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
numnodes = avl_numnodes(&zfsvfs->z_fuid_idx);
|
||||
fuids = kmem_alloc(numnodes * sizeof (void *), KM_SLEEP);
|
||||
for (i = 0, domnode = avl_first(&zfsvfs->z_fuid_domain); domnode; i++,
|
||||
domnode = AVL_NEXT(&zfsvfs->z_fuid_domain, domnode)) {
|
||||
VERIFY(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY(nvlist_add_uint64(fuids[i], FUID_IDX,
|
||||
domnode->f_idx) == 0);
|
||||
VERIFY(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0) == 0);
|
||||
VERIFY(nvlist_add_string(fuids[i], FUID_DOMAIN,
|
||||
domnode->f_ksid->kd_name) == 0);
|
||||
VERIFY0(nvlist_alloc(&fuids[i], NV_UNIQUE_NAME, KM_SLEEP));
|
||||
VERIFY0(nvlist_add_uint64(fuids[i], FUID_IDX,
|
||||
domnode->f_idx));
|
||||
VERIFY0(nvlist_add_uint64(fuids[i], FUID_OFFSET, 0));
|
||||
VERIFY0(nvlist_add_string(fuids[i], FUID_DOMAIN,
|
||||
domnode->f_ksid->kd_name));
|
||||
}
|
||||
fnvlist_add_nvlist_array(nvp, FUID_NVP_ARRAY,
|
||||
(const nvlist_t * const *)fuids, numnodes);
|
||||
for (i = 0; i != numnodes; i++)
|
||||
nvlist_free(fuids[i]);
|
||||
kmem_free(fuids, numnodes * sizeof (void *));
|
||||
VERIFY(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR) == 0);
|
||||
VERIFY0(nvlist_size(nvp, &nvsize, NV_ENCODE_XDR));
|
||||
packed = kmem_alloc(nvsize, KM_SLEEP);
|
||||
VERIFY(nvlist_pack(nvp, &packed, &nvsize,
|
||||
NV_ENCODE_XDR, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_pack(nvp, &packed, &nvsize, NV_ENCODE_XDR, KM_SLEEP));
|
||||
nvlist_free(nvp);
|
||||
zfsvfs->z_fuid_size = nvsize;
|
||||
dmu_write(zfsvfs->z_os, zfsvfs->z_fuid_obj, 0,
|
||||
zfsvfs->z_fuid_size, packed, tx);
|
||||
kmem_free(packed, zfsvfs->z_fuid_size);
|
||||
VERIFY(0 == dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj,
|
||||
FTAG, &db));
|
||||
VERIFY0(dmu_bonus_hold(zfsvfs->z_os, zfsvfs->z_fuid_obj, FTAG, &db));
|
||||
dmu_buf_will_dirty(db, tx);
|
||||
*(uint64_t *)db->db_data = zfsvfs->z_fuid_size;
|
||||
dmu_buf_rele(db, FTAG);
|
||||
|
||||
@ -1493,7 +1493,7 @@ zfs_ioc_pool_create(zfs_cmd_t *zc)
|
||||
goto pool_props_bad;
|
||||
(void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS);
|
||||
|
||||
VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
error = zfs_fill_zplprops_root(version, rootprops,
|
||||
zplprops, NULL);
|
||||
if (error != 0)
|
||||
@ -2245,7 +2245,7 @@ nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop)
|
||||
*/
|
||||
if ((error = zfs_get_zplprop(os, prop, &value)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0);
|
||||
VERIFY0(nvlist_add_uint64(props, zfs_prop_to_name(prop), value));
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -2280,7 +2280,7 @@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
|
||||
dmu_objset_type(os) == DMU_OST_ZFS) {
|
||||
nvlist_t *nv;
|
||||
|
||||
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 &&
|
||||
(err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 &&
|
||||
(err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 &&
|
||||
@ -2483,7 +2483,7 @@ zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
|
||||
|
||||
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
|
||||
nvlist_t *attrs;
|
||||
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
|
||||
VERIFY0(nvpair_value_nvlist(pair, &attrs));
|
||||
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
|
||||
&pair) != 0)
|
||||
return (SET_ERROR(EINVAL));
|
||||
@ -2538,9 +2538,8 @@ zfs_prop_set_special(const char *dsname, zprop_source_t source,
|
||||
|
||||
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
|
||||
nvlist_t *attrs;
|
||||
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
|
||||
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
|
||||
&pair) == 0);
|
||||
VERIFY0(nvpair_value_nvlist(pair, &attrs));
|
||||
VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair));
|
||||
}
|
||||
|
||||
/* all special properties are numeric except for keylocation */
|
||||
@ -2932,14 +2931,14 @@ props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops)
|
||||
{
|
||||
nvpair_t *pair;
|
||||
|
||||
VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
pair = NULL;
|
||||
while ((pair = nvlist_next_nvpair(props, pair)) != NULL) {
|
||||
if (nvlist_exists(skipped, nvpair_name(pair)))
|
||||
continue;
|
||||
|
||||
VERIFY(nvlist_add_nvpair(*newprops, pair) == 0);
|
||||
VERIFY0(nvlist_add_nvpair(*newprops, pair));
|
||||
}
|
||||
}
|
||||
|
||||
@ -3064,11 +3063,11 @@ zfs_ioc_inherit_prop(zfs_cmd_t *zc)
|
||||
|
||||
switch (type) {
|
||||
case PROP_TYPE_STRING:
|
||||
VERIFY(0 == nvlist_add_string(dummy, propname, ""));
|
||||
VERIFY0(nvlist_add_string(dummy, propname, ""));
|
||||
break;
|
||||
case PROP_TYPE_NUMBER:
|
||||
case PROP_TYPE_INDEX:
|
||||
VERIFY(0 == nvlist_add_uint64(dummy, propname, 0));
|
||||
VERIFY0(nvlist_add_uint64(dummy, propname, 0));
|
||||
break;
|
||||
default:
|
||||
err = SET_ERROR(EINVAL);
|
||||
@ -3454,14 +3453,14 @@ zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
|
||||
/*
|
||||
* Put the version in the zplprops
|
||||
*/
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_VERSION), zplver));
|
||||
|
||||
if (norm == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm));
|
||||
|
||||
/*
|
||||
* If we're normalizing, names must always be valid UTF-8 strings.
|
||||
@ -3471,55 +3470,55 @@ zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
|
||||
if (u8 == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8));
|
||||
|
||||
if (sense == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_CASE), sense));
|
||||
|
||||
if (duq == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTUSERQUOTA, &duq)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTUSERQUOTA), duq) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTUSERQUOTA), duq));
|
||||
|
||||
if (dgq == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTGROUPQUOTA,
|
||||
&dgq)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPQUOTA), dgq) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPQUOTA), dgq));
|
||||
|
||||
if (dpq == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTPROJECTQUOTA,
|
||||
&dpq)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTQUOTA), dpq) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTQUOTA), dpq));
|
||||
|
||||
if (duoq == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTUSEROBJQUOTA,
|
||||
&duoq)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTUSEROBJQUOTA), duoq) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTUSEROBJQUOTA), duoq));
|
||||
|
||||
if (dgoq == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTGROUPOBJQUOTA,
|
||||
&dgoq)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPOBJQUOTA), dgoq) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTGROUPOBJQUOTA), dgoq));
|
||||
|
||||
if (dpoq == ZFS_PROP_UNDEFINED &&
|
||||
(error = zfs_get_zplprop(os, ZFS_PROP_DEFAULTPROJECTOBJQUOTA,
|
||||
&dpoq)) != 0)
|
||||
return (error);
|
||||
VERIFY(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTOBJQUOTA), dpoq) == 0);
|
||||
VERIFY0(nvlist_add_uint64(zplprops,
|
||||
zfs_prop_to_name(ZFS_PROP_DEFAULTPROJECTOBJQUOTA), dpoq));
|
||||
|
||||
if (is_ci)
|
||||
*is_ci = (sense == ZFS_CASE_INSENSITIVE);
|
||||
@ -3668,8 +3667,8 @@ zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
|
||||
* file system creation, so go figure them out
|
||||
* now.
|
||||
*/
|
||||
VERIFY(nvlist_alloc(&zct.zct_zplprops,
|
||||
NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&zct.zct_zplprops,
|
||||
NV_UNIQUE_NAME, KM_SLEEP));
|
||||
error = zfs_fill_zplprops(fsname, nvprops,
|
||||
zct.zct_zplprops, &is_insensitive);
|
||||
if (error != 0) {
|
||||
@ -4916,9 +4915,8 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
|
||||
* format.
|
||||
*/
|
||||
nvlist_t *attrs;
|
||||
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
|
||||
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
|
||||
&pair) == 0);
|
||||
VERIFY0(nvpair_value_nvlist(pair, &attrs));
|
||||
VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &pair));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -5103,7 +5101,7 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
|
||||
if (props == NULL)
|
||||
return (0);
|
||||
|
||||
VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP);
|
||||
(void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name));
|
||||
@ -5115,9 +5113,8 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
|
||||
sizeof (zc->zc_value));
|
||||
if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 ||
|
||||
(err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) {
|
||||
VERIFY(nvlist_remove_nvpair(props, pair) == 0);
|
||||
VERIFY(nvlist_add_int32(errors,
|
||||
zc->zc_value, err) == 0);
|
||||
VERIFY0(nvlist_remove_nvpair(props, pair));
|
||||
VERIFY0(nvlist_add_int32(errors, zc->zc_value, err));
|
||||
}
|
||||
pair = next_pair;
|
||||
}
|
||||
@ -5127,7 +5124,7 @@ zfs_check_clearable(const char *dataset, nvlist_t *props, nvlist_t **errlist)
|
||||
nvlist_free(errors);
|
||||
errors = NULL;
|
||||
} else {
|
||||
VERIFY(nvpair_value_int32(pair, &rv) == 0);
|
||||
VERIFY0(nvpair_value_int32(pair, &rv));
|
||||
}
|
||||
|
||||
if (errlist == NULL)
|
||||
@ -5144,16 +5141,14 @@ propval_equals(nvpair_t *p1, nvpair_t *p2)
|
||||
if (nvpair_type(p1) == DATA_TYPE_NVLIST) {
|
||||
/* dsl_prop_get_all_impl() format */
|
||||
nvlist_t *attrs;
|
||||
VERIFY(nvpair_value_nvlist(p1, &attrs) == 0);
|
||||
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
|
||||
&p1) == 0);
|
||||
VERIFY0(nvpair_value_nvlist(p1, &attrs));
|
||||
VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p1));
|
||||
}
|
||||
|
||||
if (nvpair_type(p2) == DATA_TYPE_NVLIST) {
|
||||
nvlist_t *attrs;
|
||||
VERIFY(nvpair_value_nvlist(p2, &attrs) == 0);
|
||||
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
|
||||
&p2) == 0);
|
||||
VERIFY0(nvpair_value_nvlist(p2, &attrs));
|
||||
VERIFY0(nvlist_lookup_nvpair(attrs, ZPROP_VALUE, &p2));
|
||||
}
|
||||
|
||||
if (nvpair_type(p1) != nvpair_type(p2))
|
||||
@ -5162,14 +5157,14 @@ propval_equals(nvpair_t *p1, nvpair_t *p2)
|
||||
if (nvpair_type(p1) == DATA_TYPE_STRING) {
|
||||
const char *valstr1, *valstr2;
|
||||
|
||||
VERIFY(nvpair_value_string(p1, &valstr1) == 0);
|
||||
VERIFY(nvpair_value_string(p2, &valstr2) == 0);
|
||||
VERIFY0(nvpair_value_string(p1, &valstr1));
|
||||
VERIFY0(nvpair_value_string(p2, &valstr2));
|
||||
return (strcmp(valstr1, valstr2) == 0);
|
||||
} else {
|
||||
uint64_t intval1, intval2;
|
||||
|
||||
VERIFY(nvpair_value_uint64(p1, &intval1) == 0);
|
||||
VERIFY(nvpair_value_uint64(p2, &intval2) == 0);
|
||||
VERIFY0(nvpair_value_uint64(p1, &intval1));
|
||||
VERIFY0(nvpair_value_uint64(p2, &intval2));
|
||||
return (intval1 == intval2);
|
||||
}
|
||||
}
|
||||
@ -5237,7 +5232,7 @@ extract_delay_props(nvlist_t *props)
|
||||
};
|
||||
int i;
|
||||
|
||||
VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
|
||||
VERIFY0(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP));
|
||||
|
||||
for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL;
|
||||
nvp = nvlist_next_nvpair(props, nvp)) {
|
||||
@ -5253,8 +5248,8 @@ extract_delay_props(nvlist_t *props)
|
||||
}
|
||||
if (delayable[i] != 0) {
|
||||
tmp = nvlist_prev_nvpair(props, nvp);
|
||||
VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0);
|
||||
VERIFY(nvlist_remove_nvpair(props, nvp) == 0);
|
||||
VERIFY0(nvlist_add_nvpair(delayprops, nvp));
|
||||
VERIFY0(nvlist_remove_nvpair(props, nvp));
|
||||
nvp = tmp;
|
||||
}
|
||||
}
|
||||
@ -5485,15 +5480,15 @@ zfs_ioc_recv_impl(char *tofs, char *tosnap, const char *origin,
|
||||
* using ASSERT() will be just like a VERIFY.
|
||||
*/
|
||||
if (recv_delayprops != NULL) {
|
||||
ASSERT(nvlist_merge(recvprops, recv_delayprops, 0) == 0);
|
||||
ASSERT0(nvlist_merge(recvprops, recv_delayprops, 0));
|
||||
nvlist_free(recv_delayprops);
|
||||
}
|
||||
if (local_delayprops != NULL) {
|
||||
ASSERT(nvlist_merge(localprops, local_delayprops, 0) == 0);
|
||||
ASSERT0(nvlist_merge(localprops, local_delayprops, 0));
|
||||
nvlist_free(local_delayprops);
|
||||
}
|
||||
if (inherited_delayprops != NULL) {
|
||||
ASSERT(nvlist_merge(localprops, inherited_delayprops, 0) == 0);
|
||||
ASSERT0(nvlist_merge(localprops, inherited_delayprops, 0));
|
||||
nvlist_free(inherited_delayprops);
|
||||
}
|
||||
*read_bytes = off - noff;
|
||||
|
||||
@ -374,7 +374,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
|
||||
if (*objp == 0) {
|
||||
*objp = zap_create(zfsvfs->z_os, DMU_OT_USERGROUP_QUOTA,
|
||||
DMU_OT_NONE, 0, tx);
|
||||
VERIFY(0 == zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
|
||||
VERIFY0(zap_add(zfsvfs->z_os, MASTER_NODE_OBJ,
|
||||
zfs_userquota_prop_prefixes[type], 8, 1, objp, tx));
|
||||
}
|
||||
mutex_exit(&zfsvfs->z_lock);
|
||||
@ -386,7 +386,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
|
||||
} else {
|
||||
err = zap_update(zfsvfs->z_os, *objp, buf, 8, 1, "a, tx);
|
||||
}
|
||||
ASSERT(err == 0);
|
||||
ASSERT0(err);
|
||||
if (fuid_dirtied)
|
||||
zfs_fuid_sync(zfsvfs, tx);
|
||||
dmu_tx_commit(tx);
|
||||
|
||||
@ -169,7 +169,7 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
|
||||
ASSERT(MUTEX_HELD(&zp->z_lock));
|
||||
VERIFY((xoap = xva_getxoptattr(xvap)) != NULL);
|
||||
if (zp->z_is_sa)
|
||||
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
|
||||
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SCANSTAMP(zfsvfs),
|
||||
&xoap->xoa_av_scanstamp,
|
||||
sizeof (xoap->xoa_av_scanstamp), tx));
|
||||
else {
|
||||
@ -181,12 +181,12 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
|
||||
len = sizeof (xoap->xoa_av_scanstamp) +
|
||||
ZFS_OLD_ZNODE_PHYS_SIZE;
|
||||
if (len > doi.doi_bonus_size)
|
||||
VERIFY(dmu_set_bonus(db, len, tx) == 0);
|
||||
VERIFY0(dmu_set_bonus(db, len, tx));
|
||||
(void) memcpy((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
|
||||
xoap->xoa_av_scanstamp, sizeof (xoap->xoa_av_scanstamp));
|
||||
|
||||
zp->z_pflags |= ZFS_BONUS_SCANSTAMP;
|
||||
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
|
||||
VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
|
||||
&zp->z_pflags, sizeof (uint64_t), tx));
|
||||
}
|
||||
}
|
||||
@ -427,11 +427,10 @@ zfs_sa_upgrade(sa_handle_t *hdl, dmu_tx_t *tx)
|
||||
zp->z_pflags &= ~ZFS_BONUS_SCANSTAMP;
|
||||
}
|
||||
|
||||
VERIFY(dmu_set_bonustype(db, DMU_OT_SA, tx) == 0);
|
||||
VERIFY(sa_replace_all_by_template_locked(hdl, sa_attrs,
|
||||
count, tx) == 0);
|
||||
VERIFY0(dmu_set_bonustype(db, DMU_OT_SA, tx));
|
||||
VERIFY0(sa_replace_all_by_template_locked(hdl, sa_attrs, count, tx));
|
||||
if (znode_acl.z_acl_extern_obj)
|
||||
VERIFY(0 == dmu_object_free(zfsvfs->z_os,
|
||||
VERIFY0(dmu_object_free(zfsvfs->z_os,
|
||||
znode_acl.z_acl_extern_obj, tx));
|
||||
|
||||
zp->z_is_sa = B_TRUE;
|
||||
|
||||
@ -991,8 +991,8 @@ zil_create(zilog_t *zilog)
|
||||
*/
|
||||
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
|
||||
|
||||
ASSERT(zh->zh_claim_txg == 0);
|
||||
ASSERT(zh->zh_replay_seq == 0);
|
||||
ASSERT0(zh->zh_claim_txg);
|
||||
ASSERT0(zh->zh_replay_seq);
|
||||
|
||||
blk = zh->zh_log;
|
||||
|
||||
@ -1104,7 +1104,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
|
||||
zilog->zl_keep_first = keep_first;
|
||||
|
||||
if (!list_is_empty(&zilog->zl_lwb_list)) {
|
||||
ASSERT(zh->zh_claim_txg == 0);
|
||||
ASSERT0(zh->zh_claim_txg);
|
||||
VERIFY(!keep_first);
|
||||
while ((lwb = list_remove_head(&zilog->zl_lwb_list)) != NULL) {
|
||||
if (lwb->lwb_buf != NULL)
|
||||
@ -3773,7 +3773,7 @@ zil_sync(zilog_t *zilog, dmu_tx_t *tx)
|
||||
|
||||
mutex_enter(&zilog->zl_lock);
|
||||
|
||||
ASSERT(zilog->zl_stop_sync == 0);
|
||||
ASSERT0(zilog->zl_stop_sync);
|
||||
|
||||
if (*replayed_seq != 0) {
|
||||
ASSERT(zh->zh_replay_seq < *replayed_seq);
|
||||
@ -4386,7 +4386,7 @@ zil_replay(objset_t *os, void *arg,
|
||||
|
||||
zilog->zl_replay = B_TRUE;
|
||||
zilog->zl_replay_time = ddi_get_lbolt();
|
||||
ASSERT(zilog->zl_replay_blks == 0);
|
||||
ASSERT0(zilog->zl_replay_blks);
|
||||
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
|
||||
zh->zh_claim_txg, B_TRUE);
|
||||
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
|
||||
|
||||
@ -771,7 +771,7 @@ zio_add_child_impl(zio_t *pio, zio_t *cio, boolean_t first)
|
||||
else
|
||||
mutex_enter(&cio->io_lock);
|
||||
|
||||
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
|
||||
ASSERT0(pio->io_state[ZIO_WAIT_DONE]);
|
||||
|
||||
uint64_t *countp = pio->io_children[cio->io_child_type];
|
||||
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
|
||||
@ -955,8 +955,8 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
|
||||
zio_t *zio;
|
||||
|
||||
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
|
||||
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
|
||||
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
|
||||
ASSERT0(P2PHASE(psize, SPA_MINBLOCKSIZE));
|
||||
ASSERT0(P2PHASE(offset, SPA_MINBLOCKSIZE));
|
||||
|
||||
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
|
||||
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
|
||||
@ -1559,7 +1559,7 @@ zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
|
||||
{
|
||||
zio_t *zio;
|
||||
|
||||
ASSERT(vd->vdev_children == 0);
|
||||
ASSERT0(vd->vdev_children);
|
||||
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
|
||||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
|
||||
ASSERT3U(offset + size, <=, vd->vdev_psize);
|
||||
@ -1580,7 +1580,7 @@ zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
|
||||
{
|
||||
zio_t *zio;
|
||||
|
||||
ASSERT(vd->vdev_children == 0);
|
||||
ASSERT0(vd->vdev_children);
|
||||
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
|
||||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
|
||||
ASSERT3U(offset + size, <=, vd->vdev_psize);
|
||||
@ -3362,7 +3362,7 @@ zio_nop_write(zio_t *zio)
|
||||
zio_prop_t *zp = &zio->io_prop;
|
||||
|
||||
ASSERT(BP_IS_HOLE(bp));
|
||||
ASSERT(BP_GET_LEVEL(bp) == 0);
|
||||
ASSERT0(BP_GET_LEVEL(bp));
|
||||
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
|
||||
ASSERT(zp->zp_nopwrite);
|
||||
ASSERT(!zp->zp_dedup);
|
||||
@ -4559,8 +4559,8 @@ zio_vdev_io_start(zio_t *zio)
|
||||
|
||||
zio->io_delay = 0;
|
||||
|
||||
ASSERT(zio->io_error == 0);
|
||||
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
|
||||
ASSERT0(zio->io_error);
|
||||
ASSERT0(zio->io_child_error[ZIO_CHILD_VDEV]);
|
||||
|
||||
if (vd == NULL) {
|
||||
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
|
||||
@ -4903,7 +4903,7 @@ void
|
||||
zio_vdev_io_reissue(zio_t *zio)
|
||||
{
|
||||
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
|
||||
ASSERT(zio->io_error == 0);
|
||||
ASSERT0(zio->io_error);
|
||||
|
||||
zio->io_stage >>= 1;
|
||||
}
|
||||
@ -4920,7 +4920,7 @@ void
|
||||
zio_vdev_io_bypass(zio_t *zio)
|
||||
{
|
||||
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
|
||||
ASSERT(zio->io_error == 0);
|
||||
ASSERT0(zio->io_error);
|
||||
|
||||
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
|
||||
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
|
||||
@ -5298,7 +5298,7 @@ zio_ready(zio_t *zio)
|
||||
ASSERT(IO_IS_ALLOCATING(zio));
|
||||
ASSERT(BP_GET_BIRTH(bp) == zio->io_txg ||
|
||||
BP_IS_HOLE(bp) || (zio->io_flags & ZIO_FLAG_NOPWRITE));
|
||||
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
|
||||
ASSERT0(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY]);
|
||||
|
||||
zio->io_ready(zio);
|
||||
}
|
||||
@ -5448,7 +5448,7 @@ zio_done(zio_t *zio)
|
||||
|
||||
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
|
||||
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
|
||||
ASSERT(zio->io_children[c][w] == 0);
|
||||
ASSERT0(zio->io_children[c][w]);
|
||||
|
||||
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
|
||||
ASSERT(memcmp(zio->io_bp, &zio->io_bp_copy,
|
||||
|
||||
@ -215,7 +215,7 @@ zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
|
||||
spa_feature_t
|
||||
zio_checksum_to_feature(enum zio_checksum cksum)
|
||||
{
|
||||
VERIFY((cksum & ~ZIO_CHECKSUM_MASK) == 0);
|
||||
VERIFY0((cksum & ~ZIO_CHECKSUM_MASK));
|
||||
|
||||
switch (cksum) {
|
||||
case ZIO_CHECKSUM_BLAKE3:
|
||||
|
||||
@ -215,8 +215,8 @@ zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
int error;
uint64_t volblocksize, volsize;

VERIFY(nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
VERIFY0(nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize));
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

@ -225,21 +225,20 @@ zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
VERIFY(nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
VERIFY0(nvlist_remove_all(nvprops, zfs_prop_to_name(ZFS_PROP_VOLSIZE)));
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
ASSERT0(error);

error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
ASSERT0(error);

error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
ASSERT(error == 0);
ASSERT0(error);
}

/*
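For readers unfamiliar with the idiom this change standardizes on, a minimal sketch follows. It uses simplified stand-in macros and a hypothetical fake_read() helper; the real VERIFY/VERIFY0 and ASSERT/ASSERT0 definitions live in the OpenZFS SPL debug headers and report more context on failure, so treat this only as an illustration of why VERIFY0(expr) is preferred over VERIFY(expr == 0): the zero-expecting form can report the actual non-zero value (typically an errno-style return code) when it trips.

#include <stdio.h>
#include <stdlib.h>

/* Old style: check a whole condition; on failure only its text is known. */
#define	VERIFY(cond) do {						\
	if (!(cond)) {							\
		(void) fprintf(stderr, "VERIFY(%s) failed\n", #cond);	\
		abort();						\
	}								\
} while (0)

/*
 * New style: assert that an expression (e.g. an error return) is zero,
 * and report the offending value when it is not.
 */
#define	VERIFY0(expr) do {						\
	long long _v = (long long)(expr);				\
	if (_v != 0) {							\
		(void) fprintf(stderr,					\
		    "VERIFY0(%s) failed: value %lld\n", #expr, _v);	\
		abort();						\
	}								\
} while (0)

/* Hypothetical stand-in for a 0-on-success call such as dmu_read(). */
static int
fake_read(int fail)
{
	return (fail ? 5 : 0);
}

int
main(void)
{
	VERIFY(fake_read(0) == 0);	/* the pattern this patch replaces */
	VERIFY0(fake_read(0));		/* the preferred, value-reporting form */
	(void) printf("both checks passed\n");
	return (0);
}

The ASSERT0() conversions in this diff follow the same pattern; the difference is that in OpenZFS the ASSERT* family is compiled out of non-debug builds, while VERIFY* checks always remain active.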