FreeBSD: Clean up ASSERT/VERIFY use in module

Convert use of ASSERT() to ASSERT0(), ASSERT3U(), ASSERT3S(),
ASSERT3P(), and likewise for VERIFY().  In some cases it made more
sense to change the code instead, such as converting VERIFY of nvlist
operations to use fnvlist.  In one place I changed an internal struct
member from int to boolean_t to match its use.  Some asserts that
combined multiple checks with && in a single assert have been split
into separate asserts, to make it apparent which check failed.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ryan Moeller <ryan@iXsystems.com>
Closes #11971
Authored by Ryan Moeller on 2021-04-30 19:36:10 -04:00; committed by GitHub
commit e4efb70950 (parent ec4f330816)
23 changed files with 233 additions and 238 deletions
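For readers unfamiliar with the ZFS debug macros, the hunks below apply roughly the following pattern. This is an illustrative sketch with made-up names (ptr, count, do_something), not code from the commit, and it assumes the module's usual debug-header definitions:

    /* Before: a failed ASSERT() only reports that the expression was false,
     * and a single assert can hide which of several &&-ed checks failed. */
    ASSERT(ptr != NULL && count == 0);
    VERIFY(do_something(ptr) == 0);

    /* After: one check per assert, using the typed three-argument macros
     * (3P for pointers, 3U/3S for unsigned/signed integers), which also
     * print the compared values on failure.  The VERIFY variants follow
     * the same naming but stay enabled in non-debug builds. */
    ASSERT3P(ptr, !=, NULL);
    ASSERT0(count);                 /* asserts the expression is zero */
    VERIFY0(do_something(ptr));     /* verify the call returns 0 */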


@@ -43,7 +43,6 @@
 #include <grp.h>
 #include <pwd.h>
 #include <acl_common.h>
-#define ASSERT assert
 #endif
 #define ACE_POSIX_SUPPORTED_BITS (ACE_READ_DATA | \
@@ -170,8 +169,9 @@ ksort(caddr_t v, int n, int s, int (*f)(void *, void *))
 return;
 /* Sanity check on arguments */
-ASSERT(((uintptr_t)v & 0x3) == 0 && (s & 0x3) == 0);
-ASSERT(s > 0);
+ASSERT3U(((uintptr_t)v & 0x3), ==, 0);
+ASSERT3S((s & 0x3), ==, 0);
+ASSERT3S(s, >, 0);
 for (g = n / 2; g > 0; g /= 2) {
 for (i = g; i < n; i++) {
 for (j = i - g; j >= 0 &&


@@ -75,7 +75,7 @@ typedef struct callb {
 typedef struct callb_table {
 kmutex_t ct_lock; /* protect all callb states */
 callb_t *ct_freelist; /* free callb structures */
-int ct_busy; /* != 0 prevents additions */
+boolean_t ct_busy; /* B_TRUE prevents additions */
 kcondvar_t ct_busy_cv; /* to wait for not busy */
 int ct_ncallb; /* num of callbs allocated */
 callb_t *ct_first_cb[NCBCLASS]; /* ptr to 1st callb in a class */
@@ -98,7 +98,7 @@ callb_cpr_t callb_cprinfo_safe = {
 static void
 callb_init(void *dummy __unused)
 {
-callb_table.ct_busy = 0; /* mark table open for additions */
+callb_table.ct_busy = B_FALSE; /* mark table open for additions */
 mutex_init(&callb_safe_mutex, NULL, MUTEX_DEFAULT, NULL);
 mutex_init(&callb_table.ct_lock, NULL, MUTEX_DEFAULT, NULL);
 }
@@ -139,7 +139,7 @@ callb_add_common(boolean_t (*func)(void *arg, int code),
 {
 callb_t *cp;
-ASSERT(class < NCBCLASS);
+ASSERT3S(class, <, NCBCLASS);
 mutex_enter(&ct->ct_lock);
 while (ct->ct_busy)
@@ -259,7 +259,7 @@ callb_execute_class(int class, int code)
 callb_t *cp;
 void *ret = NULL;
-ASSERT(class < NCBCLASS);
+ASSERT3S(class, <, NCBCLASS);
 mutex_enter(&ct->ct_lock);
@@ -351,8 +351,8 @@ void
 callb_lock_table(void)
 {
 mutex_enter(&ct->ct_lock);
-ASSERT(ct->ct_busy == 0);
-ct->ct_busy = 1;
+ASSERT(!ct->ct_busy);
+ct->ct_busy = B_TRUE;
 mutex_exit(&ct->ct_lock);
 }
@@ -363,8 +363,8 @@ void
 callb_unlock_table(void)
 {
 mutex_enter(&ct->ct_lock);
-ASSERT(ct->ct_busy != 0);
-ct->ct_busy = 0;
+ASSERT(ct->ct_busy);
+ct->ct_busy = B_FALSE;
 cv_broadcast(&ct->ct_busy_cv);
 mutex_exit(&ct->ct_lock);
 }
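The callb.c hunks above are the int-to-boolean_t member change called out in the commit message. A minimal sketch of the idiom, using a hypothetical struct rather than the real callb_table:

    typedef struct example_table {
    	boolean_t	et_busy;	/* B_TRUE prevents additions */
    } example_table_t;

    static void
    example_lock(example_table_t *et)
    {
    	ASSERT(!et->et_busy);	/* was: ASSERT(et->et_busy == 0); */
    	et->et_busy = B_TRUE;	/* was: et->et_busy = 1; */
    }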


@@ -61,9 +61,8 @@
 void
 list_create(list_t *list, size_t size, size_t offset)
 {
-ASSERT(list);
-ASSERT(size > 0);
-ASSERT(size >= offset + sizeof (list_node_t));
+ASSERT3P(list, !=, NULL);
+ASSERT3U(size, >=, offset + sizeof (list_node_t));
 list->list_size = size;
 list->list_offset = offset;
@@ -76,9 +75,9 @@ list_destroy(list_t *list)
 {
 list_node_t *node = &list->list_head;
-ASSERT(list);
-ASSERT(list->list_head.list_next == node);
-ASSERT(list->list_head.list_prev == node);
+ASSERT3P(list, !=, NULL);
+ASSERT3P(list->list_head.list_next, ==, node);
+ASSERT3P(list->list_head.list_prev, ==, node);
 node->list_next = node->list_prev = NULL;
 }
@@ -124,7 +123,7 @@ list_remove(list_t *list, void *object)
 {
 list_node_t *lold = list_d2l(list, object);
 ASSERT(!list_empty(list));
-ASSERT(lold->list_next != NULL);
+ASSERT3P(lold->list_next, !=, NULL);
 list_remove_node(lold);
 }
@@ -195,8 +194,8 @@ list_move_tail(list_t *dst, list_t *src)
 list_node_t *dstnode = &dst->list_head;
 list_node_t *srcnode = &src->list_head;
-ASSERT(dst->list_size == src->list_size);
-ASSERT(dst->list_offset == src->list_offset);
+ASSERT3U(dst->list_size, ==, src->list_size);
+ASSERT3U(dst->list_offset, ==, src->list_offset);
 if (list_empty(src))
 return;


@@ -112,7 +112,7 @@ zfs_kmem_free(void *buf, size_t size __unused)
 if (i == buf)
 break;
 }
-ASSERT(i != NULL);
+ASSERT3P(i, !=, NULL);
 LIST_REMOVE(i, next);
 mtx_unlock(&kmem_items_mtx);
 memset(buf, 0xDC, MAX(size, 16));
@@ -162,7 +162,7 @@ kmem_cache_create(char *name, size_t bufsize, size_t align,
 {
 kmem_cache_t *cache;
-ASSERT(vmp == NULL);
+ASSERT3P(vmp, ==, NULL);
 cache = kmem_alloc(sizeof (*cache), KM_SLEEP);
 strlcpy(cache->kc_name, name, sizeof (cache->kc_name));
@@ -324,7 +324,7 @@ void
 spl_kmem_cache_set_move(kmem_cache_t *skc,
 kmem_cbrc_t (move)(void *, void *, size_t, void *))
 {
-ASSERT(move != NULL);
+ASSERT3P(move, !=, NULL);
 }
 #ifdef KMEM_DEBUG


@@ -69,7 +69,7 @@ __kstat_set_seq_raw_ops(kstat_t *ksp,
 static int
 kstat_default_update(kstat_t *ksp, int rw)
 {
-ASSERT(ksp != NULL);
+ASSERT3P(ksp, !=, NULL);
 if (rw == KSTAT_WRITE)
 return (EACCES);
@@ -223,7 +223,7 @@ restart:
 sbuf_printf(sb, "%s", ksp->ks_raw_buf);
 } else {
-ASSERT(ksp->ks_ndata == 1);
+ASSERT3U(ksp->ks_ndata, ==, 1);
 sbuf_hexdump(sb, ksp->ks_data,
 ksp->ks_data_size, NULL, 0);
 }
@@ -250,7 +250,7 @@ __kstat_create(const char *module, int instance, const char *name,
 KASSERT(instance == 0, ("instance=%d", instance));
 if ((ks_type == KSTAT_TYPE_INTR) || (ks_type == KSTAT_TYPE_IO))
-ASSERT(ks_ndata == 1);
+ASSERT3U(ks_ndata, ==, 1);
 if (class == NULL)
 class = "misc";
@@ -461,7 +461,7 @@ kstat_install(kstat_t *ksp)
 struct sysctl_oid *root;
 if (ksp->ks_ndata == UINT32_MAX)
-VERIFY(ksp->ks_type == KSTAT_TYPE_RAW);
+VERIFY3U(ksp->ks_type, ==, KSTAT_TYPE_RAW);
 switch (ksp->ks_type) {
 case KSTAT_TYPE_NAMED:
@@ -493,7 +493,7 @@ kstat_install(kstat_t *ksp)
 default:
 panic("unsupported kstat type %d\n", ksp->ks_type);
 }
-VERIFY(root != NULL);
+VERIFY3P(root, !=, NULL);
 ksp->ks_sysctl_root = root;
 }
@@ -535,7 +535,7 @@ kstat_waitq_exit(kstat_io_t *kiop)
 delta = new - kiop->wlastupdate;
 kiop->wlastupdate = new;
 wcnt = kiop->wcnt--;
-ASSERT((int)wcnt > 0);
+ASSERT3S(wcnt, >, 0);
 kiop->wlentime += delta * wcnt;
 kiop->wtime += delta;
 }
@@ -566,7 +566,7 @@ kstat_runq_exit(kstat_io_t *kiop)
 delta = new - kiop->rlastupdate;
 kiop->rlastupdate = new;
 rcnt = kiop->rcnt--;
-ASSERT((int)rcnt > 0);
+ASSERT3S(rcnt, >, 0);
 kiop->rlentime += delta * rcnt;
 kiop->rtime += delta;
 }


@@ -102,6 +102,6 @@ kmem_asprintf(const char *fmt, ...)
 void
 kmem_strfree(char *str)
 {
-ASSERT(str != NULL);
+ASSERT3P(str, !=, NULL);
 kmem_free(str, strlen(str) + 1);
 }


@@ -245,7 +245,7 @@ sysevent_worker(void *arg __unused)
 if (error == ESHUTDOWN)
 break;
 } else {
-VERIFY(event != NULL);
+VERIFY3P(event, !=, NULL);
 log_sysevent(event);
 nvlist_free(event);
 }


@@ -48,7 +48,7 @@
 int
 zfs_uiomove(void *cp, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
 {
-ASSERT(zfs_uio_rw(uio) == dir);
+ASSERT3U(zfs_uio_rw(uio), ==, dir);
 return (uiomove(cp, (int)n, GET_UIO_STRUCT(uio)));
 }
@@ -102,6 +102,6 @@ zfs_uioskip(zfs_uio_t *uio, size_t n)
 int
 zfs_uio_fault_move(void *p, size_t n, zfs_uio_rw_t dir, zfs_uio_t *uio)
 {
-ASSERT(zfs_uio_rw(uio) == dir);
+ASSERT3U(zfs_uio_rw(uio), ==, dir);
 return (vn_io_fault_uiomove(p, n, GET_UIO_STRUCT(uio)));
 }


@@ -275,13 +275,13 @@ mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
 void
 vn_rele_async(vnode_t *vp, taskq_t *taskq)
 {
-VERIFY(vp->v_usecount > 0);
+VERIFY3U(vp->v_usecount, >, 0);
 if (refcount_release_if_not_last(&vp->v_usecount)) {
 #if __FreeBSD_version < 1300045
 vdrop(vp);
 #endif
 return;
 }
-VERIFY(taskq_dispatch((taskq_t *)taskq,
-(task_func_t *)vrele, vp, TQ_SLEEP) != 0);
+VERIFY3U(taskq_dispatch((taskq_t *)taskq,
+(task_func_t *)vrele, vp, TQ_SLEEP), !=, 0);
 }


@@ -168,8 +168,7 @@ abd_verify_scatter(abd_t *abd)
 * if an error if the ABD has been marked as a linear page.
 */
 ASSERT(!abd_is_linear_page(abd));
-ASSERT3U(ABD_SCATTER(abd).abd_offset, <,
-zfs_abd_chunk_size);
+ASSERT3U(ABD_SCATTER(abd).abd_offset, <, zfs_abd_chunk_size);
 n = abd_scatter_chunkcnt(abd);
 for (i = 0; i < n; i++) {
 ASSERT3P(ABD_SCATTER(abd).abd_chunks[i], !=, NULL);


@@ -120,7 +120,7 @@ dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
 dmu_buf_t *db = dbp[i];
 caddr_t va;
-ASSERT(size > 0);
+ASSERT3U(size, >, 0);
 ASSERT3U(db->db_size, >=, PAGESIZE);
 bufoff = offset - db->db_offset;
@@ -170,7 +170,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 int err;
 ASSERT3U(ma[0]->pindex + count - 1, ==, ma[count - 1]->pindex);
-ASSERT(last_size <= PAGE_SIZE);
+ASSERT3S(last_size, <=, PAGE_SIZE);
 err = dmu_buf_hold_array(os, object, IDX_TO_OFF(ma[0]->pindex),
 IDX_TO_OFF(count - 1) + last_size, TRUE, FTAG, &numbufs, &dbp);
@@ -182,7 +182,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 if (dbp[0]->db_offset != 0 || numbufs > 1) {
 for (i = 0; i < numbufs; i++) {
 ASSERT(ISP2(dbp[i]->db_size));
-ASSERT((dbp[i]->db_offset % dbp[i]->db_size) == 0);
+ASSERT3U((dbp[i]->db_offset % dbp[i]->db_size), ==, 0);
 ASSERT3U(dbp[i]->db_size, ==, dbp[0]->db_size);
 }
 }
@@ -202,10 +202,10 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 vm_page_do_sunbusy(m);
 break;
 }
-ASSERT(m->dirty == 0);
+ASSERT3U(m->dirty, ==, 0);
 ASSERT(!pmap_page_is_write_mapped(m));
-ASSERT(db->db_size > PAGE_SIZE);
+ASSERT3U(db->db_size, >, PAGE_SIZE);
 bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
 va = zfs_map_page(m, &sf);
 bcopy((char *)db->db_data + bufoff, va, PAGESIZE);
@@ -229,7 +229,7 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 if (m != bogus_page) {
 vm_page_assert_xbusied(m);
 ASSERT(vm_page_none_valid(m));
-ASSERT(m->dirty == 0);
+ASSERT3U(m->dirty, ==, 0);
 ASSERT(!pmap_page_is_write_mapped(m));
 va = zfs_map_page(m, &sf);
 }
@@ -248,25 +248,28 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 * end of file anyway.
 */
 tocpy = MIN(db->db_size - bufoff, PAGESIZE - pgoff);
+ASSERT3S(tocpy, >=, 0);
 if (m != bogus_page)
 bcopy((char *)db->db_data + bufoff, va + pgoff, tocpy);
 pgoff += tocpy;
-ASSERT(pgoff <= PAGESIZE);
+ASSERT3S(pgoff, >=, 0);
+ASSERT3S(pgoff, <=, PAGESIZE);
 if (pgoff == PAGESIZE) {
 if (m != bogus_page) {
 zfs_unmap_page(sf);
 vm_page_valid(m);
 }
-ASSERT(mi < count);
+ASSERT3S(mi, <, count);
 mi++;
 pgoff = 0;
 }
 bufoff += tocpy;
-ASSERT(bufoff <= db->db_size);
+ASSERT3S(bufoff, >=, 0);
+ASSERT3S(bufoff, <=, db->db_size);
 if (bufoff == db->db_size) {
-ASSERT(di < numbufs);
+ASSERT3S(di, <, numbufs);
 di++;
 bufoff = 0;
 }
@@ -286,23 +289,23 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 * with a size that is not a multiple of the page size.
 */
 if (mi == count) {
-ASSERT(di >= numbufs - 1);
+ASSERT3S(di, >=, numbufs - 1);
 IMPLY(*rahead != 0, di == numbufs - 1);
 IMPLY(*rahead != 0, bufoff != 0);
-ASSERT(pgoff == 0);
+ASSERT0(pgoff);
 }
 if (di == numbufs) {
-ASSERT(mi >= count - 1);
-ASSERT(*rahead == 0);
+ASSERT3S(mi, >=, count - 1);
+ASSERT0(*rahead);
 IMPLY(pgoff == 0, mi == count);
 if (pgoff != 0) {
-ASSERT(mi == count - 1);
-ASSERT((dbp[0]->db_size & PAGE_MASK) != 0);
+ASSERT3S(mi, ==, count - 1);
+ASSERT3U((dbp[0]->db_size & PAGE_MASK), !=, 0);
 }
 }
 #endif
 if (pgoff != 0) {
-ASSERT(m != bogus_page);
+ASSERT3P(m, !=, bogus_page);
 bzero(va + pgoff, PAGESIZE - pgoff);
 zfs_unmap_page(sf);
 vm_page_valid(m);
@@ -318,17 +321,17 @@ dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
 vm_page_do_sunbusy(m);
 break;
 }
-ASSERT(m->dirty == 0);
+ASSERT3U(m->dirty, ==, 0);
 ASSERT(!pmap_page_is_write_mapped(m));
-ASSERT(db->db_size > PAGE_SIZE);
+ASSERT3U(db->db_size, >, PAGE_SIZE);
 bufoff = IDX_TO_OFF(m->pindex) % db->db_size;
 tocpy = MIN(db->db_size - bufoff, PAGESIZE);
 va = zfs_map_page(m, &sf);
 bcopy((char *)db->db_data + bufoff, va, tocpy);
 if (tocpy < PAGESIZE) {
-ASSERT(i == *rahead - 1);
-ASSERT((db->db_size & PAGE_MASK) != 0);
+ASSERT3S(i, ==, *rahead - 1);
+ASSERT3U((db->db_size & PAGE_MASK), !=, 0);
 bzero(va + tocpy, PAGESIZE - tocpy);
 }
 zfs_unmap_page(sf);


@@ -95,8 +95,7 @@ spa_generate_rootconf(const char *name)
 for (i = 0; i < count; i++) {
 uint64_t txg;
-VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG,
-&txg) == 0);
+txg = fnvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG);
 if (txg > best_txg) {
 best_txg = txg;
 best_cfg = configs[i];
@@ -115,72 +114,66 @@ spa_generate_rootconf(const char *name)
 break;
 if (configs[i] == NULL)
 continue;
-VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE,
-&nvtop) == 0);
-nvlist_dup(nvtop, &tops[i], KM_SLEEP);
+nvtop = fnvlist_lookup_nvlist(configs[i],
+ZPOOL_CONFIG_VDEV_TREE);
+tops[i] = fnvlist_dup(nvtop);
 }
 for (i = 0; holes != NULL && i < nholes; i++) {
 if (i >= nchildren)
 continue;
 if (tops[holes[i]] != NULL)
 continue;
-nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP);
-VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
-VDEV_TYPE_HOLE) == 0);
-VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID,
-holes[i]) == 0);
-VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID,
-0) == 0);
+tops[holes[i]] = fnvlist_alloc();
+fnvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE,
+VDEV_TYPE_HOLE);
+fnvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, holes[i]);
+fnvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 0);
 }
 for (i = 0; i < nchildren; i++) {
 if (tops[i] != NULL)
 continue;
-nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP);
-VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
-VDEV_TYPE_MISSING) == 0);
-VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID,
-i) == 0);
-VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID,
-0) == 0);
+tops[i] = fnvlist_alloc();
+fnvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE,
+VDEV_TYPE_MISSING);
+fnvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, i);
+fnvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 0);
 }
 /*
 * Create pool config based on the best vdev config.
 */
-nvlist_dup(best_cfg, &config, KM_SLEEP);
+config = fnvlist_dup(best_cfg);
 /*
 * Put this pool's top-level vdevs into a root vdev.
 */
-VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
-&pgid) == 0);
-VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
-VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
-VDEV_TYPE_ROOT) == 0);
-VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0);
-VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0);
-VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
-tops, nchildren) == 0);
+pgid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
+nvroot = fnvlist_alloc();
+fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
+fnvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL);
+fnvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid);
+fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, tops,
+nchildren);
 /*
 * Replace the existing vdev_tree with the new root vdev in
 * this pool's configuration (remove the old, add the new).
 */
-VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
+fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
 /*
 * Drop vdev config elements that should not be present at pool level.
 */
-nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64);
-nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64);
+fnvlist_remove(config, ZPOOL_CONFIG_GUID);
+fnvlist_remove(config, ZPOOL_CONFIG_TOP_GUID);
 for (i = 0; i < count; i++)
-nvlist_free(configs[i]);
+fnvlist_free(configs[i]);
 kmem_free(configs, count * sizeof (void *));
 for (i = 0; i < nchildren; i++)
-nvlist_free(tops[i]);
+fnvlist_free(tops[i]);
 kmem_free(tops, nchildren * sizeof (void *));
-nvlist_free(nvroot);
+fnvlist_free(nvroot);
 return (config);
 }
@@ -201,10 +194,9 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
 mutex_enter(&spa_namespace_lock);
 if (config != NULL) {
-VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
-&pname) == 0 && strcmp(name, pname) == 0);
-VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg)
-== 0);
+pname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
+VERIFY0(strcmp(name, pname));
+txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);
 if ((spa = spa_lookup(pname)) != NULL) {
 /*
@@ -213,7 +205,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
 */
 if (spa->spa_state == POOL_STATE_ACTIVE) {
 mutex_exit(&spa_namespace_lock);
-nvlist_free(config);
+fnvlist_free(config);
 return (0);
 }
@@ -235,12 +227,12 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
 } else if ((spa = spa_lookup(name)) == NULL) {
 mutex_exit(&spa_namespace_lock);
-nvlist_free(config);
+fnvlist_free(config);
 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
 name);
 return (EIO);
 } else {
-VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0);
+config = fnvlist_dup(spa->spa_config);
 }
 spa->spa_is_root = B_TRUE;
 spa->spa_import_flags = ZFS_IMPORT_VERBATIM;
@@ -251,15 +243,14 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
 /*
 * Build up a vdev tree based on the boot device's label config.
 */
-VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
-&nvtop) == 0);
+nvtop = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0,
 VDEV_ALLOC_ROOTPOOL);
 spa_config_exit(spa, SCL_ALL, FTAG);
 if (error) {
 mutex_exit(&spa_namespace_lock);
-nvlist_free(config);
+fnvlist_free(config);
 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
 pname);
 return (error);
@@ -270,7 +261,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
 spa_config_exit(spa, SCL_ALL, FTAG);
 mutex_exit(&spa_namespace_lock);
-nvlist_free(config);
+fnvlist_free(config);
 return (0);
 }
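The hunks above are the nvlist-to-fnvlist conversion mentioned in the commit message: the fnvlist_* wrappers verify success internally and return the result directly, so callers no longer wrap each nvlist call in VERIFY(). A rough sketch of the pattern, adapted from the txg lookup shown above:

    /* Before: every fallible nvlist call is wrapped in VERIFY(... == 0). */
    uint64_t txg;
    VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

    /* After: the fnvlist wrapper panics on failure itself and returns the
     * looked-up value, which reads as a plain assignment. */
    uint64_t txg = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG);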


@@ -59,13 +59,13 @@ vdev_file_fini(void)
 static void
 vdev_file_hold(vdev_t *vd)
 {
-ASSERT(vd->vdev_path != NULL);
+ASSERT3P(vd->vdev_path, !=, NULL);
 }
 static void
 vdev_file_rele(vdev_t *vd)
 {
-ASSERT(vd->vdev_path != NULL);
+ASSERT3P(vd->vdev_path, !=, NULL);
 }
 static mode_t
@@ -137,7 +137,8 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
 * administrator has already decided that the pool should be available
 * to local zone users, so the underlying devices should be as well.
 */
-ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');
+ASSERT3P(vd->vdev_path, !=, NULL);
+ASSERT(vd->vdev_path[0] == '/');
 error = zfs_file_open(vd->vdev_path,
 vdev_file_open_mode(spa_mode(vd->vdev_spa)), 0, &fp);


@@ -396,8 +396,8 @@ vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
 p = datas[i];
 s = sizes[i];
 end = off + s;
-ASSERT((off % cp->provider->sectorsize) == 0);
-ASSERT((s % cp->provider->sectorsize) == 0);
+ASSERT0(off % cp->provider->sectorsize);
+ASSERT0(s % cp->provider->sectorsize);
 for (; off < end; off += maxio, p += maxio, s -= maxio, j++) {
 bios[j] = g_alloc_bio();
@@ -409,7 +409,7 @@ vdev_geom_io(struct g_consumer *cp, int *cmds, void **datas, off_t *offsets,
 g_io_request(bios[j], cp);
 }
 }
-ASSERT(j == n_bios);
+ASSERT3S(j, ==, n_bios);
 /* Wait for all of the bios to complete, and clean them up */
 for (i = j = 0; i < ncmds; i++) {
@@ -467,7 +467,7 @@ vdev_geom_read_config(struct g_consumer *cp, nvlist_t **configp)
 offsets[l] = vdev_label_offset(psize, l, 0) + VDEV_SKIP_SIZE;
 sizes[l] = size;
 errors[l] = 0;
-ASSERT(offsets[l] % pp->sectorsize == 0);
+ASSERT0(offsets[l] % pp->sectorsize);
 }
 /* Issue the IO requests */
@@ -557,7 +557,7 @@ process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
 if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
 goto ignore;
-VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
+txg = fnvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG);
 if (*known_pool_guid != 0) {
 if (pool_guid != *known_pool_guid)
@@ -568,8 +568,8 @@ process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
 resize_configs(configs, count, id);
 if ((*configs)[id] != NULL) {
-VERIFY(nvlist_lookup_uint64((*configs)[id],
-ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
+known_txg = fnvlist_lookup_uint64((*configs)[id],
+ZPOOL_CONFIG_POOL_TXG);
 if (txg <= known_txg)
 goto ignore;
 nvlist_free((*configs)[id]);
@@ -813,7 +813,7 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
 * Set the TLS to indicate downstack that we
 * should not access zvols
 */
-VERIFY(tsd_set(zfs_geom_probe_vdev_key, vd) == 0);
+VERIFY0(tsd_set(zfs_geom_probe_vdev_key, vd));
 /*
 * We must have a pathname, and it must be absolute.
@@ -873,7 +873,7 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
 }
 /* Clear the TLS now that tasting is done */
-VERIFY(tsd_set(zfs_geom_probe_vdev_key, NULL) == 0);
+VERIFY0(tsd_set(zfs_geom_probe_vdev_key, NULL));
 if (cp == NULL) {
 ZFS_LOG(1, "Vdev %s not found.", vd->vdev_path);
@@ -1160,7 +1160,7 @@ vdev_geom_io_done(zio_t *zio)
 struct bio *bp = zio->io_bio;
 if (zio->io_type != ZIO_TYPE_READ && zio->io_type != ZIO_TYPE_WRITE) {
-ASSERT(bp == NULL);
+ASSERT3P(bp, ==, NULL);
 return;
 }


@@ -52,7 +52,7 @@ vdev_label_write_pad2(vdev_t *vd, const char *buf, size_t size)
 if (vdev_is_dead(vd))
 return (ENXIO);
-ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
+ASSERT3U(spa_config_held(spa, SCL_ALL, RW_WRITER), ==, SCL_ALL);
 pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
 abd_zero(pad2, VDEV_PAD_SIZE);


@@ -354,7 +354,8 @@ zfs_external_acl(znode_t *zp)
 * after upgrade the SA_ZPL_ZNODE_ACL should have been
 * removed
 */
-VERIFY(zp->z_is_sa && error == ENOENT);
+VERIFY(zp->z_is_sa);
+VERIFY3S(error, ==, ENOENT);
 return (0);
 }
 }
@@ -427,7 +428,8 @@ zfs_znode_acl_version(znode_t *zp)
 * After upgrade SA_ZPL_ZNODE_ACL should have
 * been removed.
 */
-VERIFY(zp->z_is_sa && error == ENOENT);
+VERIFY(zp->z_is_sa);
+VERIFY3S(error, ==, ENOENT);
 return (ZFS_ACL_VERSION_FUID);
 }
 }
@@ -575,7 +577,7 @@ zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
 {
 zfs_acl_node_t *aclnode;
-ASSERT(aclp);
+ASSERT3P(aclp, !=, NULL);
 if (start == NULL) {
 aclnode = list_head(&aclp->z_acl);
@@ -804,7 +806,7 @@ zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
 void *cookie = NULL;
 zfs_acl_node_t *newaclnode;
-ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL);
+ASSERT3U(aclp->z_version, ==, ZFS_ACL_VERSION_INITIAL);
 /*
 * First create the ACE in a contiguous piece of memory
 * for zfs_copy_ace_2_fuid().
@@ -826,9 +828,9 @@ zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
 newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
 sizeof (zfs_object_ace_t));
 aclp->z_ops = &zfs_acl_fuid_ops;
-VERIFY(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
+VERIFY0(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
 oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
-&newaclnode->z_size, NULL, cr) == 0);
+&newaclnode->z_size, NULL, cr));
 newaclnode->z_ace_count = aclp->z_acl_count;
 aclp->z_version = ZFS_ACL_VERSION;
 kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
@@ -1204,7 +1206,7 @@ zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
 if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
 (zfsvfs->z_version >= ZPL_VERSION_FUID))
 zfs_acl_xform(zp, aclp, cr);
-ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
+ASSERT3U(aclp->z_version, >=, ZFS_ACL_VERSION_FUID);
 otype = DMU_OT_ACL;
 }
@@ -1560,8 +1562,8 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
 * Copy special opaque data if any
 */
 if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
-VERIFY((data2sz = aclp->z_ops->ace_data(acep,
-&data2)) == data1sz);
+data2sz = aclp->z_ops->ace_data(acep, &data2);
+VERIFY3U(data2sz, ==, data1sz);
 bcopy(data1, data2, data2sz);
 }
@@ -1630,7 +1632,7 @@ zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
 if (zfsvfs->z_replay == B_FALSE)
 ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
 } else
-ASSERT(dzp->z_vnode == NULL);
+ASSERT3P(dzp->z_vnode, ==, NULL);
 bzero(acl_ids, sizeof (zfs_acl_ids_t));
 acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
@@ -1849,8 +1851,8 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
 aclnode->z_size);
 start = (caddr_t)start + aclnode->z_size;
 }
-ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp ==
-aclp->z_acl_bytes);
+ASSERT3U((caddr_t)start - (caddr_t)vsecp->vsa_aclentp,
+==, aclp->z_acl_bytes);
 }
 }
 if (mask & VSA_ACE_ACLFLAGS) {
@@ -2009,8 +2011,8 @@ top:
 }
 error = zfs_aclset_common(zp, aclp, cr, tx);
-ASSERT(error == 0);
-ASSERT(zp->z_acl_cached == NULL);
+ASSERT0(error);
+ASSERT3P(zp->z_acl_cached, ==, NULL);
 zp->z_acl_cached = aclp;
 if (fuid_dirtied)
@@ -2123,7 +2125,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
 return (error);
 }
-ASSERT(zp->z_acl_cached);
+ASSERT3P(zp->z_acl_cached, !=, NULL);
 while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
 &iflags, &type))) {
@@ -2444,7 +2446,7 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
 */
 error = 0;
-ASSERT(working_mode != 0);
+ASSERT3U(working_mode, !=, 0);
 if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
 owner == crgetuid(cr)))
@@ -2610,7 +2612,8 @@ zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
 &zpcheck_privs, B_FALSE, cr)) == 0)
 return (0);
-ASSERT(dzp_error && zp_error);
+ASSERT(dzp_error);
+ASSERT(zp_error);
 if (!dzpcheck_privs)
 return (dzp_error);


@@ -352,7 +352,7 @@ zfsctl_create(zfsvfs_t *zfsvfs)
 vnode_t *rvp;
 uint64_t crtime[2];
-ASSERT(zfsvfs->z_ctldir == NULL);
+ASSERT3P(zfsvfs->z_ctldir, ==, NULL);
 snapdir = sfs_alloc_node(sizeof (*snapdir), "snapshot", ZFSCTL_INO_ROOT,
 ZFSCTL_INO_SNAPDIR);
@@ -360,8 +360,8 @@ zfsctl_create(zfsvfs_t *zfsvfs)
 ZFSCTL_INO_ROOT);
 dot_zfs->snapdir = snapdir;
-VERIFY(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp) == 0);
-VERIFY(0 == sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
+VERIFY0(VFS_ROOT(zfsvfs->z_vfs, LK_EXCLUSIVE, &rvp));
+VERIFY0(sa_lookup(VTOZ(rvp)->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
 &crtime, sizeof (crtime)));
 ZFS_TIME_DECODE(&dot_zfs->cmtime, crtime);
 vput(rvp);
@@ -637,7 +637,7 @@ zfsctl_root_lookup(struct vop_lookup_args *ap)
 int nameiop = ap->a_cnp->cn_nameiop;
 int err;
-ASSERT(dvp->v_type == VDIR);
+ASSERT3S(dvp->v_type, ==, VDIR);
 if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
 return (SET_ERROR(ENOTSUP));
@@ -673,7 +673,7 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
 zfs_uio_init(&uio, ap->a_uio);
-ASSERT(vp->v_type == VDIR);
+ASSERT3S(vp->v_type, ==, VDIR);
 error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
 &dots_offset);
@@ -918,7 +918,7 @@ zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
 int flags = cnp->cn_flags;
 int err;
-ASSERT(dvp->v_type == VDIR);
+ASSERT3S(dvp->v_type, ==, VDIR);
 if ((flags & ISLASTCN) != 0 && nameiop != LOOKUP)
 return (SET_ERROR(ENOTSUP));
@@ -1013,7 +1013,7 @@ zfsctl_snapdir_lookup(struct vop_lookup_args *ap)
 * make .zfs/snapshot/<snapname> accessible over NFS
 * without requiring manual mounts of <snapname>.
 */
-ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
+ASSERT3P(VTOZ(*vpp)->z_zfsvfs, !=, zfsvfs);
 VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
 /* Clear the root flag (set via VFS_ROOT) as well. */
@@ -1039,7 +1039,7 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
 zfs_uio_init(&uio, ap->a_uio);
-ASSERT(vp->v_type == VDIR);
+ASSERT3S(vp->v_type, ==, VDIR);
 error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
 &uio, &dots_offset);
@@ -1143,7 +1143,7 @@ zfsctl_snapshot_inactive(struct vop_inactive_args *ap)
 {
 vnode_t *vp = ap->a_vp;
-VERIFY(vrecycle(vp) == 1);
+VERIFY3S(vrecycle(vp), ==, 1);
 return (0);
 }
@@ -1248,7 +1248,7 @@ zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
 vnode_t *vp;
 int error;
-ASSERT(zfsvfs->z_ctldir != NULL);
+ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
 *zfsvfsp = NULL;
 error = sfs_vnode_get(vfsp, LK_EXCLUSIVE,
 ZFSCTL_INO_SNAPDIR, objsetid, &vp);
@@ -1280,7 +1280,7 @@ zfsctl_umount_snapshots(vfs_t *vfsp, int fflags, cred_t *cr)
 uint64_t cookie;
 int error;
-ASSERT(zfsvfs->z_ctldir != NULL);
+ASSERT3P(zfsvfs->z_ctldir, !=, NULL);
 cookie = 0;
 for (;;) {


@@ -273,10 +273,9 @@ zfs_unlinked_add(znode_t *zp, dmu_tx_t *tx)
 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
 ASSERT(zp->z_unlinked);
-ASSERT(zp->z_links == 0);
-VERIFY3U(0, ==,
-zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
+ASSERT3U(zp->z_links, ==, 0);
+VERIFY0(zap_add_int(zfsvfs->z_os, zfsvfs->z_unlinkedobj, zp->z_id, tx));
 dataset_kstats_update_nunlinks_kstat(&zfsvfs->z_kstat, 1);
 }
@@ -433,7 +432,7 @@ zfs_rmnode(znode_t *zp)
 uint64_t count;
 int error;
-ASSERT(zp->z_links == 0);
+ASSERT3U(zp->z_links, ==, 0);
 if (zfsvfs->z_replay == B_FALSE)
 ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
@@ -599,7 +598,7 @@ zfs_link_create(znode_t *dzp, const char *name, znode_t *zp, dmu_tx_t *tx,
 &zp->z_links, sizeof (zp->z_links));
 } else {
-ASSERT(zp->z_unlinked == 0);
+ASSERT(!zp->z_unlinked);
 }
 value = zfs_dirent(zp, zp->z_mode);
 error = zap_add(zp->z_zfsvfs->z_os, dzp->z_id, name,
@@ -758,7 +757,7 @@ zfs_link_destroy(znode_t *dzp, const char *name, znode_t *zp, dmu_tx_t *tx,
 count = 0;
 ASSERT0(error);
 } else {
-ASSERT(zp->z_unlinked == 0);
+ASSERT(!zp->z_unlinked);
 error = zfs_dropname(dzp, name, zp, tx, flag);
 if (error != 0)
 return (error);
@@ -806,7 +805,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
 int error;
 zfs_acl_ids_t acl_ids;
 boolean_t fuid_dirtied;
-uint64_t parent __unused;
+uint64_t parent __maybe_unused;
 *xvpp = NULL;
@@ -840,13 +839,11 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
 if (fuid_dirtied)
 zfs_fuid_sync(zfsvfs, tx);
-#ifdef ZFS_DEBUG
-error = sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
-&parent, sizeof (parent));
-ASSERT(error == 0 && parent == zp->z_id);
-#endif
+ASSERT0(sa_lookup(xzp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs), &parent,
+sizeof (parent)));
+ASSERT3U(parent, ==, zp->z_id);
-VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
+VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs), &xzp->z_id,
 sizeof (xzp->z_id), tx));
 (void) zfs_log_create(zfsvfs->z_log, tx, TX_MKXATTR, zp,


@@ -620,9 +620,9 @@ zfs_register_callbacks(vfs_t *vfsp)
 boolean_t do_xattr = B_FALSE;
 int error = 0;
-ASSERT(vfsp);
+ASSERT3P(vfsp, !=, NULL);
 zfsvfs = vfsp->vfs_data;
-ASSERT(zfsvfs);
+ASSERT3P(zfsvfs, !=, NULL);
 os = zfsvfs->z_os;
 /*
@@ -845,7 +845,7 @@ zfsvfs_init(zfsvfs_t *zfsvfs, objset_t *os)
 &zfsvfs->z_root);
 if (error != 0)
 return (error);
-ASSERT(zfsvfs->z_root != 0);
+ASSERT3U(zfsvfs->z_root, !=, 0);
 error = zap_lookup(os, MASTER_NODE_OBJ, ZFS_UNLINKED_SET, 8, 1,
 &zfsvfs->z_unlinkedobj);
@@ -1124,7 +1124,7 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
 mutex_destroy(&zfsvfs->z_znodes_lock);
 mutex_destroy(&zfsvfs->z_lock);
-ASSERT(zfsvfs->z_nr_znodes == 0);
+ASSERT3U(zfsvfs->z_nr_znodes, ==, 0);
 list_destroy(&zfsvfs->z_all_znodes);
 ZFS_TEARDOWN_DESTROY(zfsvfs);
 ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
@@ -1166,8 +1166,8 @@ zfs_domount(vfs_t *vfsp, char *osname)
 int error = 0;
 zfsvfs_t *zfsvfs;
-ASSERT(vfsp);
-ASSERT(osname);
+ASSERT3P(vfsp, !=, NULL);
+ASSERT3P(osname, !=, NULL);
 error = zfsvfs_create(osname, vfsp->mnt_flag & MNT_RDONLY, &zfsvfs);
 if (error)
@@ -1205,9 +1205,9 @@ zfs_domount(vfs_t *vfsp, char *osname)
 * because that's where other Solaris filesystems put it.
 */
 fsid_guid = dmu_objset_fsid_guid(zfsvfs->z_os);
-ASSERT((fsid_guid & ~((1ULL<<56)-1)) == 0);
+ASSERT3U((fsid_guid & ~((1ULL << 56) - 1)), ==, 0);
 vfsp->vfs_fsid.val[0] = fsid_guid;
-vfsp->vfs_fsid.val[1] = ((fsid_guid>>32) << 8) |
+vfsp->vfs_fsid.val[1] = ((fsid_guid >> 32) << 8) |
 (vfsp->mnt_vfc->vfc_typenum & 0xFF);
 /*
@@ -1591,11 +1591,11 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
 */
 mutex_enter(&zfsvfs->z_znodes_lock);
 for (zp = list_head(&zfsvfs->z_all_znodes); zp != NULL;
-zp = list_next(&zfsvfs->z_all_znodes, zp))
-if (zp->z_sa_hdl) {
-ASSERT(ZTOV(zp)->v_usecount >= 0);
+zp = list_next(&zfsvfs->z_all_znodes, zp)) {
+if (zp->z_sa_hdl != NULL) {
 zfs_znode_dmu_fini(zp);
 }
+}
 mutex_exit(&zfsvfs->z_znodes_lock);
 /*
@@ -1682,7 +1682,7 @@ zfs_umount(vfs_t *vfsp, int fflag)
 taskqueue_drain(zfsvfs_taskq->tq_queue,
 &zfsvfs->z_unlinked_drain_task);
-VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
+VERIFY0(zfsvfs_teardown(zfsvfs, B_TRUE));
 os = zfsvfs->z_os;
 /*
@@ -1944,7 +1944,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
 goto bail;
 ds->ds_dir->dd_activity_cancelled = B_FALSE;
-VERIFY(zfsvfs_setup(zfsvfs, B_FALSE) == 0);
+VERIFY0(zfsvfs_setup(zfsvfs, B_FALSE));
 zfs_set_fuid_feature(zfsvfs);
@@ -2157,7 +2157,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
 ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
 ASSERT0(error);
-VERIFY(0 == sa_set_sa_object(os, sa_obj));
+VERIFY0(sa_set_sa_object(os, sa_obj));
 sa_register_update_callback(os, zfs_sa_upgrade);
 }
@@ -2271,7 +2271,7 @@ zfs_get_vfs_flag_unmounted(objset_t *os)
 zfsvfs_t *zfvp;
 boolean_t unmounted = B_FALSE;
-ASSERT(dmu_objset_type(os) == DMU_OST_ZFS);
+ASSERT3U(dmu_objset_type(os), ==, DMU_OST_ZFS);
 mutex_enter(&os->os_user_ptr_lock);
 zfvp = dmu_objset_get_user(os);


@@ -265,7 +265,7 @@ zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
 if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
 ZTOV(zp)->v_type == VREG &&
 !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
-VERIFY(fs_vscan(vp, cr, 1) == 0);
+VERIFY0(fs_vscan(vp, cr, 1));
 ZFS_EXIT(zfsvfs);
 return (0);
@@ -473,9 +473,9 @@ update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
 caddr_t va;
 int off;
-ASSERT(vp->v_mount != NULL);
+ASSERT3P(vp->v_mount, !=, NULL);
 obj = vp->v_object;
-ASSERT(obj != NULL);
+ASSERT3P(obj, !=, NULL);
 off = start & PAGEOFFSET;
 zfs_vmobject_wlock_12(obj);
@@ -530,11 +530,11 @@ mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
 int len = nbytes;
 int error = 0;
-ASSERT(zfs_uio_segflg(uio) == UIO_NOCOPY);
-ASSERT(vp->v_mount != NULL);
+ASSERT3U(zfs_uio_segflg(uio), ==, UIO_NOCOPY);
+ASSERT3P(vp->v_mount, !=, NULL);
 obj = vp->v_object;
-ASSERT(obj != NULL);
-ASSERT((zfs_uio_offset(uio) & PAGEOFFSET) == 0);
+ASSERT3P(obj, !=, NULL);
+ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
 zfs_vmobject_wlock_12(obj);
 for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
@@ -611,9 +611,9 @@ mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
 int off;
 int error = 0;
-ASSERT(vp->v_mount != NULL);
+ASSERT3P(vp->v_mount, !=, NULL);
 obj = vp->v_object;
-ASSERT(obj != NULL);
+ASSERT3P(obj, !=, NULL);
 start = zfs_uio_offset(uio);
 off = start & PAGEOFFSET;
@@ -1413,7 +1413,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
 zfs_acl_ids_t acl_ids;
 boolean_t fuid_dirtied;
-ASSERT(vap->va_type == VDIR);
+ASSERT3U(vap->va_type, ==, VDIR);
 /*
 * If we have an ephemeral id, ACL, or XVATTR then
@@ -1921,7 +1921,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
 }
 outcount += reclen;
-ASSERT(outcount <= bufsize);
+ASSERT3S(outcount, <=, bufsize);
 /* Prefetch znode */
 if (prefetch)
@@ -2781,12 +2781,12 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
 new_mode = zp->z_mode;
 }
 err = zfs_acl_chown_setattr(zp);
-ASSERT(err == 0);
+ASSERT0(err);
 if (attrzp) {
 vn_seqc_write_begin(ZTOV(attrzp));
 err = zfs_acl_chown_setattr(attrzp);
 vn_seqc_write_end(ZTOV(attrzp));
-ASSERT(err == 0);
+ASSERT0(err);
 }
 }
@@ -2794,7 +2794,7 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
 &new_mode, sizeof (new_mode));
 zp->z_mode = new_mode;
-ASSERT3U((uintptr_t)aclp, !=, 0);
+ASSERT3P(aclp, !=, NULL);
 err = zfs_aclset_common(zp, aclp, cr, tx);
 ASSERT0(err);
 if (zp->z_acl_cached)
@@ -2880,7 +2880,7 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
 }
 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
-ASSERT(vp->v_type == VREG);
+ASSERT3S(vp->v_type, ==, VREG);
 zfs_xvattr_set(zp, xvap, tx);
 }
@@ -2902,7 +2902,7 @@ out:
 if (err == 0 && attrzp) {
 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
 xattr_count, tx);
-ASSERT(err2 == 0);
+ASSERT0(err2);
 }
 if (attrzp)
@@ -3430,8 +3430,8 @@ zfs_rename_(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
 * succeed; fortunately, it is very unlikely to
 * fail, since we just created it.
 */
-VERIFY3U(zfs_link_destroy(tdzp, tnm, szp, tx,
-ZRENAMING, NULL), ==, 0);
+VERIFY0(zfs_link_destroy(tdzp, tnm, szp, tx,
+ZRENAMING, NULL));
 }
 }
 if (error == 0) {
@@ -3535,7 +3535,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
 boolean_t fuid_dirtied;
 uint64_t txtype = TX_SYMLINK;
-ASSERT(vap->va_type == VLNK);
+ASSERT3S(vap->va_type, ==, VLNK);
 ZFS_ENTER(zfsvfs);
 ZFS_VERIFY_ZP(dzp);
@@ -3709,7 +3709,7 @@ zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
 uint64_t parent;
 uid_t owner;
-ASSERT(ZTOV(tdzp)->v_type == VDIR);
+ASSERT3S(ZTOV(tdzp)->v_type, ==, VDIR);
 ZFS_ENTER(zfsvfs);
 ZFS_VERIFY_ZP(tdzp);
@@ -4589,7 +4589,7 @@ zfs_freebsd_lookup(struct vop_lookup_args *ap, boolean_t cached)
 struct componentname *cnp = ap->a_cnp;
 char nm[NAME_MAX + 1];
-ASSERT(cnp->cn_namelen < sizeof (nm));
+ASSERT3U(cnp->cn_namelen, <, sizeof (nm));
 strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof (nm)));
 return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
@@ -5172,7 +5172,7 @@ zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
 znode_t *zp = VTOZ(vp);
 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
-ASSERT(zp != NULL);
+ASSERT3P(zp, !=, NULL);
 #if __FreeBSD_version < 1300042
 /* Destroy the vm object and flush associated pages. */


@@ -164,7 +164,7 @@ zfs_znode_cache_destructor(void *buf, void *arg)
 mutex_destroy(&zp->z_acl_lock);
 zfs_rangelock_fini(&zp->z_rangelock);
-ASSERT(zp->z_acl_cached == NULL);
+ASSERT3P(zp->z_acl_cached, ==, NULL);
 }
@@ -192,7 +192,7 @@ zfs_znode_init(void)
 /*
 * Initialize zcache
 */
-ASSERT(znode_uma_zone == NULL);
+ASSERT3P(znode_uma_zone, ==, NULL);
 znode_uma_zone = uma_zcreate("zfs_znode_cache",
 sizeof (znode_t), zfs_znode_cache_constructor_smr,
 zfs_znode_cache_destructor_smr, NULL, NULL, 0, 0);
@@ -219,7 +219,7 @@ zfs_znode_init(void)
 /*
 * Initialize zcache
 */
-ASSERT(znode_cache == NULL);
+ASSERT3P(znode_cache, ==, NULL);
 znode_cache = kmem_cache_create("zfs_znode_cache",
 sizeof (znode_t), 0, zfs_znode_cache_constructor,
 zfs_znode_cache_destructor, NULL, NULL, NULL, 0);
@@ -282,7 +282,7 @@ zfs_create_share_dir(zfsvfs_t *zfsvfs, dmu_tx_t *tx)
 sharezp->z_zfsvfs = zfsvfs;
 sharezp->z_is_sa = zfsvfs->z_use_sa;
-VERIFY(0 == zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
+VERIFY0(zfs_acl_ids_create(sharezp, IS_ROOT_NODE, &vattr,
 kcred, NULL, &acl_ids));
 zfs_mknode(sharezp, &vattr, tx, kcred, IS_ROOT_NODE, &zp, &acl_ids);
 ASSERT3P(zp, ==, sharezp);
@@ -345,10 +345,10 @@ zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
 ASSERT(!POINTER_IS_VALID(zp->z_zfsvfs) || (zfsvfs == zp->z_zfsvfs));
 ASSERT(MUTEX_HELD(ZFS_OBJ_MUTEX(zfsvfs, zp->z_id)));
-ASSERT(zp->z_sa_hdl == NULL);
-ASSERT(zp->z_acl_cached == NULL);
+ASSERT3P(zp->z_sa_hdl, ==, NULL);
+ASSERT3P(zp->z_acl_cached, ==, NULL);
 if (sa_hdl == NULL) {
-VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
+VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, zp,
 SA_HDL_SHARED, &zp->z_sa_hdl));
 } else {
 zp->z_sa_hdl = sa_hdl;
@@ -504,7 +504,8 @@ zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
 break;
 case VREG:
 if (parent == zfsvfs->z_shares_dir) {
-ASSERT(zp->z_uid == 0 && zp->z_gid == 0);
+ASSERT0(zp->z_uid);
+ASSERT0(zp->z_gid);
 vp->v_op = &zfs_shareops;
 }
 break;
@@ -570,7 +571,8 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 int cnt = 0;
 zfs_acl_locator_cb_t locate = { 0 };
-ASSERT(vap && ((vap->va_mask & AT_MODE) == AT_MODE));
+ASSERT3P(vap, !=, NULL);
+ASSERT3U((vap->va_mask & AT_MODE), ==, AT_MODE);
 if (zfsvfs->z_replay) {
 obj = vap->va_nodeid;
@@ -623,7 +625,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 }
 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
-VERIFY(0 == sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
+VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
 /*
 * If this is the root, fix up the half-initialized parent pointer
@@ -686,7 +688,7 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 }
 /* Now add in all of the "SA" attributes */
-VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
+VERIFY0(sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
 &sa_hdl));
 /*
@@ -773,11 +775,11 @@ zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
 acl_ids->z_fuid, acl_ids->z_fgid);
 }
-VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
+VERIFY0(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx));
 if (!(flag & IS_ROOT_NODE)) {
 *zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
-ASSERT(*zpp != NULL);
+ASSERT3P(*zpp, !=, NULL);
 } else {
 /*
 * If we are creating the root node, the "parent" we
@@ -822,7 +824,7 @@ zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
 xoptattr_t *xoap;
 xoap = xva_getxoptattr(xvap);
-ASSERT(xoap);
+ASSERT3P(xoap, !=, NULL);
 ASSERT_VOP_IN_SEQC(ZTOV(zp));
@@ -1081,7 +1083,7 @@ zfs_rezget(znode_t *zp)
 }
 mutex_exit(&zp->z_acl_lock);
-ASSERT(zp->z_sa_hdl == NULL);
+ASSERT3P(zp->z_sa_hdl, ==, NULL);
 err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
 if (err) {
 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
@@ -1193,9 +1195,9 @@ zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
 ZFS_OBJ_HOLD_ENTER(zfsvfs, obj);
 if (acl_obj) {
 VERIFY(!zp->z_is_sa);
-VERIFY(0 == dmu_object_free(os, acl_obj, tx));
+VERIFY0(dmu_object_free(os, acl_obj, tx));
 }
-VERIFY(0 == dmu_object_free(os, obj, tx));
+VERIFY0(dmu_object_free(os, obj, tx));
 zfs_znode_dmu_fini(zp);
 ZFS_OBJ_HOLD_EXIT(zfsvfs, obj);
 zfs_znode_free(zp);
@@ -1207,7 +1209,7 @@ zfs_zinactive(znode_t *zp)
 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
 uint64_t z_id = zp->z_id;
-ASSERT(zp->z_sa_hdl);
+ASSERT3P(zp->z_sa_hdl, !=, NULL);
 /*
 * Don't allow a zfs_zget() while were trying to release this znode
@@ -1244,7 +1246,7 @@ zfs_znode_free(znode_t *zp)
 char *symlink;
 #endif
-ASSERT(zp->z_sa_hdl == NULL);
+ASSERT3P(zp->z_sa_hdl, ==, NULL);
 zp->z_vnode = NULL;
 mutex_enter(&zfsvfs->z_znodes_lock);
 POINTER_INVALIDATE(&zp->z_zfsvfs);
@@ -1411,7 +1413,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 zp->z_size = end;
-VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
+VERIFY0(sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zp->z_zfsvfs),
 &zp->z_size, sizeof (zp->z_size), tx));
 vnode_pager_setsize(ZTOV(zp), end);
@@ -1529,7 +1531,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
 NULL, &zp->z_pflags, 8);
 }
-VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
+VERIFY0(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
 dmu_tx_commit(tx);
@@ -1606,7 +1608,7 @@ log:
 NULL, &zp->z_pflags, 8);
 zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
 error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
-ASSERT(error == 0);
+ASSERT0(error);
 zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
@@ -1639,7 +1641,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
 moid = MASTER_NODE_OBJ;
 error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
 DMU_OT_NONE, 0, tx);
-ASSERT(error == 0);
+ASSERT0(error);
 /*
 * Set starting attributes.
@@ -1651,8 +1653,8 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
 uint64_t val;
 char *name;
-ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
+ASSERT3S(nvpair_type(elem), ==, DATA_TYPE_UINT64);
VERIFY(nvpair_value_uint64(elem, &val) == 0); val = fnvpair_value_uint64(elem);
name = nvpair_name(elem); name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) { if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version) if (val < version)
@ -1660,13 +1662,13 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
} else { } else {
error = zap_update(os, moid, name, 8, 1, &val, tx); error = zap_update(os, moid, name, 8, 1, &val, tx);
} }
ASSERT(error == 0); ASSERT0(error);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0) if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val; norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0) else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val; sense = val;
} }
ASSERT(version != 0); ASSERT3U(version, !=, 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx); error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
/* /*
@ -1677,7 +1679,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE, sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx); DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx); error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT(error == 0); ASSERT0(error);
} else { } else {
sa_obj = 0; sa_obj = 0;
} }
@ -1687,7 +1689,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx); obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx); error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0); ASSERT0(error);
/* /*
* Create root znode. Create minimal znode/vnode/zfsvfs * Create root znode. Create minimal znode/vnode/zfsvfs
@ -1718,7 +1720,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END, error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table); &zfsvfs->z_attr_table);
ASSERT(error == 0); ASSERT0(error);
/* /*
* Fold case on file systems that are always or sometimes case * Fold case on file systems that are always or sometimes case
@ -1735,12 +1737,12 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL); mutex_init(&zfsvfs->z_hold_mtx[i], NULL, MUTEX_DEFAULT, NULL);
rootzp->z_zfsvfs = zfsvfs; rootzp->z_zfsvfs = zfsvfs;
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr, VERIFY0(zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids)); cr, NULL, &acl_ids));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids); zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp); ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx); error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0); ASSERT0(error);
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
POINTER_INVALIDATE(&rootzp->z_zfsvfs); POINTER_INVALIDATE(&rootzp->z_zfsvfs);
@ -1753,7 +1755,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
error = zfs_create_share_dir(zfsvfs, tx); error = zfs_create_share_dir(zfsvfs, tx);
ASSERT(error == 0); ASSERT0(error);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++) for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
mutex_destroy(&zfsvfs->z_hold_mtx[i]); mutex_destroy(&zfsvfs->z_hold_mtx[i]);
@ -1921,7 +1923,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
int is_xattrdir; int is_xattrdir;
if (prevdb) { if (prevdb) {
ASSERT(prevhdl != NULL); ASSERT3P(prevhdl, !=, NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG); zfs_release_sa_handle(prevhdl, prevdb, FTAG);
} }
@ -1947,7 +1949,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
complen = strlen(component); complen = strlen(component);
path -= complen; path -= complen;
ASSERT(path >= buf); ASSERT3P(path, >=, buf);
bcopy(component, path, complen); bcopy(component, path, complen);
obj = pobj; obj = pobj;
@ -1964,7 +1966,7 @@ zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
} }
if (sa_hdl != NULL && sa_hdl != hdl) { if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL); ASSERT3P(sa_db, !=, NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG); zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
} }
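
Besides the assert conversions, the zfs_create_fs() hunk above swaps the VERIFY-on-nvpair idiom for the fnvpair wrapper, which performs the VERIFY internally and hands back the value directly. A minimal user-space sketch of that calling pattern, assuming the OpenZFS libnvpair headers and library are available; the function name props_get_version and the "version" pair name are invented for this illustration and are not the kernel code above:

    #include <string.h>
    #include <libnvpair.h>

    /*
     * Walk an nvlist of uint64 properties and pull out a hypothetical
     * "version" entry.  fnvpair_value_uint64() VERIFYs success internally
     * and returns the value, replacing the older
     * VERIFY(nvpair_value_uint64(elem, &val) == 0) pattern.
     */
    static uint64_t
    props_get_version(nvlist_t *props)
    {
            nvpair_t *elem = NULL;

            while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
                    if (nvpair_type(elem) == DATA_TYPE_UINT64 &&
                        strcmp(nvpair_name(elem), "version") == 0)
                            return (fnvpair_value_uint64(elem));
            }
            return (0);
    }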

View File

@@ -239,7 +239,7 @@ zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
 uint_t keydata_len;
 zio_crypt_info_t *ci = NULL;
-ASSERT(key != NULL);
+ASSERT3P(key, !=, NULL);
 ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);
 ci = &zio_crypt_table[crypt];

View File

@@ -420,7 +420,7 @@ zvol_geom_destroy(zvol_state_t *zv)
 g_topology_assert();
 mutex_enter(&zv->zv_state_lock);
-VERIFY(zsg->zsg_state == ZVOL_GEOM_RUNNING);
+VERIFY3S(zsg->zsg_state, ==, ZVOL_GEOM_RUNNING);
 mutex_exit(&zv->zv_state_lock);
 zsg->zsg_provider = NULL;
 g_wither_geom(pp->geom, ENXIO);
@@ -1164,8 +1164,8 @@ zvol_ensure_zilog(zvol_state_t *zv)
 zvol_get_data);
 zv->zv_flags |= ZVOL_WRITTEN_TO;
 /* replay / destroy done in zvol_create_minor_impl() */
-VERIFY0((zv->zv_zilog->zl_header->zh_flags &
-ZIL_REPLAY_NEEDED));
+VERIFY0(zv->zv_zilog->zl_header->zh_flags &
+ZIL_REPLAY_NEEDED);
 }
 rw_downgrade(&zv->zv_suspend_lock);
 }
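
The pattern across all three files is the same: a plain ASSERT(a == b) or VERIFY(x == 0) becomes the three-operand or zero-comparison form so a failure report shows the operand values rather than only the stringified expression, and compound asserts are split so the failing check is unambiguous. A rough user-space sketch of why the three-operand form is more informative; the EX_ names are invented for this illustration and are not the OpenZFS macros (which also differ in that ASSERT* compile away in non-debug builds while VERIFY* are always checked):

    /* verify3_sketch.c -- illustration only, not the OpenZFS definitions. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define EX_VERIFY3S(LEFT, OP, RIGHT) do {                               \
            const int64_t ex_l = (int64_t)(LEFT);                           \
            const int64_t ex_r = (int64_t)(RIGHT);                          \
            if (!(ex_l OP ex_r)) {                                          \
                    /* Print both operand values, then bail out. */         \
                    fprintf(stderr, "VERIFY3S(%s %s %s) failed "            \
                        "(%lld %s %lld)\n", #LEFT, #OP, #RIGHT,             \
                        (long long)ex_l, #OP, (long long)ex_r);             \
                    abort();                                                \
            }                                                               \
    } while (0)

    #define EX_VERIFY0(EXPR) EX_VERIFY3S(EXPR, ==, 0)

    int
    main(void)
    {
            int error = 0;
            EX_VERIFY0(error);              /* passes silently */
            EX_VERIFY3S(2 + 2, ==, 5);      /* aborts, printing "4 == 5" */
            return (0);
    }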