Add defensive assertions

Coverity complains about possible bugs involving dereferencing NULL return
values and division by zero. The division-by-zero bugs require a corrupt
block pointer, whether from in-memory corruption or on-disk corruption.
The NULL-return-value complaints are only real bugs if the assumptions we
make about the state of the data structures are wrong. Some of those
assumptions seem impossible to violate, making the reports false
positives, while others are hard to analyze.

Rather than dismiss these as false positives by assuming we know better,
we add defensive assertions to let us know when our assumptions are
wrong.
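
For reference, the pattern being applied is simply to assert the assumption
immediately before the code that depends on it, so debug builds trap the
moment an assumption is violated instead of crashing later on a NULL
dereference or a division by zero. Below is a minimal standalone sketch of
that pattern; the entry type, list_head_example(), and offset_to_index()
are hypothetical stand-ins, and plain assert() substitutes for ZFS's
ASSERT3P()/ASSERT3U() macros, which only fire in debug builds.

/*
 * Minimal standalone sketch of the defensive-assertion pattern.  In the
 * ZFS tree the checks are the ASSERT3P()/ASSERT3U() macros from
 * sys/debug.h, which compile away in production builds; plain assert()
 * stands in for them here, and the types and helpers below are
 * hypothetical stand-ins, not ZFS interfaces.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct entry {
	uint64_t size;	/* stand-in for BP_GET_LSIZE(bp) */
};

/* Hypothetical lookup that a static analyzer assumes may return NULL. */
static struct entry *
list_head_example(struct entry *head)
{
	return (head);
}

static uint64_t
offset_to_index(struct entry *head, uint64_t offset)
{
	struct entry *e = list_head_example(head);

	/*
	 * Assert the assumptions right before the code that relies on
	 * them: the lookup succeeded and the size is nonzero.  Debug
	 * builds then trap here instead of dereferencing NULL or
	 * dividing by zero below.
	 */
	assert(e != NULL);	/* ASSERT3P(e, !=, NULL) in ZFS */
	assert(e->size != 0);	/* ASSERT3U(e->size, !=, 0) in ZFS */

	return (offset / e->size);
}

int
main(void)
{
	struct entry e = { .size = 512 };

	/* Prints 8: offset 4096 divided by the checked size 512. */
	printf("%llu\n", (unsigned long long)offset_to_index(&e, 4096));
	return (0);
}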

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Closes #13972
commit a6ccb36b94 (parent bfaa1d98f4)
Richard Yao, 2022-10-12 14:25:18 -04:00, committed by GitHub
14 changed files with 17 additions and 1 deletion

@@ -182,6 +182,7 @@ zil_prt_rec_write(zilog_t *zilog, int txtype, const void *arg)
 		return;
 	}
+	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
 	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os),
 	    lr->lr_foid, ZB_ZIL_LEVEL,
 	    lr->lr_offset / BP_GET_LSIZE(bp));

@@ -1133,6 +1133,7 @@ zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
 		cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
 		    cb->cb_acl_node);
 	}
+	ASSERT3P(cb->cb_acl_node, !=, NULL);
 	*dataptr = cb->cb_acl_node->z_acldata;
 	*length = cb->cb_acl_node->z_size;
 }

@@ -1163,6 +1163,7 @@ zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
 		cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
 		    cb->cb_acl_node);
 	}
+	ASSERT3P(cb->cb_acl_node, !=, NULL);
 	*dataptr = cb->cb_acl_node->z_acldata;
 	*length = cb->cb_acl_node->z_size;
 }

@@ -8539,6 +8539,7 @@ l2arc_dev_get_next(void)
 		else if (next == first)
 			break;
+		ASSERT3P(next, !=, NULL);
 	} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
 	    next->l2ad_trim_all);

@@ -2687,6 +2687,7 @@ dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
 	dbuf_dirty_record_t *dr;
 	dr = list_head(&db->db_dirty_records);
+	ASSERT3P(dr, !=, NULL);
 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
 	dl = &dr->dt.dl;
 	dl->dr_overridden_by = *bp;
@@ -2748,6 +2749,7 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
 	dmu_buf_will_not_fill(dbuf, tx);
 	dr = list_head(&db->db_dirty_records);
+	ASSERT3P(dr, !=, NULL);
 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
 	dl = &dr->dt.dl;
 	encode_embedded_bp_compressed(&dl->dr_overridden_by,

@@ -111,6 +111,7 @@ traverse_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
 		if (claim_txg == 0 || bp->blk_birth < claim_txg)
 			return (0);
+		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
 		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
 		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

@@ -542,6 +542,7 @@ dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
 	dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
 	ASSERT3P(dle, !=, NULL);
 	dle_prev = AVL_PREV(&dl->dl_tree, dle);
+	ASSERT3P(dle_prev, !=, NULL);
 	dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);

@@ -1470,6 +1470,7 @@ dsl_scan_zil_record(zilog_t *zilog, const lr_t *lrc, void *arg,
 		if (claim_txg == 0 || bp->blk_birth < claim_txg)
 			return (0);
+		ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
 		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
 		    lr->lr_foid, ZB_ZIL_LEVEL,
 		    lr->lr_offset / BP_GET_LSIZE(bp));

@@ -303,8 +303,10 @@ mmp_next_leaf(spa_t *spa)
 	do {
 		leaf = list_next(&spa->spa_leaf_list, leaf);
-		if (leaf == NULL)
+		if (leaf == NULL) {
 			leaf = list_head(&spa->spa_leaf_list);
+			ASSERT3P(leaf, !=, NULL);
+		}
 		/*
 		 * We skip unwritable, offline, detached, and dRAID spare

@@ -369,6 +369,7 @@ range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
 		 * invalid as soon as we do any mutating btree operations.
 		 */
 		rs_after = zfs_btree_find(&rt->rt_root, &tmp, &where_after);
+		ASSERT3P(rs_after, !=, NULL);
 		rs_set_start_raw(rs_after, rt, before_start);
 		rs_set_fill(rs_after, rt, after_fill + before_fill + fill);
 		rs = rs_after;

@@ -507,6 +507,7 @@ void
 spa_log_summary_decrement_blkcount(spa_t *spa, uint64_t blocks_gone)
 {
 	log_summary_entry_t *e = list_head(&spa->spa_log_summary);
+	ASSERT3P(e, !=, NULL);
 	if (e->lse_txgcount > 0)
 		e->lse_txgcount--;
 	for (; e != NULL; e = list_head(&spa->spa_log_summary)) {

@@ -1319,6 +1319,7 @@ vdev_indirect_io_start(zio_t *zio)
 	    vdev_indirect_gather_splits, zio);
 	indirect_split_t *first = list_head(&iv->iv_splits);
+	ASSERT3P(first, !=, NULL);
 	if (first->is_size == zio->io_size) {
 		/*
 		 * This is not a split block; we are pointing to the entire

@@ -756,6 +756,7 @@ vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
 	do {
 		dio = nio;
 		nio = AVL_NEXT(t, dio);
+		ASSERT3P(dio, !=, NULL);
 		zio_add_child(dio, aio);
 		vdev_queue_io_remove(vq, dio);

@@ -339,6 +339,7 @@ zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
 	if (wbuf == NULL)
 		zio_flags |= ZIO_FLAG_RAW;
+	ASSERT3U(BP_GET_LSIZE(bp), !=, 0);
 	SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
 	    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));