Verify BPs in spa_load_verify_cb() and dsl_scan_visitbp()

We want `zpool import` to be highly robust and never panic, even
when encountering corrupt metadata.  This is already handled in the
arc_read() code path, which covers most cases, but spa_load_verify_cb()
relies on zio_read() and must therefore verify the block pointer itself.
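
A condensed sketch of the new spa_load_verify_cb() entry check, summarizing
the spa.c hunk further below (all identifiers come from that hunk):

	/* Reject obviously damaged BPs before any other check or zio_read(). */
	if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
		/* A damaged BP is counted as a metadata error during import. */
		atomic_inc_64(&sle->sle_meta_count);
		return (0);
	}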

During import it is also possible to encounter block pointers which
contain ZIO_COMPRESS_INHERIT and ZIO_CHECKSUM_INHERIT values.  Relax
the verification function slightly to allow this.
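
For context, the INHERIT values sit at the very bottom of the checksum and
compression enums, which is why the old lower-bound tests in
zfs_blkptr_verify() rejected them; a minimal sketch, assuming the usual
zio.h enum layout (abbreviated):

	enum zio_checksum {
		ZIO_CHECKSUM_INHERIT = 0,	/* tripped the old <= ZIO_CHECKSUM_ON bound */
		ZIO_CHECKSUM_ON,
		ZIO_CHECKSUM_OFF,
		/* ... */
		ZIO_CHECKSUM_FUNCTIONS		/* still rejected: >= upper bound */
	};

	enum zio_compress {
		ZIO_COMPRESS_INHERIT = 0,	/* likewise for <= ZIO_COMPRESS_ON */
		ZIO_COMPRESS_ON,
		ZIO_COMPRESS_OFF,
		/* ... */
		ZIO_COMPRESS_FUNCTIONS
	};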

Furthermore, extend dsl_scan_recurse() to verify the block pointer
contents of level zero blocks which are not of type DMU_OT_DNODE or
DMU_OT_OBJSET.  This is handled by arc_read() in the other cases.
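
The dispatch in dsl_scan_recurse() then looks roughly like this (structure
condensed from the dsl_scan.c hunks below; only the final else branch is new):

	if (BP_GET_LEVEL(bp) > 0) {
		/* indirect block: read via arc_read(), which verifies the BP */
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		/* dnode block: read via arc_read() */
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		/* objset block: read via arc_read() */
	} else if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
		/* any other level zero block: sanity check the BP here */
		scn->scn_phys.scn_errors++;
		spa_log_error(spa, zb);
		return (SET_ERROR(EINVAL));
	}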

Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #13124 
Closes #13360

@@ -1824,6 +1824,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
     const zbookmark_phys_t *zb, dmu_tx_t *tx)
 {
 	dsl_pool_t *dp = scn->scn_dp;
+	spa_t *spa = dp->dp_spa;
 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
 	int err;

@@ -1838,7 +1839,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
 	if (dnp != NULL &&
 	    dnp->dn_bonuslen > DN_MAX_BONUS_LEN(dnp)) {
 		scn->scn_phys.scn_errors++;
-		spa_log_error(dp->dp_spa, zb);
+		spa_log_error(spa, zb);
 		return (SET_ERROR(EINVAL));
 	}

@@ -1849,7 +1850,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
 		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
 		arc_buf_t *buf;

-		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
+		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
 		if (err) {
 			scn->scn_phys.scn_errors++;
@@ -1877,7 +1878,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
 			zio_flags |= ZIO_FLAG_RAW;
 		}

-		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
+		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
 		if (err) {
 			scn->scn_phys.scn_errors++;
@@ -1896,7 +1897,7 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
 		objset_phys_t *osp;
 		arc_buf_t *buf;

-		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
+		err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
 		    ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
 		if (err) {
 			scn->scn_phys.scn_errors++;
@@ -1927,6 +1928,14 @@ dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
 			    DMU_USERUSED_OBJECT, tx);
 		}
 		arc_buf_destroy(buf, &buf);
+	} else if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
+		/*
+		 * Sanity check the block pointer contents, this is handled
+		 * by arc_read() for the cases above.
+		 */
+		scn->scn_phys.scn_errors++;
+		spa_log_error(spa, zb);
+		return (SET_ERROR(EINVAL));
 	}

 	return (0);
@@ -1977,19 +1986,6 @@ dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,

 	scn->scn_visited_this_txg++;

-	/*
-	 * This debugging is commented out to conserve stack space. This
-	 * function is called recursively and the debugging adds several
-	 * bytes to the stack for each call. It can be commented back in
-	 * if required to debug an issue in dsl_scan_visitbp().
-	 *
-	 * dprintf_bp(bp,
-	 *    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
-	 *    ds, ds ? ds->ds_object : 0,
-	 *    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
-	 *    bp);
-	 */
-
 	if (BP_IS_HOLE(bp)) {
 		scn->scn_holes_this_txg++;
 		return;

@@ -2310,9 +2310,6 @@ spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 	(void) zilog, (void) dnp;

-	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
-	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
-		return (0);
-
 	/*
 	 * Note: normally this routine will not be called if
 	 * spa_load_verify_metadata is not set. However, it may be useful
@@ -2320,6 +2317,22 @@ spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 	 */
 	if (!spa_load_verify_metadata)
 		return (0);
+
+	/*
+	 * Sanity check the block pointer in order to detect obvious damage
+	 * before using the contents in subsequent checks or in zio_read().
+	 * When damaged consider it to be a metadata error since we cannot
+	 * trust the BP_GET_TYPE and BP_GET_LEVEL values.
+	 */
+	if (!zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG)) {
+		atomic_inc_64(&sle->sle_meta_count);
+		return (0);
+	}
+
+	if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
+	    BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
+		return (0);
+
 	if (!BP_IS_METADATA(bp) &&
 	    (!spa_load_verify_data || !sle->sle_verify_data))
 		return (0);

@@ -962,14 +962,12 @@ zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
 		    "blkptr at %p has invalid TYPE %llu",
 		    bp, (longlong_t)BP_GET_TYPE(bp));
 	}
-	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
-	    BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
+	if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %p has invalid CHECKSUM %llu",
 		    bp, (longlong_t)BP_GET_CHECKSUM(bp));
 	}
-	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
-	    BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
+	if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS) {
 		errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
 		    "blkptr at %p has invalid COMPRESS %llu",
 		    bp, (longlong_t)BP_GET_COMPRESS(bp));