From 305781da4bbe11acef8707894d7e33f8aef3ca8e Mon Sep 17 00:00:00 2001
From: Tom Caputi
Date: Thu, 17 Jan 2019 18:47:08 -0500
Subject: [PATCH] Fix error handling in callers of dbuf_hold_level()

Currently, the functions dbuf_prefetch_indirect_done() and
dmu_assign_arcbuf_by_dnode() assume that dbuf_hold_level() cannot fail.
In the event of an error the former will cause a NULL pointer
dereference and the latter will trigger a VERIFY. This patch adds error
handling to these functions and their callers where necessary.

Reviewed by: Matt Ahrens
Reviewed-by: Brian Behlendorf
Signed-off-by: Tom Caputi
Closes #8291
---
 cmd/ztest/ztest.c      | 12 ++++++------
 include/sys/dmu.h      |  4 ++--
 module/zfs/dbuf.c      |  6 ++++++
 module/zfs/dmu.c       | 15 +++++++++++----
 module/zfs/dmu_recv.c  |  7 ++++++-
 module/zfs/zfs_vnops.c |  7 ++++++-
 6 files changed, 37 insertions(+), 14 deletions(-)

diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index da763d74d..111ed9835 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -4847,14 +4847,14 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 				    FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
 			}
 			if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
-				dmu_assign_arcbuf_by_dbuf(bonus_db, off,
-				    bigbuf_arcbufs[j], tx);
+				VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
+				    off, bigbuf_arcbufs[j], tx));
 			} else {
-				dmu_assign_arcbuf_by_dbuf(bonus_db, off,
-				    bigbuf_arcbufs[2 * j], tx);
-				dmu_assign_arcbuf_by_dbuf(bonus_db,
+				VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
+				    off, bigbuf_arcbufs[2 * j], tx));
+				VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
 				    off + chunksize / 2,
-				    bigbuf_arcbufs[2 * j + 1], tx);
+				    bigbuf_arcbufs[2 * j + 1], tx));
 			}
 			if (i == 1) {
 				dmu_buf_rele(dbt, FTAG);
diff --git a/include/sys/dmu.h b/include/sys/dmu.h
index 542eff95f..63c51ecfb 100644
--- a/include/sys/dmu.h
+++ b/include/sys/dmu.h
@@ -855,9 +855,9 @@ int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
 #endif
 struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
 void dmu_return_arcbuf(struct arc_buf *buf);
-void dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
+int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
     struct arc_buf *buf, dmu_tx_t *tx);
-void dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
+int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
     struct arc_buf *buf, dmu_tx_t *tx);
 #define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
 void dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 826934b36..9f3c9bfd5 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -2886,6 +2886,12 @@ dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
 	    dpa->dpa_zb.zb_level));
 	dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
 	    dpa->dpa_curlevel, curblkid, FTAG);
+	if (db == NULL) {
+		kmem_free(dpa, sizeof (*dpa));
+		arc_buf_destroy(abuf, private);
+		return;
+	}
+
 	(void) dbuf_read(db, NULL,
 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
 	dbuf_rele(db, FTAG);
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 5b79eb907..231ed3053 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1688,7 +1688,7 @@ dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
  * If this is not possible copy the contents of passed arc buf via
  * dmu_write().
  */
-void
+int
 dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
     dmu_tx_t *tx)
 {
@@ -1700,7 +1700,9 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
 
 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 	blkid = dbuf_whichblock(dn, 0, offset);
-	VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
+	db = dbuf_hold(dn, blkid, FTAG);
 	rw_exit(&dn->dn_struct_rwlock);
+	if (db == NULL)
+		return (SET_ERROR(EIO));
 
 	/*
@@ -1720,17 +1722,22 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
 		dmu_return_arcbuf(buf);
 		XUIOSTAT_BUMP(xuiostat_wbuf_copied);
 	}
+
+	return (0);
 }
 
-void
+int
 dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
     dmu_tx_t *tx)
 {
+	int err;
 	dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
 
 	DB_DNODE_ENTER(dbuf);
-	dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
+	err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
 	DB_DNODE_EXIT(dbuf);
+
+	return (err);
 }
 
 typedef struct {
diff --git a/module/zfs/dmu_recv.c b/module/zfs/dmu_recv.c
index a448bc148..257f157fd 100644
--- a/module/zfs/dmu_recv.c
+++ b/module/zfs/dmu_recv.c
@@ -1439,7 +1439,12 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
 	}
 
 	VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
-	dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
+	err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
+	if (err != 0) {
+		dnode_rele(dn, FTAG);
+		dmu_tx_commit(tx);
+		return (err);
+	}
 	dnode_rele(dn, FTAG);
 
 	/*
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 06d93bad0..bbc171bd2 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -844,8 +844,13 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 			xuio_stat_wbuf_copied();
 		} else {
 			ASSERT(xuio || tx_bytes == max_blksz);
-			dmu_assign_arcbuf_by_dbuf(
+			error = dmu_assign_arcbuf_by_dbuf(
 			    sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
+			if (error != 0) {
+				dmu_return_arcbuf(abuf);
+				dmu_tx_commit(tx);
+				break;
+			}
 		}
 		ASSERT(tx_bytes <= uio->uio_resid);
 		uioskip(uio, tx_bytes);
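
For reference, the caller-side pattern this patch establishes can be
sketched as a minimal hypothetical caller. Everything below is
illustrative only and not part of the patch: example_hold_block() does
not exist in the tree, and the body simply mirrors the hold/check/rele
sequence used in dmu_assign_arcbuf_by_dnode() above. The point is that
dbuf_hold() and dbuf_hold_level() may return NULL, so a caller checks
the result and propagates an error such as EIO instead of asserting.

	/*
	 * Hypothetical sketch, assuming standard ZFS kernel headers;
	 * the function name and surrounding context are illustrative.
	 */
	static int
	example_hold_block(dnode_t *dn, uint64_t offset)
	{
		dmu_buf_impl_t *db;
		uint64_t blkid;

		/* Map the offset to a block id and take a hold on it. */
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, 0, offset);
		db = dbuf_hold(dn, blkid, FTAG);
		rw_exit(&dn->dn_struct_rwlock);

		/* dbuf_hold() can fail; return EIO rather than VERIFY. */
		if (db == NULL)
			return (SET_ERROR(EIO));

		/* ... use db while the hold is live ... */

		dbuf_rele(db, FTAG);
		return (0);
	}

Callers that hold other state on entry (a dnode hold, an open
transaction, a loaned arc buf) release or commit it on the error path
first, as the dmu_recv.c and zfs_vnops.c hunks do.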