Fix error handling in callers of dbuf_hold_level()

Currently, the functions dbuf_prefetch_indirect_done() and
dmu_assign_arcbuf_by_dnode() assume that dbuf_hold_level() cannot
fail. In the event of an error the former will cause a NULL pointer
dereference and the latter will trigger a VERIFY. This patch adds
error handling to these functions and their callers where necessary.

Reviewed by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #8291
This commit is contained in:
Tom Caputi 2019-01-17 18:47:08 -05:00 committed by Brian Behlendorf
parent 75058f3303
commit 305781da4b
6 changed files with 37 additions and 14 deletions

View File

@ -4847,14 +4847,14 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0); FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
} }
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) { if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
dmu_assign_arcbuf_by_dbuf(bonus_db, off, VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
bigbuf_arcbufs[j], tx); off, bigbuf_arcbufs[j], tx));
} else { } else {
dmu_assign_arcbuf_by_dbuf(bonus_db, off, VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
bigbuf_arcbufs[2 * j], tx); off, bigbuf_arcbufs[2 * j], tx));
dmu_assign_arcbuf_by_dbuf(bonus_db, VERIFY0(dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2, off + chunksize / 2,
bigbuf_arcbufs[2 * j + 1], tx); bigbuf_arcbufs[2 * j + 1], tx));
} }
if (i == 1) { if (i == 1) {
dmu_buf_rele(dbt, FTAG); dmu_buf_rele(dbt, FTAG);

View File

@ -855,9 +855,9 @@ int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
#endif #endif
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size); struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf); void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, int dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx); struct arc_buf *buf, dmu_tx_t *tx);
void dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, int dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx); struct arc_buf *buf, dmu_tx_t *tx);
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf #define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
void dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset, void dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,

View File

@ -2886,6 +2886,12 @@ dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
dpa->dpa_zb.zb_level)); dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode, dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG); dpa->dpa_curlevel, curblkid, FTAG);
if (db == NULL) {
kmem_free(dpa, sizeof (*dpa));
arc_buf_destroy(abuf, private);
return;
}
(void) dbuf_read(db, NULL, (void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT); DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG); dbuf_rele(db, FTAG);

View File

@ -1688,7 +1688,7 @@ dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
* If this is not possible copy the contents of passed arc buf via * If this is not possible copy the contents of passed arc buf via
* dmu_write(). * dmu_write().
*/ */
void int
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf, dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
@ -1700,7 +1700,9 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
rw_enter(&dn->dn_struct_rwlock, RW_READER); rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset); blkid = dbuf_whichblock(dn, 0, offset);
VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL); db = dbuf_hold(dn, blkid, FTAG);
if (db == NULL)
return (SET_ERROR(EIO));
rw_exit(&dn->dn_struct_rwlock); rw_exit(&dn->dn_struct_rwlock);
/* /*
@ -1720,17 +1722,22 @@ dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_return_arcbuf(buf); dmu_return_arcbuf(buf);
XUIOSTAT_BUMP(xuiostat_wbuf_copied); XUIOSTAT_BUMP(xuiostat_wbuf_copied);
} }
return (0);
} }
void int
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx) dmu_tx_t *tx)
{ {
int err;
dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
DB_DNODE_ENTER(dbuf); DB_DNODE_ENTER(dbuf);
dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
DB_DNODE_EXIT(dbuf); DB_DNODE_EXIT(dbuf);
return (err);
} }
typedef struct { typedef struct {

View File

@ -1439,7 +1439,12 @@ receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
} }
VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn)); VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx); err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
if (err != 0) {
dnode_rele(dn, FTAG);
dmu_tx_commit(tx);
return (err);
}
dnode_rele(dn, FTAG); dnode_rele(dn, FTAG);
/* /*

View File

@ -844,8 +844,13 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
xuio_stat_wbuf_copied(); xuio_stat_wbuf_copied();
} else { } else {
ASSERT(xuio || tx_bytes == max_blksz); ASSERT(xuio || tx_bytes == max_blksz);
dmu_assign_arcbuf_by_dbuf( error = dmu_assign_arcbuf_by_dbuf(
sa_get_db(zp->z_sa_hdl), woff, abuf, tx); sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
if (error != 0) {
dmu_return_arcbuf(abuf);
dmu_tx_commit(tx);
break;
}
} }
ASSERT(tx_bytes <= uio->uio_resid); ASSERT(tx_bytes <= uio->uio_resid);
uioskip(uio, tx_bytes); uioskip(uio, tx_bytes);