Fix self-healing IO prior to dsl_pool_init() completion

Async writes triggered by a self-healing IO may be issued before the
pool finishes the process of initialization.  This results in a NULL
dereference of `spa->spa_dsl_pool` in vdev_queue_max_async_writes().

George Wilson recommended addressing this issue by initializing the
passed `dsl_pool_t **` prior to dmu_objset_open_impl().  Since the
caller is passing the `spa->spa_dsl_pool` this has the effect of
ensuring it's initialized.

However, since this depends on the caller knowing they must pass
the `spa->spa_dsl_pool` an additional NULL check was added to
vdev_queue_max_async_writes().  This guards against any future
restructuring of the code which might result in dsl_pool_init()
being called differently.

Signed-off-by: GeLiXin <47034221@qq.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #4652
This commit is contained in:
GeLiXin 2016-05-21 11:34:06 +08:00 committed by Brian Behlendorf
parent 92547bc45c
commit b7faa7aabd
2 changed files with 23 additions and 6 deletions

14
module/zfs/dsl_pool.c Normal file → Executable file
View File

@@ -182,12 +182,20 @@ dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
 	int err;
 	dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
 
+	/*
+	 * Initialize the caller's dsl_pool_t structure before we actually open
+	 * the meta objset.  This is done because a self-healing write zio may
+	 * be issued as part of dmu_objset_open_impl() and the spa needs its
+	 * dsl_pool_t initialized in order to handle the write.
+	 */
+	*dpp = dp;
+
 	err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
 	    &dp->dp_meta_objset);
-	if (err != 0)
+	if (err != 0) {
 		dsl_pool_close(dp);
-	else
-		*dpp = dp;
+		*dpp = NULL;
+	}
 
 	return (err);
 }

module/zfs/vdev_queue.c
View File
@@ -249,20 +249,29 @@ static int
 vdev_queue_max_async_writes(spa_t *spa)
 {
 	int writes;
-	uint64_t dirty = spa->spa_dsl_pool->dp_dirty_total;
+	uint64_t dirty = 0;
+	dsl_pool_t *dp = spa_get_dsl(spa);
 	uint64_t min_bytes = zfs_dirty_data_max *
 	    zfs_vdev_async_write_active_min_dirty_percent / 100;
 	uint64_t max_bytes = zfs_dirty_data_max *
 	    zfs_vdev_async_write_active_max_dirty_percent / 100;
 
+	/*
+	 * Async writes may occur before the assignment of the spa's
+	 * dsl_pool_t if a self-healing zio is issued prior to the
+	 * completion of dmu_objset_open_impl().
+	 */
+	if (dp == NULL)
+		return (zfs_vdev_async_write_max_active);
+
 	/*
 	 * Sync tasks correspond to interactive user actions. To reduce the
 	 * execution time of those actions we push data out as fast as possible.
 	 */
-	if (spa_has_pending_synctask(spa)) {
+	if (spa_has_pending_synctask(spa))
 		return (zfs_vdev_async_write_max_active);
-	}
 
+	dirty = dp->dp_dirty_total;
 	if (dirty < min_bytes)
 		return (zfs_vdev_async_write_min_active);
 	if (dirty > max_bytes)