Mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-25 02:49:32 +03:00)
Fix __zio_execute() asynchronous dispatch
To save valuable stack space, all zios were made asynchronous when in the
txg_sync_thread context or during pool initialization. See commit
2fac4c2
for the original patch and motivation.
Unfortunately, the changes to dsl_pool_sync_context() made by the
feature flags broke this logic, causing __zio_execute() to dispatch
itself infinitely when called during pool initialization. This commit
refines the existing logic to specifically target only the two cases we
care about (the failure mode is sketched below).
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
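The failure mode is easy to model outside of ZFS. Below is a minimal standalone C sketch, not ZFS code; every name in it is a hypothetical stand-in: pool_initializing stands in for dsl_pool_sync_context() returning true throughout pool init, on_issue_taskq for zio_taskq_member(zio, ZIO_TASKQ_ISSUE), and dispatch_async() for handing the zio to the ZIO_TASKQ_ISSUE taskq, whose worker simply re-enters execute(). With the broken predicate every re-entry dispatches again; the refined predicate stops re-dispatching once the zio is already running on the issue taskq.

/* recursion_sketch.c -- hypothetical stand-ins, not ZFS code */
#include <stdio.h>
#include <stdbool.h>

static bool pool_initializing = true;  /* dsl_pool_sync_context() stand-in */
static bool on_issue_taskq = false;    /* zio_taskq_member() stand-in */
static bool use_refined_check = false;

static void execute(int depth);

/* Stand-in for zio_taskq_dispatch(): the taskq worker re-enters execute(). */
static void
dispatch_async(int depth)
{
	on_issue_taskq = true;
	execute(depth + 1);
}

static void
execute(int depth)
{
	if (depth > 3) {
		printf("  depth %d: still re-dispatching "
		    "(would recurse forever)\n", depth);
		return;
	}

	/*
	 * Broken predicate: "in a sync context" stays true for the whole
	 * pool-init path, so every re-entry dispatches again.  Refined
	 * predicate: additionally require that we are NOT already running
	 * on the issue taskq, so the second pass executes inline.
	 */
	bool dispatch = use_refined_check ?
	    (pool_initializing && !on_issue_taskq) : pool_initializing;

	if (dispatch) {
		printf("  depth %d: dispatching async\n", depth);
		dispatch_async(depth);
		return;
	}
	printf("  depth %d: executing inline\n", depth);
}

int
main(void)
{
	printf("broken check:\n");
	execute(0);

	on_issue_taskq = false;
	use_refined_check = true;
	printf("refined check:\n");
	execute(0);
	return (0);
}

The committed fix is stricter than this sketch: it separately handles the txg_sync_thread case (curthread == dp->dp_tx.tx_sync_thread) and pool initialization (spa_is_initializing()), as the diff below shows.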
parent: ea0b2538cd
commit: 91579709fc
@@ -1248,7 +1248,7 @@ __zio_execute(zio_t *zio)
 	while (zio->io_stage < ZIO_STAGE_DONE) {
 		enum zio_stage pipeline = zio->io_pipeline;
 		enum zio_stage stage = zio->io_stage;
-		dsl_pool_t *dsl;
+		dsl_pool_t *dp;
 		boolean_t cut;
 		int rv;
 
@@ -1262,7 +1262,7 @@ __zio_execute(zio_t *zio)
 
 		ASSERT(stage <= ZIO_STAGE_DONE);
 
-		dsl = spa_get_dsl(zio->io_spa);
+		dp = spa_get_dsl(zio->io_spa);
 		cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
 		    zio_requeue_io_start_cut_in_line : B_FALSE;
 
@@ -1272,16 +1272,24 @@ __zio_execute(zio_t *zio)
 		 * or may wait for an I/O that needs an interrupt thread
 		 * to complete, issue async to avoid deadlock.
 		 *
-		 * If we are in the txg_sync_thread or being called
-		 * during pool init issue async to minimize stack depth.
-		 * Both of these call paths may be recursively called.
-		 *
 		 * For VDEV_IO_START, we cut in line so that the io will
 		 * be sent to disk promptly.
 		 */
-		if (((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
-		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) ||
-		    (dsl != NULL && dsl_pool_sync_context(dsl))) {
+		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
+		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
+			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
+			return;
+		}
+
+		/*
+		 * If we executing in the context of the tx_sync_thread,
+		 * or we are performing pool initialization outside of a
+		 * zio_taskq[ZIO_TASKQ_ISSUE] context.  Then issue the zio
+		 * async to minimize stack usage for these deep call paths.
+		 */
+		if ((dp && curthread == dp->dp_tx.tx_sync_thread) ||
+		    (dp && spa_is_initializing(dp->dp_spa) &&
+		    !zio_taskq_member(zio, ZIO_TASKQ_ISSUE))) {
 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
 			return;
 		}