Make txg_sync_thread zio's async
The majority of the recursive operations performed by the dsl
are done either in the context of the txg_sync_thread or during
pool import. It is these recursive operations which contribute
greatly to the stack depth. When this recursion is coupled with
a synchronous I/O in the same context, overflow becomes possible.

Previously, to handle this case I focused on keeping the
individual stack frames as light as possible. That is a good
idea as long as it can be done in a way that doesn't overly
complicate the code. However, there is a better solution.

If we treat all zio's issued by the txg_sync_thread as async, then
we can use the txg_sync_thread stack for the recursive parts and
the zio_* threads for the I/O parts. This effectively doubles our
available stack space, with the only drawback being a small delay
to schedule the I/O. In practice, the scheduling time is so much
smaller than the actual I/O time that this isn't an issue.
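
Concretely, the new requeue check in __zio_execute() (shown in the
diff below) boils down to the following; this is a simplified sketch
that omits the pre-existing interrupt-context test:

    /*
     * Simplified sketch: when running in the pool's sync context
     * (the txg_sync_thread), hand the zio to a ZIO_TASKQ_ISSUE
     * thread so the I/O stages run on a fresh zio_* thread stack
     * rather than on the sync thread's stack.
     */
    dsl_pool_t *dsl = spa_get_dsl(zio->io_spa);
    boolean_t cut = (zio->io_stage == ZIO_STAGE_VDEV_IO_START) ?
        zio_requeue_io_start_cut_in_line : B_FALSE;

    if (dsl != NULL && dsl_pool_sync_context(dsl)) {
        zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
        return;    /* a zio_* taskq thread re-enters zio_execute() */
    }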

Another benefit of making the zio async is that the zio pipeline
is now parallel. For CPU-intensive pipeline stages such as
compression or dedup, that should improve performance.

With this change in place the worst-case stack usage observed so
far is 6902 bytes. This is still higher than I'd like, but it is
significantly improved. Additional changes to specific functions
should improve this further. This change allows us to revert
commit 6656bf5, which did some horrible things to the recursive
traverse_visitbp() callpath in the name of saving stack.

commit 2fac4c2a74
parent f74fae8b30
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1150,6 +1150,8 @@ __zio_execute(zio_t *zio)
 	while (zio->io_stage < ZIO_STAGE_DONE) {
 		enum zio_stage pipeline = zio->io_pipeline;
 		enum zio_stage stage = zio->io_stage;
+		dsl_pool_t *dsl;
+		boolean_t cut;
 		int rv;
 
 		ASSERT(!MUTEX_HELD(&zio->io_lock));
@@ -1162,19 +1164,26 @@ __zio_execute(zio_t *zio)
 
 		ASSERT(stage <= ZIO_STAGE_DONE);
 
+		dsl = spa_get_dsl(zio->io_spa);
+		cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
+		    zio_requeue_io_start_cut_in_line : B_FALSE;
+
 		/*
 		 * If we are in interrupt context and this pipeline stage
 		 * will grab a config lock that is held across I/O,
 		 * or may wait for an I/O that needs an interrupt thread
 		 * to complete, issue async to avoid deadlock.
 		 *
+		 * If we are in the txg_sync_thread or being called
+		 * during pool init issue async to minimize stack depth.
+		 * Both of these call paths may be recursively called.
+		 *
 		 * For VDEV_IO_START, we cut in line so that the io will
 		 * be sent to disk promptly.
 		 */
-		if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
-		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
-			boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
-			    zio_requeue_io_start_cut_in_line : B_FALSE;
+		if (((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
+		    zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) ||
+		    (dsl != NULL && dsl_pool_sync_context(dsl))) {
 			zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
 			return;
 		}
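
For reference, a hypothetical sketch of what the cut-in-line dispatch
might look like inside zio_taskq_dispatch(); only the three-argument
signature is taken from the diff above, while the TQ_FRONT flag and
the spa_zio_taskq layout are assumptions about the SPL/spa internals:

    /*
     * Hypothetical sketch, not the committed implementation: requeue
     * zio_execute() on the matching taskq, optionally at the head of
     * the queue (TQ_FRONT) so a VDEV_IO_START stage reaches the disk
     * promptly.
     */
    static void
    zio_taskq_dispatch(zio_t *zio, enum zio_taskq_type q, boolean_t cutinline)
    {
        spa_t *spa = zio->io_spa;
        zio_type_t t = zio->io_type;
        uint_t flags = TQ_SLEEP | (cutinline ? TQ_FRONT : 0);

        (void) taskq_dispatch(spa->spa_zio_taskq[t][q],
            (task_func_t *)zio_execute, zio, flags);
    }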