Illumos #5244 - zio pipeline callers should explicitly invoke next stage

5244 zio pipeline callers should explicitly invoke next stage
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Alex Reece <alex.reece@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Richard Elling <richard.elling@gmail.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: Steven Hartland <killing@multiplay.co.uk>
Approved by: Gordon Ross <gwr@nexenta.com>

References:
  https://www.illumos.org/issues/5244
  https://github.com/illumos/illumos-gate/commit/738f37b

Porting Notes:

1. The unported "2932 support crash dumps to raidz, etc. pools"
   caused a merge conflict due to a copyright difference in
   module/zfs/vdev_raidz.c.
2. The unported "4128 disks in zpools never go away when pulled"
   and additional Linux-specific changes caused merge conflicts in
   module/zfs/vdev_disk.c.

Ported-by: Richard Yao <richard.yao@clusterhq.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2828
Author: George Wilson
Date: 2014-10-20 22:07:45 +00:00
Committed-by: Brian Behlendorf
Parent commit: 8dd86a10cf
Commit: 98b254188a
Stats: 9 changed files, 55 insertions(+), 34 deletions(-)

diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h

@@ -60,7 +60,7 @@ typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
     uint64_t *ashift);
 typedef void vdev_close_func_t(vdev_t *vd);
 typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
-typedef int vdev_io_start_func_t(zio_t *zio);
+typedef void vdev_io_start_func_t(zio_t *zio);
 typedef void vdev_io_done_func_t(zio_t *zio);
 typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
 typedef void vdev_hold_func_t(vdev_t *vd);

diff --git a/include/sys/zio.h b/include/sys/zio.h

@@ -153,9 +153,6 @@ typedef enum zio_priority {
 	ZIO_PRIORITY_NOW		/* non-queued i/os (e.g. free) */
 } zio_priority_t;
 
-#define	ZIO_PIPELINE_CONTINUE		0x100
-#define	ZIO_PIPELINE_STOP		0x101
-
 enum zio_flag {
 	/*
 	 * Flags inherited by gang, ddt, and vdev children,

diff --git a/lib/libzpool/taskq.c b/lib/libzpool/taskq.c

@@ -25,6 +25,7 @@
 /*
  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
+ * Copyright (c) 2014 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -33,8 +34,10 @@ int taskq_now;
 taskq_t *system_taskq;
 
 #define	TASKQ_ACTIVE	0x00010000
+#define	TASKQ_NAMELEN	31
 
 struct taskq {
+	char		tq_name[TASKQ_NAMELEN + 1];
 	kmutex_t	tq_lock;
 	krwlock_t	tq_threadlock;
 	kcondvar_t	tq_dispatch_cv;
@@ -280,6 +283,7 @@ taskq_create(const char *name, int nthreads, pri_t pri,
 	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
 	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);
 	cv_init(&tq->tq_maxalloc_cv, NULL, CV_DEFAULT, NULL);
+	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
 	tq->tq_flags = flags | TASKQ_ACTIVE;
 	tq->tq_active = nthreads;
 	tq->tq_nthreads = nthreads;

diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c

@@ -657,7 +657,7 @@ vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
 	return (0);
 }
 
-static int
+static void
 vdev_disk_io_start(zio_t *zio)
 {
 	vdev_t *v = zio->io_vd;
@@ -669,7 +669,8 @@ vdev_disk_io_start(zio_t *zio)
 
 	if (!vdev_readable(v)) {
 		zio->io_error = SET_ERROR(ENXIO);
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_interrupt(zio);
+		return;
 	}
 
 	switch (zio->io_cmd) {
@@ -685,7 +686,7 @@ vdev_disk_io_start(zio_t *zio)
 		error = vdev_disk_io_flush(vd->vd_bdev, zio);
 
 		if (error == 0)
-			return (ZIO_PIPELINE_STOP);
+			return;
 
 		zio->io_error = error;
 		if (error == ENOTSUP)
@@ -697,8 +698,8 @@ vdev_disk_io_start(zio_t *zio)
 			zio->io_error = SET_ERROR(ENOTSUP);
 		}
 
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_execute(zio);
+		return;
 
 	case ZIO_TYPE_WRITE:
 		flags = WRITE;
 		break;
@@ -709,17 +710,17 @@ vdev_disk_io_start(zio_t *zio)
 
 	default:
 		zio->io_error = SET_ERROR(ENOTSUP);
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_interrupt(zio);
+		return;
 	}
 
 	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
 	    zio->io_size, zio->io_offset, flags);
 	if (error) {
 		zio->io_error = error;
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_interrupt(zio);
+		return;
 	}
-
-	return (ZIO_PIPELINE_STOP);
 }
 
 static void

diff --git a/module/zfs/vdev_file.c b/module/zfs/vdev_file.c

@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -172,7 +172,7 @@ vdev_file_io_fsync(void *arg)
 	zio_interrupt(zio);
 }
 
-static int
+static void
 vdev_file_io_start(zio_t *zio)
 {
 	vdev_t *vd = zio->io_vd;
@@ -182,7 +182,8 @@ vdev_file_io_start(zio_t *zio)
 	/* XXPOLICY */
 	if (!vdev_readable(vd)) {
 		zio->io_error = SET_ERROR(ENXIO);
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_interrupt(zio);
+		return;
 	}
 
 	switch (zio->io_cmd) {
@@ -201,7 +202,7 @@ vdev_file_io_start(zio_t *zio)
 			if (spl_fstrans_check()) {
 				VERIFY3U(taskq_dispatch(vdev_file_taskq,
 				    vdev_file_io_fsync, zio, TQ_SLEEP), !=, 0);
-				return (ZIO_PIPELINE_STOP);
+				return;
 			}
 
 			zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC,
@@ -211,13 +212,12 @@ vdev_file_io_start(zio_t *zio)
 			zio->io_error = SET_ERROR(ENOTSUP);
 		}
 
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_execute(zio);
+		return;
 	}
 
 	VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
 	    TQ_SLEEP), !=, 0);
-
-	return (ZIO_PIPELINE_STOP);
 }
 
 /* ARGSUSED */

diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c

@@ -24,7 +24,7 @@
  */
 /*
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -327,7 +327,7 @@ vdev_mirror_child_select(zio_t *zio)
 	return (-1);
 }
 
-static int
+static void
 vdev_mirror_io_start(zio_t *zio)
 {
 	mirror_map_t *mm;
@@ -352,7 +352,8 @@ vdev_mirror_io_start(zio_t *zio)
 			    zio->io_type, zio->io_priority, 0,
 			    vdev_mirror_scrub_done, mc));
 			}
-			return (ZIO_PIPELINE_CONTINUE);
+			zio_execute(zio);
+			return;
 		}
 		/*
 		 * For normal reads just pick one child.
@@ -378,7 +379,7 @@ vdev_mirror_io_start(zio_t *zio)
 		c++;
 	}
 
-	return (ZIO_PIPELINE_CONTINUE);
+	zio_execute(zio);
 }
 
 static int

diff --git a/module/zfs/vdev_missing.c b/module/zfs/vdev_missing.c

@@ -24,7 +24,7 @@
  */
 /*
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
  */
 
 /*
@@ -66,11 +66,11 @@ vdev_missing_close(vdev_t *vd)
 }
 
 /* ARGSUSED */
-static int
+static void
 vdev_missing_io_start(zio_t *zio)
 {
 	zio->io_error = SET_ERROR(ENOTSUP);
-	return (ZIO_PIPELINE_CONTINUE);
+	zio_execute(zio);
 }
 
 /* ARGSUSED */

diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c

@@ -21,7 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
  */
 
 #include <sys/zfs_context.h>
@@ -1567,7 +1567,7 @@ vdev_raidz_child_done(zio_t *zio)
  * vdevs have had errors, then create zio read operations to the parity
  * columns' VDevs as well.
  */
-static int
+static void
 vdev_raidz_io_start(zio_t *zio)
 {
 	vdev_t *vd = zio->io_vd;
@@ -1611,7 +1611,8 @@ vdev_raidz_io_start(zio_t *zio)
 			    ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
 		}
 
-		return (ZIO_PIPELINE_CONTINUE);
+		zio_execute(zio);
+		return;
 	}
 
 	ASSERT(zio->io_type == ZIO_TYPE_READ);
@@ -1651,7 +1652,7 @@ vdev_raidz_io_start(zio_t *zio)
 		}
 	}
 
-	return (ZIO_PIPELINE_CONTINUE);
+	zio_execute(zio);
 }

diff --git a/module/zfs/zio.c b/module/zfs/zio.c

@@ -59,6 +59,9 @@ kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 int zio_delay_max = ZIO_DELAY_MAX;
 
+#define	ZIO_PIPELINE_CONTINUE		0x100
+#define	ZIO_PIPELINE_STOP		0x101
+
 /*
  * The following actions directly effect the spa's sync-to-convergence logic.
  * The values below define the sync pass when we start performing the action.
@@ -2526,6 +2529,18 @@ zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
  * Read and write to physical devices
  * ==========================================================================
  */
+
+/*
+ * Issue an I/O to the underlying vdev. Typically the issue pipeline
+ * stops after this stage and will resume upon I/O completion.
+ * However, there are instances where the vdev layer may need to
+ * continue the pipeline when an I/O was not issued. Since the I/O
+ * that was sent to the vdev layer might be different than the one
+ * currently active in the pipeline (see vdev_queue_io()), we explicitly
+ * force the underlying vdev layers to call either zio_execute() or
+ * zio_interrupt() to ensure that the pipeline continues with the correct I/O.
+ */
 static int
 zio_vdev_io_start(zio_t *zio)
 {
@@ -2543,7 +2558,8 @@ zio_vdev_io_start(zio_t *zio)
 		/*
 		 * The mirror_ops handle multiple DVAs in a single BP.
 		 */
-		return (vdev_mirror_ops.vdev_op_io_start(zio));
+		vdev_mirror_ops.vdev_op_io_start(zio);
+		return (ZIO_PIPELINE_STOP);
 	}
 
 	/*
@@ -2551,7 +2567,7 @@ zio_vdev_io_start(zio_t *zio)
 	 * can quickly react to certain workloads. In particular, we care
 	 * about non-scrubbing, top-level reads and writes with the following
 	 * characteristics:
-	 *     - synchronous writes of user data to non-slog devices
+	 *	- synchronous writes of user data to non-slog devices
 	 *	- any reads of user data
 	 * When these conditions are met, adjust the timestamp of spa_last_io
 	 * which allows the scan thread to adjust its workload accordingly.
@@ -2637,7 +2653,8 @@ zio_vdev_io_start(zio_t *zio)
 		}
 	}
 
-	return (vd->vdev_ops->vdev_op_io_start(zio));
+	vd->vdev_ops->vdev_op_io_start(zio);
+	return (ZIO_PIPELINE_STOP);
 }
 
 static int