3306 zdb should be able to issue reads in parallel
3321 'zpool reopen' command should be documented in the man
     page and help

Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Matt Ahrens <matthew.ahrens@delphix.com>
Reviewed by: Christopher Siden <chris.siden@delphix.com>
Approved by: Garrett D'Amore <garrett@damore.org>

References:
  illumos/illumos-gate@31d7e8fa33
  https://www.illumos.org/issues/3306
  https://www.illumos.org/issues/3321

The vdev_file.c implementation in this patch diverges significantly
from the upstream version.  For consistency with the vdev_disk.c
code, the upstream version leverages the Illumos bio interfaces.
This makes sense for Illumos but not for ZoL for two reasons.

1) The vdev_disk.c code in ZoL has been rewritten to use the
   Linux block device interfaces which differ significantly
   from those in Illumos.  Therefore, updating the vdev_file.c
   to use the Illumos interfaces doesn't get you consistency
   with vdev_disk.c.

2) Using the upstream patch as-is would require implementing
   compatibility code for those Solaris block device interfaces
   in user and kernel space.  That additional complexity could
   lead to confusion and doesn't buy us anything.

For these reasons I've opted to simply move the existing vn_rdwr()
call, as-is, into the taskq function.  This has the advantage of
being low risk and easy to understand.  Moving the vn_rdwr() call
into its own taskq thread also neatly avoids the possibility of a
stack overflow.

Finally, because of the additional work now handled by the free
taskq, the number of threads has been increased.  The thread count
under Illumos defaults to 100 but was decreased to 2 in commit
08d08e due to contention.  We increase it to 8 until the contention
can be addressed by porting Illumos #3581.

Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1354
Commit: 5853fe790d (parent 5165473737)
Author: George Wilson, 2013-05-02 16:36:32 -07:00
Committed by: Brian Behlendorf
7 changed files with 169 additions and 65 deletions

cmd/zdb/zdb.c

@@ -86,6 +86,7 @@ extern void dump_intent_log(zilog_t *);
 uint64_t *zopt_object = NULL;
 int zopt_objects = 0;
 libzfs_handle_t *g_zfs;
+uint64_t max_inflight = 200;
 
 /*
  * These libumem hooks provide a reasonable set of defaults for the allocator's
@@ -108,13 +109,14 @@ usage(void)
 {
 	(void) fprintf(stderr,
 	    "Usage: %s [-CumdibcsDvhLXFPA] [-t txg] [-e [-p path...]] "
-	    "poolname [object...]\n"
-	    " %s [-divPA] [-e -p path...] dataset [object...]\n"
-	    " %s -m [-LXFPA] [-t txg] [-e [-p path...]] "
+	    "[-U config] [-M inflight I/Os] poolname [object...]\n"
+	    " %s [-divPA] [-e -p path...] [-U config] dataset "
+	    "[object...]\n"
+	    " %s -m [-LXFPA] [-t txg] [-e [-p path...]] [-U config] "
 	    "poolname [vdev [metaslab...]]\n"
 	    " %s -R [-A] [-e [-p path...]] poolname "
 	    "vdev:offset:size[:flags]\n"
-	    " %s -S [-PA] [-e [-p path...]] poolname\n"
+	    " %s -S [-PA] [-e [-p path...]] [-U config] poolname\n"
 	    " %s -l [-uA] device\n"
 	    " %s -C [-A] [-U config]\n\n",
 	    cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname);
@@ -161,6 +163,8 @@ usage(void)
 	(void) fprintf(stderr, " -P print numbers in parseable form\n");
 	(void) fprintf(stderr, " -t <txg> -- highest txg to use when "
 	    "searching for uberblocks\n");
+	(void) fprintf(stderr, " -M <number of inflight I/Os> -- "
+	    "specify the maximum number of checksumming I/Os [default is 200]");
 	(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
 	    "to make only that option verbose\n");
 	(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
@@ -2005,6 +2009,45 @@ zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
 	    bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
 }
 
+static void
+zdb_blkptr_done(zio_t *zio)
+{
+	spa_t *spa = zio->io_spa;
+	blkptr_t *bp = zio->io_bp;
+	int ioerr = zio->io_error;
+	zdb_cb_t *zcb = zio->io_private;
+	zbookmark_t *zb = &zio->io_bookmark;
+
+	zio_data_buf_free(zio->io_data, zio->io_size);
+
+	mutex_enter(&spa->spa_scrub_lock);
+	spa->spa_scrub_inflight--;
+	cv_broadcast(&spa->spa_scrub_io_cv);
+
+	if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
+		char blkbuf[BP_SPRINTF_LEN];
+
+		zcb->zcb_haderrors = 1;
+		zcb->zcb_errors[ioerr]++;
+
+		if (dump_opt['b'] >= 2)
+			sprintf_blkptr(blkbuf, bp);
+		else
+			blkbuf[0] = '\0';
+
+		(void) printf("zdb_blkptr_cb: "
+		    "Got error %d reading "
+		    "<%llu, %llu, %lld, %llx> %s -- skipping\n",
+		    ioerr,
+		    (u_longlong_t)zb->zb_objset,
+		    (u_longlong_t)zb->zb_object,
+		    (u_longlong_t)zb->zb_level,
+		    (u_longlong_t)zb->zb_blkid,
+		    blkbuf);
+	}
+	mutex_exit(&spa->spa_scrub_lock);
+}
+
 /* ARGSUSED */
 static int
 zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
@@ -2026,39 +2069,23 @@ zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
 	is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
 
 	if (dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata)) {
-		int ioerr;
 		size_t size = BP_GET_PSIZE(bp);
-		void *data = malloc(size);
+		void *data = zio_data_buf_alloc(size);
 		int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
 
 		/* If it's an intent log block, failure is expected. */
 		if (zb->zb_level == ZB_ZIL_LEVEL)
 			flags |= ZIO_FLAG_SPECULATIVE;
 
-		ioerr = zio_wait(zio_read(NULL, spa, bp, data, size,
-		    NULL, NULL, ZIO_PRIORITY_ASYNC_READ, flags, zb));
-
-		free(data);
-
-		if (ioerr && !(flags & ZIO_FLAG_SPECULATIVE)) {
-			zcb->zcb_haderrors = 1;
-			zcb->zcb_errors[ioerr]++;
-
-			if (dump_opt['b'] >= 2)
-				sprintf_blkptr(blkbuf, bp);
-			else
-				blkbuf[0] = '\0';
-
-			(void) printf("zdb_blkptr_cb: "
-			    "Got error %d reading "
-			    "<%llu, %llu, %lld, %llx> %s -- skipping\n",
-			    ioerr,
-			    (u_longlong_t)zb->zb_objset,
-			    (u_longlong_t)zb->zb_object,
-			    (u_longlong_t)zb->zb_level,
-			    (u_longlong_t)zb->zb_blkid,
-			    blkbuf);
-		}
+		mutex_enter(&spa->spa_scrub_lock);
+		while (spa->spa_scrub_inflight > max_inflight)
+			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
+		spa->spa_scrub_inflight++;
+		mutex_exit(&spa->spa_scrub_lock);
+
+		zio_nowait(zio_read(NULL, spa, bp, data, size,
+		    zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
 	}
 
 	zcb->zcb_readfails = 0;
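The two hunks above work as a pair: zdb_blkptr_cb() now issues reads
asynchronously with zio_nowait(), bumping spa_scrub_inflight and sleeping on
spa_scrub_io_cv whenever more than max_inflight reads are outstanding, while
zdb_blkptr_done() frees the buffer, decrements the counter, wakes the issuer,
and reports any errors.  A minimal standalone sketch of the same
counting-throttle pattern, written against POSIX threads rather than the SPL
mutex/condvar primitives (all names here are illustrative, not taken from
zdb):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Illustrative only; zdb keeps this state in the spa_t. */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static unsigned inflight;
	static const unsigned max_inflight = 4;	/* zdb's default is 200 */

	/* Completion handler: the analog of zdb_blkptr_done(). */
	static void *
	io_done(void *arg)
	{
		(void) arg;
		usleep(1000);			/* pretend the read took a while */
		pthread_mutex_lock(&lock);
		inflight--;
		pthread_cond_broadcast(&cv);	/* wake the throttled issuer */
		pthread_mutex_unlock(&lock);
		return (NULL);
	}

	/* Issuer: the analog of the zio_nowait() path in zdb_blkptr_cb(). */
	static void
	issue_throttled(void)
	{
		pthread_t tid;

		pthread_mutex_lock(&lock);
		while (inflight > max_inflight)	/* block while too many in flight */
			pthread_cond_wait(&cv, &lock);
		inflight++;
		pthread_mutex_unlock(&lock);

		(void) pthread_create(&tid, NULL, io_done, NULL);
		(void) pthread_detach(tid);	/* fire and forget */
	}

	int
	main(void)
	{
		int i;

		for (i = 0; i < 100; i++)
			issue_throttled();

		/* Drain: wait until everything issued has completed. */
		pthread_mutex_lock(&lock);
		while (inflight > 0)
			pthread_cond_wait(&cv, &lock);
		pthread_mutex_unlock(&lock);

		(void) printf("issued 100 throttled I/Os\n");
		return (0);
	}

zdb issues its reads through the ZIO pipeline instead of spawning threads,
but the throttling logic, a shared counter guarded by a mutex plus a
condition variable signalled on completion, is the same shape.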
@@ -2266,6 +2293,18 @@ dump_block_stats(spa_t *spa)
 	zcb.zcb_haderrors |= traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
 
+	/*
+	 * If we've traversed the data blocks then we need to wait for those
+	 * I/Os to complete. We leverage "The Godfather" zio to wait on
+	 * all async I/Os to complete.
+	 */
+	if (dump_opt['c']) {
+		(void) zio_wait(spa->spa_async_zio_root);
+		spa->spa_async_zio_root = zio_root(spa, NULL, NULL,
+		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
+		    ZIO_FLAG_GODFATHER);
+	}
+
 	if (zcb.zcb_haderrors) {
 		(void) printf("\nError counts:\n\n");
 		(void) printf("\t%5s %s\n", "errno", "count");
@@ -3026,7 +3065,7 @@ main(int argc, char **argv)
 	dprintf_setup(&argc, argv);
 
-	while ((c = getopt(argc, argv, "bcdhilmsuCDRSAFLXevp:t:U:P")) != -1) {
+	while ((c = getopt(argc, argv, "bcdhilmM:suCDRSAFLXevp:t:U:P")) != -1) {
 		switch (c) {
 		case 'b':
 		case 'c':
@@ -3055,6 +3094,15 @@ main(int argc, char **argv)
 		case 'v':
 			verbose++;
 			break;
+		case 'M':
+			max_inflight = strtoull(optarg, NULL, 0);
+			if (max_inflight == 0) {
+				(void) fprintf(stderr, "maximum number "
+				    "of inflight I/Os must be greater "
+				    "than 0\n");
+				usage();
+			}
+			break;
 		case 'p':
 			if (searchdirs == NULL) {
 				searchdirs = umem_alloc(sizeof (char *),
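With -M added to the getopt() string and usage() text, the deeper read queue
can be controlled from the command line; for example (pool name hypothetical):

	zdb -c -M 500 tank

allows roughly 500 checksum reads in flight, instead of the default 200,
while traversing the pool "tank" with -c.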

cmd/zpool/zpool_main.c

@@ -246,7 +246,7 @@ get_usage(zpool_help_t idx) {
 	case HELP_REMOVE:
 		return (gettext("\tremove <pool> <device> ...\n"));
 	case HELP_REOPEN:
-		return (""); /* Undocumented command */
+		return (gettext("\treopen <pool>\n"));
 	case HELP_SCRUB:
 		return (gettext("\tscrub [-s] <pool> ...\n"));
 	case HELP_STATUS:
@@ -3612,22 +3612,37 @@ zpool_do_reguid(int argc, char **argv)
  * zpool reopen <pool>
  *
  * Reopen the pool so that the kernel can update the sizes of all vdevs.
- *
- * NOTE: This command is currently undocumented. If the command is ever
- * exposed then the appropriate usage() messages will need to be made.
  */
 int
 zpool_do_reopen(int argc, char **argv)
 {
+	int c;
 	int ret = 0;
 	zpool_handle_t *zhp;
 	char *pool;
 
+	/* check options */
+	while ((c = getopt(argc, argv, "")) != -1) {
+		switch (c) {
+		case '?':
+			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
+			    optopt);
+			usage(B_FALSE);
+		}
+	}
+
 	argc--;
 	argv++;
 
-	if (argc != 1)
-		return (2);
+	if (argc < 1) {
+		(void) fprintf(stderr, gettext("missing pool name\n"));
+		usage(B_FALSE);
+	}
+	if (argc > 1) {
+		(void) fprintf(stderr, gettext("too many arguments\n"));
+		usage(B_FALSE);
+	}
 
 	pool = argv[0];
 
 	if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL)
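With the option and argument checking in place, the newly documented command
is invoked like the other single-pool subcommands; for example (pool name
hypothetical):

	zpool reopen tank

reopens every vdev in "tank" so the kernel can pick up updated device sizes,
and a missing pool name or extra arguments now produce an error message and
the usage text instead of a bare exit code.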

man/man8/zdb.8

@@ -11,6 +11,7 @@
 .\"
 .\"
 .\" Copyright 2012, Richard Lowe.
+.\" Copyright (c) 2012 by Delphix. All rights reserved.
 .\"
 
 .TH "ZDB" "8" "February 15, 2012" "" ""
@@ -19,21 +20,23 @@
 .SH "SYNOPSIS"
 \fBzdb\fR [-CumdibcsDvhLXFPA] [-e [-p \fIpath\fR...]] [-t \fItxg\fR]
-\fIpoolname\fR [\fIobject\fR ...]
+[-U \fIcache\fR] [-M \fIinflight I/Os\fR] [\fIpoolname\fR
+[\fIobject\fR ...]]
 .P
-\fBzdb\fR [-divPA] [-e [-p \fIpath\fR...]] \fIdataset\fR [\fIobject\fR ...]
+\fBzdb\fR [-divPA] [-e [-p \fIpath\fR...]] [-U \fIcache\fR]
+\fIdataset\fR [\fIobject\fR ...]
 .P
-\fBzdb\fR -m [-LXFPA] [-t \fItxg\fR] [-e [-p \fIpath\fR...]] \fIpoolname\fR
-[\fIvdev\fR [\fImetaslab\fR ...]]
+\fBzdb\fR -m [-LXFPA] [-t \fItxg\fR] [-e [-p \fIpath\fR...]] [-U \fIcache\fR]
+\fIpoolname\fR [\fIvdev\fR [\fImetaslab\fR ...]]
 .P
-\fBzdb\fR -R [-A] [-e [-p \fIpath\fR...]] \fIpoolname\fR
+\fBzdb\fR -R [-A] [-e [-p \fIpath\fR...]] [-U \fIcache\fR] \fIpoolname\fR
 \fIvdev\fR:\fIoffset\fR:\fIsize\fR[:\fIflags\fR]
 .P
-\fBzdb\fR -S [-AP] [-e [-p \fIpath\fR...]] \fIpoolname\fR
+\fBzdb\fR -S [-AP] [-e [-p \fIpath\fR...]] [-U \fIcache\fR] \fIpoolname\fR
 .P
 \fBzdb\fR -l [-uA] \fIdevice\fR
@@ -354,6 +357,18 @@ Attempt to make an unreadable pool readable by trying progressively older
 transactions.
 .RE
 
+.sp
+.ne 2
+.na
+\fB-M \fIinflight I/Os\fR \fR
+.ad
+.sp .6
+.RS 4n
+Limit the number of outstanding checksum I/Os to the specified value. The
+default value is 200. This option affects the performance of the \fB-c\fR
+option.
+.RE
+
 .sp
 .ne 2
 .na
@@ -384,8 +399,7 @@ and their associated transaction numbers.
 .ad
 .sp .6
 .RS 4n
-Use a cache file other than \fB/etc/zfs/zpool.cache\fR. This option is only
-valid with \fB-C\fR
+Use a cache file other than \fB/etc/zfs/zpool.cache\fR.
 .RE
 
 .sp

man/man8/zpool.8

@@ -112,6 +112,11 @@ zpool \- configures ZFS storage pools
 \fBzpool reguid\fR \fIpool\fR
 .fi
 
+.LP
+.nf
+\fBzpool reopen\fR \fIpool\fR
+.fi
+
 .LP
 .nf
 \fBzpool remove\fR \fIpool\fR \fIdevice\fR ...
@@ -1508,8 +1513,18 @@ Expand the device to use all available space. If the device is part of a mirror
 .ad
 .sp .6
 .RS 4n
-Generates a new unique identifier for the pool. You must ensure that all devices in this pool are online and
-healthy before performing this action.
+Generates a new unique identifier for the pool. You must ensure that all
+devices in this pool are online and healthy before performing this action.
+.RE
+
+.sp
+.ne 2
+.na
+\fB\fBzpool reopen\fR \fIpool\fR
+.ad
+.sp .6
+.RS 4n
+Reopen all the vdevs associated with the pool.
 .RE
 
 .sp

module/zfs/spa.c

@@ -110,7 +110,7 @@ const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
 	{ ZTI_FIX(8),	ZTI_NULL,	ZTI_BATCH,	ZTI_NULL },
 	{ ZTI_BATCH,	ZTI_FIX(5),	ZTI_FIX(16),	ZTI_FIX(5) },
-	{ ZTI_PCT(100),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
+	{ ZTI_FIX(8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
 	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL },
 };
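For context: the columns of zio_taskqs are the ISSUE, ISSUE_HIGH, INTR and
INTR_HIGH taskqs, and the row changed here is the ZIO_TYPE_FREE row, so the
free/issue taskq moves from a percentage-of-CPUs sizing (ZTI_PCT) to a fixed
pool of eight threads (ZTI_FIX(8)); this is the thread-count bump described
in the commit message, and it is the same taskq vdev_file.c below dispatches
its vn_rdwr() work onto.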

module/zfs/vdev_file.c

@@ -25,6 +25,7 @@
 #include <sys/zfs_context.h>
 #include <sys/spa.h>
+#include <sys/spa_impl.h>
 #include <sys/vdev_file.h>
 #include <sys/vdev_impl.h>
 #include <sys/zio.h>
@@ -139,21 +140,39 @@ vdev_file_close(vdev_t *vd)
 	vd->vdev_tsd = NULL;
 }
 
+static void
+vdev_file_io_strategy(void *arg)
+{
+	zio_t *zio = (zio_t *)arg;
+	vdev_t *vd = zio->io_vd;
+	vdev_file_t *vf = vd->vdev_tsd;
+	ssize_t resid;
+
+	zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
+	    UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data,
+	    zio->io_size, zio->io_offset, UIO_SYSSPACE,
+	    0, RLIM64_INFINITY, kcred, &resid);
+
+	if (resid != 0 && zio->io_error == 0)
+		zio->io_error = ENOSPC;
+
+	zio_interrupt(zio);
+}
+
 static int
 vdev_file_io_start(zio_t *zio)
 {
+	spa_t *spa = zio->io_spa;
 	vdev_t *vd = zio->io_vd;
-	vdev_file_t *vf;
-	ssize_t resid = 0;
-
-	if (!vdev_readable(vd)) {
-		zio->io_error = ENXIO;
-		return (ZIO_PIPELINE_CONTINUE);
-	}
-
-	vf = vd->vdev_tsd;
+	vdev_file_t *vf = vd->vdev_tsd;
 
 	if (zio->io_type == ZIO_TYPE_IOCTL) {
+		/* XXPOLICY */
+		if (!vdev_readable(vd)) {
+			zio->io_error = ENXIO;
+			return (ZIO_PIPELINE_CONTINUE);
+		}
+
 		switch (zio->io_cmd) {
 		case DKIOCFLUSHWRITECACHE:
 			zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC,
@@ -166,15 +185,8 @@ vdev_file_io_start(zio_t *zio)
 		return (ZIO_PIPELINE_CONTINUE);
 	}
 
-	zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
-	    UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data,
-	    zio->io_size, zio->io_offset, UIO_SYSSPACE,
-	    0, RLIM64_INFINITY, kcred, &resid);
-
-	if (resid != 0 && zio->io_error == 0)
-		zio->io_error = ENOSPC;
-
-	zio_interrupt(zio);
+	taskq_dispatch_ent(spa->spa_zio_taskq[ZIO_TYPE_FREE][ZIO_TASKQ_ISSUE],
+	    vdev_file_io_strategy, zio, 0, &zio->io_tqent);
 
 	return (ZIO_PIPELINE_STOP);
 }

module/zfs/zio.c

@@ -3062,7 +3062,7 @@ zio_done(zio_t *zio)
 		 * Hand it off to the otherwise-unused claim taskq.
 		 */
 		ASSERT(taskq_empty_ent(&zio->io_tqent));
-		(void) taskq_dispatch_ent(
+		taskq_dispatch_ent(
 		    zio->io_spa->spa_zio_taskq[ZIO_TYPE_CLAIM][ZIO_TASKQ_ISSUE],
 		    (task_func_t *)zio_reexecute, zio, 0,
 		    &zio->io_tqent);
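The (void) cast can go because taskq_dispatch_ent(), unlike taskq_dispatch(),
dispatches into a caller-supplied, preallocated taskq_ent_t and returns void,
so there is no taskqid_t to discard.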