Add subcommand to wait for background zfs activity to complete

Currently the best way to wait for the completion of a long-running
operation in a pool, like a scrub or device removal, is to poll 'zpool
status' and parse its output, which is neither efficient nor convenient.

This change adds a 'wait' subcommand to the zpool command. When invoked,
'zpool wait' will block until a specified type of background activity
completes. Currently, this subcommand can wait for any of the following:

 - Scrubs or resilvers to complete
 - Devices to be initialized
 - Devices to be replaced
 - Devices to be removed
 - Checkpoints to be discarded
 - Background freeing to complete

For example, a scrub that is in progress could be waited for by running

    zpool wait -t scrub <pool>

This also adds a -w flag to the attach, checkpoint, initialize, replace,
remove, and scrub subcommands. When used, this flag makes the operations
kicked off by these subcommands synchronous instead of asynchronous.

This functionality is implemented using a new ioctl. The type of
activity to wait for is provided as input to the ioctl, and the ioctl
blocks until all activity of that type has completed. An ioctl was used
over other methods of kernel-userspace communication primarily for the
sake of portability.

Porting Notes:
This is ported from Delphix OS change DLPX-44432. The following changes
were made while porting:

 - Added ZoL-style ioctl input declaration.
 - Reorganized error handling in zpool_initialize in libzfs to integrate
   better with changes made for TRIM support.
 - Fixed check for whether a checkpoint discard is in progress.
   Previously it also waited if the pool had a checkpoint, instead of
   just if a checkpoint was being discarded.
 - Exposed zfs_initialize_chunk_size as a ZoL-style tunable.
 - Updated more existing tests to make use of new 'zpool wait'
   functionality, tests that don't exist in Delphix OS.
 - Used existing ZoL tunable zfs_scan_suspend_progress, together with
   zinject, in place of a new tunable zfs_scan_max_blks_per_txg.
 - Added support for a non-integral interval argument to zpool wait.

Future work:
ZoL has support for trimming devices, which Delphix OS does not. In the
future, 'zpool wait' could be extended to add the ability to wait for
trim operations to complete.

Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: John Gallagher <john.gallagher@delphix.com>
Closes #9162
This commit is contained in:
John Gallagher 2019-09-13 18:09:06 -07:00 committed by Brian Behlendorf
parent 7238cbd4d3
commit e60e158eff
61 changed files with 2662 additions and 144 deletions

View File

@ -43,10 +43,13 @@
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <time.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
@ -118,6 +121,8 @@ static int zpool_do_sync(int, char **);
static int zpool_do_version(int, char **);
static int zpool_do_wait(int, char **);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
@ -168,7 +173,8 @@ typedef enum {
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN,
HELP_VERSION
HELP_VERSION,
HELP_WAIT
} zpool_help_t;
@ -309,6 +315,8 @@ static zpool_command_t command_table[] = {
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
{ NULL },
{ "wait", zpool_do_wait, HELP_WAIT },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
@ -328,7 +336,7 @@ get_usage(zpool_help_t idx)
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-f] [-o property=value] "
return (gettext("\tattach [-fw] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [-nF] <pool> [device]\n"));
@ -337,7 +345,7 @@ get_usage(zpool_help_t idx)
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_CHECKPOINT:
return (gettext("\tcheckpoint [--discard] <pool> ...\n"));
return (gettext("\tcheckpoint [-d [-w]] <pool> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
@ -371,17 +379,17 @@ get_usage(zpool_help_t idx)
case HELP_ONLINE:
return (gettext("\tonline [-e] <pool> <device> ...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-f] [-o property=value] "
return (gettext("\treplace [-fw] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
return (gettext("\tremove [-nps] <pool> <device> ...\n"));
return (gettext("\tremove [-npsw] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_INITIALIZE:
return (gettext("\tinitialize [-c | -s] <pool> "
return (gettext("\tinitialize [-c | -s] [-w] <pool> "
"[<device> ...]\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] <pool> ...\n"));
return (gettext("\tscrub [-s | -p] [-w] <pool> ...\n"));
case HELP_RESILVER:
return (gettext("\tresilver <pool> ...\n"));
case HELP_TRIM:
@ -412,6 +420,9 @@ get_usage(zpool_help_t idx)
return (gettext("\tsync [pool] ...\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_WAIT:
return (gettext("\twait [-Hp] [-T d|u] [-t <activity>[,...]] "
"<pool> [interval]\n"));
}
abort();
@ -530,12 +541,13 @@ usage(boolean_t requested)
}
/*
* zpool initialize [-c | -s] <pool> [<vdev> ...]
* zpool initialize [-c | -s] [-w] <pool> [<vdev> ...]
* Initialize all unused blocks in the specified vdevs, or all vdevs in the pool
* if none specified.
*
* -c Cancel. Ends active initializing.
* -s Suspend. Initializing can then be restarted with no flags.
* -w Wait. Blocks until initializing has completed.
*/
int
zpool_do_initialize(int argc, char **argv)
@ -545,15 +557,17 @@ zpool_do_initialize(int argc, char **argv)
zpool_handle_t *zhp;
nvlist_t *vdevs;
int err = 0;
boolean_t wait = B_FALSE;
struct option long_options[] = {
{"cancel", no_argument, NULL, 'c'},
{"suspend", no_argument, NULL, 's'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
pool_initialize_func_t cmd_type = POOL_INITIALIZE_START;
while ((c = getopt_long(argc, argv, "cs", long_options, NULL)) != -1) {
while ((c = getopt_long(argc, argv, "csw", long_options, NULL)) != -1) {
switch (c) {
case 'c':
if (cmd_type != POOL_INITIALIZE_START &&
@ -573,6 +587,9 @@ zpool_do_initialize(int argc, char **argv)
}
cmd_type = POOL_INITIALIZE_SUSPEND;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
if (optopt != 0) {
(void) fprintf(stderr,
@ -595,6 +612,12 @@ zpool_do_initialize(int argc, char **argv)
return (-1);
}
if (wait && (cmd_type != POOL_INITIALIZE_START)) {
(void) fprintf(stderr, gettext("-w cannot be used with -c or "
"-s\n"));
usage(B_FALSE);
}
poolname = argv[0];
zhp = zpool_open(g_zfs, poolname);
if (zhp == NULL)
@ -613,6 +636,9 @@ zpool_do_initialize(int argc, char **argv)
}
}
if (wait)
err = zpool_initialize_wait(zhp, cmd_type, vdevs);
else
err = zpool_initialize(zhp, cmd_type, vdevs);
fnvlist_free(vdevs);
@ -962,7 +988,7 @@ zpool_do_add(int argc, char **argv)
}
/*
* zpool remove <pool> <vdev> ...
* zpool remove [-npsw] <pool> <vdev> ...
*
* Removes the given vdev from the pool.
*/
@ -976,9 +1002,10 @@ zpool_do_remove(int argc, char **argv)
int c;
boolean_t noop = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t wait = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "nps")) != -1) {
while ((c = getopt(argc, argv, "npsw")) != -1) {
switch (c) {
case 'n':
noop = B_TRUE;
@ -989,6 +1016,9 @@ zpool_do_remove(int argc, char **argv)
case 's':
stop = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -1022,6 +1052,11 @@ zpool_do_remove(int argc, char **argv)
}
if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
if (wait) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -w cannot be used with -s\n"));
usage(B_FALSE);
}
} else {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device\n"));
@ -1053,6 +1088,9 @@ zpool_do_remove(int argc, char **argv)
ret = 1;
}
}
if (ret == 0 && wait)
ret = zpool_wait(zhp, ZPOOL_WAIT_REMOVE);
}
zpool_close(zhp);
@ -2874,28 +2912,36 @@ name_or_guid_exists(zpool_handle_t *zhp, void *data)
* -d Discard the checkpoint from a checkpointed
* --discard pool.
*
* -w Wait for discarding a checkpoint to complete.
* --wait
*
* Checkpoints the specified pool, by taking a "snapshot" of its
* current state. A pool can only have one checkpoint at a time.
*/
int
zpool_do_checkpoint(int argc, char **argv)
{
boolean_t discard;
boolean_t discard, wait;
char *pool;
zpool_handle_t *zhp;
int c, err;
struct option long_options[] = {
{"discard", no_argument, NULL, 'd'},
{"wait", no_argument, NULL, 'w'},
{0, 0, 0, 0}
};
discard = B_FALSE;
while ((c = getopt_long(argc, argv, ":d", long_options, NULL)) != -1) {
wait = B_FALSE;
while ((c = getopt_long(argc, argv, ":dw", long_options, NULL)) != -1) {
switch (c) {
case 'd':
discard = B_TRUE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -2903,6 +2949,12 @@ zpool_do_checkpoint(int argc, char **argv)
}
}
if (wait && !discard) {
(void) fprintf(stderr, gettext("--wait only valid when "
"--discard also specified\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
@ -2928,10 +2980,13 @@ zpool_do_checkpoint(int argc, char **argv)
return (1);
}
if (discard)
if (discard) {
err = (zpool_discard_checkpoint(zhp) != 0);
else
if (err == 0 && wait)
err = zpool_wait(zhp, ZPOOL_WAIT_CKPT_DISCARD);
} else {
err = (zpool_checkpoint(zhp) != 0);
}
zpool_close(zhp);
@ -4907,6 +4962,24 @@ fsleep(float sec)
nanosleep(&req, NULL);
}
/*
 * Return the height of the controlling terminal in rows, or -1 when stdout
 * is not attached to a TTY or the size query fails.
 */
static int
terminal_height(void)
{
	struct winsize win;

	if (!isatty(STDOUT_FILENO))
		return (-1);

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &win) == -1 || win.ws_row == 0)
		return (-1);

	return (win.ws_row);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
@ -5047,7 +5120,6 @@ zpool_do_iostat(int argc, char **argv)
int npools;
float interval = 0;
unsigned long count = 0;
struct winsize win;
int winheight = 24;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
@ -5335,25 +5407,19 @@ zpool_do_iostat(int argc, char **argv)
cb.vcdl = NULL;
}
/*
* Are we connected to TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (isatty(fileno(stdout)) == 0)
headers_once = B_TRUE;
/*
* Check terminal size so we can print headers
* even when terminal window has its height
* changed.
*/
if (headers_once == B_FALSE) {
if (ioctl(1, TIOCGWINSZ, &win) != -1 &&
win.ws_row > 0)
winheight = win.ws_row;
else
winheight = terminal_height();
/*
* Are we connected to TTY? If not, headers_once
* should be true, to avoid breaking scripts.
*/
if (winheight < 0)
headers_once = B_TRUE;
}
/*
* If it's the first time and we're not skipping it,
@ -5943,6 +6009,7 @@ static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
boolean_t wait = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
@ -5952,7 +6019,7 @@ zpool_do_attach_or_replace(int argc, char **argv, int replacing)
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:")) != -1) {
while ((c = getopt(argc, argv, "fo:w")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
@ -5970,6 +6037,9 @@ zpool_do_attach_or_replace(int argc, char **argv, int replacing)
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -6053,6 +6123,10 @@ zpool_do_attach_or_replace(int argc, char **argv, int replacing)
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing);
if (ret == 0 && wait)
ret = zpool_wait(zhp,
replacing ? ZPOOL_WAIT_REPLACE : ZPOOL_WAIT_RESILVER);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
@ -6061,9 +6135,11 @@ zpool_do_attach_or_replace(int argc, char **argv, int replacing)
}
/*
* zpool replace [-f] <pool> <device> <new_device>
* zpool replace [-fw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -o Set property=value.
* -w Wait for replacing to complete before returning
*
* Replace <device> with <new_device>.
*/
@ -6075,10 +6151,11 @@ zpool_do_replace(int argc, char **argv)
}
/*
* zpool attach [-f] [-o property=value] <pool> <device> <new_device>
* zpool attach [-fw] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -o Set property=value.
* -w Wait for resilvering to complete before returning
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
@ -6643,8 +6720,6 @@ zpool_do_reopen(int argc, char **argv)
typedef struct scrub_cbdata {
int cb_type;
int cb_argc;
char **cb_argv;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
@ -6701,23 +6776,33 @@ scrub_callback(zpool_handle_t *zhp, void *data)
return (err != 0);
}
/*
 * for_each_pool() callback: block until the activity pointed to by 'data'
 * has completed in the given pool.
 */
static int
wait_callback(zpool_handle_t *zhp, void *data)
{
	zpool_wait_activity_t *activity = data;

	return (zpool_wait(zhp, *activity));
}
/*
* zpool scrub [-s | -p] <pool> ...
* zpool scrub [-s | -p] [-w] <pool> ...
*
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pause in-progress scrub.
* -w Wait. Blocks until scrub has completed.
*/
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
boolean_t wait = B_FALSE;
int error;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "sp")) != -1) {
while ((c = getopt(argc, argv, "spw")) != -1) {
switch (c) {
case 's':
cb.cb_type = POOL_SCAN_NONE;
@ -6725,6 +6810,9 @@ zpool_do_scrub(int argc, char **argv)
case 'p':
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
break;
case 'w':
wait = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
@ -6739,8 +6827,13 @@ zpool_do_scrub(int argc, char **argv)
usage(B_FALSE);
}
cb.cb_argc = argc;
cb.cb_argv = argv;
if (wait && (cb.cb_type == POOL_SCAN_NONE ||
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE)) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-w cannot be used with -p or -s\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
@ -6749,7 +6842,15 @@ zpool_do_scrub(int argc, char **argv)
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
error = for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb);
if (wait && !error) {
zpool_wait_activity_t act = ZPOOL_WAIT_SCRUB;
error = for_each_pool(argc, argv, B_TRUE, NULL, wait_callback,
&act);
}
return (error);
}
/*
@ -6765,8 +6866,6 @@ zpool_do_resilver(int argc, char **argv)
cb.cb_type = POOL_SCAN_RESILVER;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
cb.cb_argc = argc;
cb.cb_argv = argv;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
@ -9215,6 +9314,382 @@ zpool_do_set(int argc, char **argv)
return (error);
}
/* Add up the total number of bytes left to initialize across all vdevs */
static uint64_t
vdev_initialize_remaining(nvlist_t *nv)
{
	vdev_stat_t *vs;
	nvlist_t **children;
	uint_t i, nchildren;
	uint64_t remaining = 0;

	verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &i) == 0);

	/* Only an actively-initializing vdev contributes outstanding work. */
	if (vs->vs_initialize_state == VDEV_INITIALIZE_ACTIVE) {
		remaining = vs->vs_initialize_bytes_est -
		    vs->vs_initialize_bytes_done;
	}

	/* Recurse into interior vdevs, if any. */
	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &children, &nchildren) != 0)
		nchildren = 0;

	for (i = 0; i < nchildren; i++)
		remaining += vdev_initialize_remaining(children[i]);

	return (remaining);
}
/* Whether any vdevs are 'spare' or 'replacing' vdevs */
static boolean_t
vdev_any_spare_replacing(nvlist_t *nv)
{
	nvlist_t **child;
	uint_t c, children;
	char *vdev_type = NULL;

	/*
	 * Check the lookup result before using vdev_type: the original code
	 * ignored it and passed a potentially uninitialized pointer to
	 * strcmp() when ZPOOL_CONFIG_TYPE was absent, which is undefined
	 * behavior.
	 */
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &vdev_type) == 0 &&
	    (strcmp(vdev_type, VDEV_TYPE_REPLACING) == 0 ||
	    strcmp(vdev_type, VDEV_TYPE_SPARE) == 0)) {
		return (B_TRUE);
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		children = 0;

	/* Recurse: a 'spare'/'replacing' vdev anywhere in the tree counts. */
	for (c = 0; c < children; c++) {
		if (vdev_any_spare_replacing(child[c]))
			return (B_TRUE);
	}

	return (B_FALSE);
}
/* State shared between zpool_do_wait() and its status-printing thread. */
typedef struct wait_data {
	char *wd_poolname;		/* pool being waited on */
	boolean_t wd_scripted;		/* -H: no headers, tab-separated */
	boolean_t wd_exact;		/* -p: exact (parsable) byte counts */
	boolean_t wd_headers_once;	/* print the column header only once */
	/* Which activities to wait for */
	boolean_t wd_enabled[ZPOOL_WAIT_NUM_ACTIVITIES];
	float wd_interval;		/* seconds between rows; may be fractional */
	sem_t wd_sem;			/* posted by main thread to stop status thread */
} wait_data_t;
/*
 * Print to stdout a single line, containing one column for each activity that
 * we are waiting for specifying how many bytes of work are left for that
 * activity.
 */
static void
print_wait_status_row(wait_data_t *wd, zpool_handle_t *zhp, int row)
{
	nvlist_t *config, *nvroot;
	uint_t c;
	int i;
	pool_checkpoint_stat_t *pcs = NULL;
	pool_scan_stat_t *pss = NULL;
	pool_removal_stat_t *prs = NULL;
	/* NOTE(review): order must match zpool_wait_activity_t enum values */
	char *headers[] = {"DISCARD", "FREE", "INITIALIZE", "REPLACE",
	    "REMOVE", "RESILVER", "SCRUB"};
	int col_widths[ZPOOL_WAIT_NUM_ACTIVITIES];

	/* Calculate the width of each column */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		/*
		 * Make sure we have enough space in the col for pretty-printed
		 * numbers and for the column header, and then leave a couple
		 * spaces between cols for readability.
		 */
		col_widths[i] = MAX(strlen(headers[i]), 6) + 2;
	}

	/*
	 * Print header if appropriate: always on the first row, and again
	 * every terminal-height rows unless -n (headers once) was given.
	 */
	int term_height = terminal_height();
	boolean_t reprint_header = (!wd->wd_headers_once && term_height > 0 &&
	    row % (term_height-1) == 0);
	if (!wd->wd_scripted && (row == 0 || reprint_header)) {
		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			if (wd->wd_enabled[i])
				(void) printf("%*s", col_widths[i], headers[i]);
		}
		(void) printf("\n");
	}

	/* Bytes of work remaining in each activity */
	int64_t bytes_rem[ZPOOL_WAIT_NUM_ACTIVITIES] = {0};

	bytes_rem[ZPOOL_WAIT_FREE] =
	    zpool_get_prop_int(zhp, ZPOOL_PROP_FREEING, NULL);

	config = zpool_get_config(zhp, NULL);
	nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);

	/* Checkpoint discard: only counts while a discard is in progress. */
	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t **)&pcs, &c);
	if (pcs != NULL && pcs->pcs_state == CS_CHECKPOINT_DISCARDING)
		bytes_rem[ZPOOL_WAIT_CKPT_DISCARD] = pcs->pcs_space;

	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
	if (prs != NULL && prs->prs_state == DSS_SCANNING)
		bytes_rem[ZPOOL_WAIT_REMOVE] = prs->prs_to_copy -
		    prs->prs_copied;

	/* Scan stats cover both scrub and resilver; pss_func says which. */
	(void) nvlist_lookup_uint64_array(nvroot,
	    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&pss, &c);
	if (pss != NULL && pss->pss_state == DSS_SCANNING &&
	    pss->pss_pass_scrub_pause == 0) {
		int64_t rem = pss->pss_to_examine - pss->pss_issued;
		if (pss->pss_func == POOL_SCAN_SCRUB)
			bytes_rem[ZPOOL_WAIT_SCRUB] = rem;
		else
			bytes_rem[ZPOOL_WAIT_RESILVER] = rem;
	}

	bytes_rem[ZPOOL_WAIT_INITIALIZE] = vdev_initialize_remaining(nvroot);

	/*
	 * A replace finishes after resilvering finishes, so the amount of work
	 * left for a replace is the same as for resilvering.
	 *
	 * It isn't quite correct to say that if we have any 'spare' or
	 * 'replacing' vdevs and a resilver is happening, then a replace is in
	 * progress, like we do here. When a hot spare is used, the faulted vdev
	 * is not removed after the hot spare is resilvered, so parent 'spare'
	 * vdev is not removed either. So we could have a 'spare' vdev, but be
	 * resilvering for a different reason. However, we use it as a heuristic
	 * because we don't have access to the DTLs, which could tell us whether
	 * or not we have really finished resilvering a hot spare.
	 */
	if (vdev_any_spare_replacing(nvroot))
		bytes_rem[ZPOOL_WAIT_REPLACE] = bytes_rem[ZPOOL_WAIT_RESILVER];

	if (timestamp_fmt != NODATE)
		print_timestamp(timestamp_fmt);

	/* Emit one column per enabled activity, formatted per -H/-p flags. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
		char buf[64];
		if (!wd->wd_enabled[i])
			continue;

		if (wd->wd_exact)
			(void) snprintf(buf, sizeof (buf), "%" PRIi64,
			    bytes_rem[i]);
		else
			zfs_nicenum(bytes_rem[i], buf, sizeof (buf));

		if (wd->wd_scripted)
			(void) printf(i == 0 ? "%s" : "\t%s", buf);
		else
			(void) printf(" %*s", col_widths[i] - 1, buf);
	}
	(void) printf("\n");
	(void) fflush(stdout);
}
void *
wait_status_thread(void *arg)
{
wait_data_t *wd = (wait_data_t *)arg;
zpool_handle_t *zhp;
if ((zhp = zpool_open(g_zfs, wd->wd_poolname)) == NULL)
return (void *)(1);
for (int row = 0; ; row++) {
boolean_t missing;
struct timespec timeout;
(void) clock_gettime(CLOCK_REALTIME, &timeout);
if (zpool_refresh_stats(zhp, &missing) != 0 || missing ||
zpool_props_refresh(zhp) != 0) {
zpool_close(zhp);
return (void *)(uintptr_t)(missing ? 0 : 1);
}
print_wait_status_row(wd, zhp, row);
timeout.tv_sec += floor(wd->wd_interval);
long nanos = timeout.tv_nsec +
(wd->wd_interval - floor(wd->wd_interval)) * NANOSEC;
if (nanos >= NANOSEC) {
timeout.tv_sec++;
timeout.tv_nsec = nanos - NANOSEC;
} else {
timeout.tv_nsec = nanos;
}
if (sem_timedwait(&wd->wd_sem, &timeout) == 0) {
break; /* signaled by main thread */
} else if (errno != ETIMEDOUT) {
(void) fprintf(stderr, gettext("sem_timedwait failed: "
"%s\n"), strerror(errno));
zpool_close(zhp);
return (void *)(uintptr_t)(1);
}
}
zpool_close(zhp);
return (void *)(0);
}
/*
 * zpool wait [-Hnp] [-T d|u] [-t <activity>[,...]] <pool> [interval]
 *
 * Block until none of the selected background activities are in progress in
 * the pool. With an interval, also print a status row for each enabled
 * activity every 'interval' seconds (from a helper thread, since the main
 * thread blocks in the wait ioctl).
 */
int
zpool_do_wait(int argc, char **argv)
{
	boolean_t verbose = B_FALSE;
	/*
	 * getopt() returns an int; storing it in a char (as the original code
	 * did) breaks the comparison against -1 on platforms where char is
	 * unsigned.
	 */
	int c;
	char *value;
	int i;
	unsigned long count;
	pthread_t status_thr;
	int error = 0;
	zpool_handle_t *zhp;

	wait_data_t wd;
	wd.wd_scripted = B_FALSE;
	wd.wd_exact = B_FALSE;
	wd.wd_headers_once = B_FALSE;

	(void) sem_init(&wd.wd_sem, 0, 0);

	/* By default, wait for all types of activity. */
	for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++)
		wd.wd_enabled[i] = B_TRUE;

	/*
	 * 'n' added to the optstring: the 'case n' handler below was
	 * previously unreachable because getopt() would never return it.
	 */
	while ((c = getopt(argc, argv, "HnpT:t:")) != -1) {
		switch (c) {
		case 'H':
			wd.wd_scripted = B_TRUE;
			break;
		case 'n':
			wd.wd_headers_once = B_TRUE;
			break;
		case 'p':
			wd.wd_exact = B_TRUE;
			break;
		case 'T':
			get_timestamp_arg(*optarg);
			break;
		case 't':
		{
			/* NOTE: order must match zpool_wait_activity_t */
			static char *col_subopts[] = { "discard", "free",
			    "initialize", "replace", "remove", "resilver",
			    "scrub", NULL };

			/* Reset activities array */
			bzero(&wd.wd_enabled, sizeof (wd.wd_enabled));
			while (*optarg != '\0') {
				int activity = getsubopt(&optarg, col_subopts,
				    &value);

				if (activity < 0) {
					(void) fprintf(stderr,
					    gettext("invalid activity '%s'\n"),
					    value);
					usage(B_FALSE);
				}

				wd.wd_enabled[activity] = B_TRUE;
			}
			break;
		}
		case '?':
			(void) fprintf(stderr, gettext("invalid option '%c'\n"),
			    optopt);
			usage(B_FALSE);
		}
	}

	argc -= optind;
	argv += optind;

	get_interval_count(&argc, argv, &wd.wd_interval, &count);
	if (count != 0) {
		/* This subcmd only accepts an interval, not a count */
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	if (wd.wd_interval != 0)
		verbose = B_TRUE;

	if (argc < 1) {
		(void) fprintf(stderr, gettext("missing 'pool' argument\n"));
		usage(B_FALSE);
	}
	if (argc > 1) {
		(void) fprintf(stderr, gettext("too many arguments\n"));
		usage(B_FALSE);
	}

	wd.wd_poolname = argv[0];

	if ((zhp = zpool_open(g_zfs, wd.wd_poolname)) == NULL)
		return (1);

	if (verbose) {
		/*
		 * We use a separate thread for printing status updates because
		 * the main thread will call lzc_wait(), which blocks as long
		 * as an activity is in progress, which can be a long time.
		 */
		if (pthread_create(&status_thr, NULL, wait_status_thread, &wd)
		    != 0) {
			/* Fixed: concatenation used to read "statusthread" */
			(void) fprintf(stderr, gettext("failed to create "
			    "status thread: %s\n"), strerror(errno));
			zpool_close(zhp);
			return (1);
		}
	}

	/*
	 * Loop over all activities that we are supposed to wait for until none
	 * of them are in progress. Note that this means we can end up waiting
	 * for more activities to complete than just those that were in progress
	 * when we began waiting; if an activity we are interested in begins
	 * while we are waiting for another activity, we will wait for both to
	 * complete before exiting.
	 */
	for (;;) {
		boolean_t missing = B_FALSE;
		boolean_t any_waited = B_FALSE;

		for (i = 0; i < ZPOOL_WAIT_NUM_ACTIVITIES; i++) {
			boolean_t waited;

			if (!wd.wd_enabled[i])
				continue;

			error = zpool_wait_status(zhp, i, &missing, &waited);
			if (error != 0 || missing)
				break;

			any_waited = (any_waited || waited);
		}

		if (error != 0 || missing || !any_waited)
			break;
	}

	zpool_close(zhp);

	if (verbose) {
		uintptr_t status;
		/* Wake the status thread and collect its exit code. */
		(void) sem_post(&wd.wd_sem);
		(void) pthread_join(status_thr, (void *)&status);
		if (status != 0)
			error = status;
	}

	(void) sem_destroy(&wd.wd_sem);

	return (error);
}
static int
find_command_idx(char *command, int *idx)

View File

@ -279,6 +279,8 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/cli_root/zpool_trim/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/blockfiles/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_wait/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_wait/scan/Makefile
tests/zfs-tests/tests/functional/cli_user/Makefile
tests/zfs-tests/tests/functional/cli_user/misc/Makefile
tests/zfs-tests/tests/functional/cli_user/zfs_list/Makefile

View File

@ -194,6 +194,10 @@ typedef struct zfs_handle zfs_handle_t;
typedef struct zpool_handle zpool_handle_t;
typedef struct libzfs_handle libzfs_handle_t;
extern int zpool_wait(zpool_handle_t *, zpool_wait_activity_t);
extern int zpool_wait_status(zpool_handle_t *, zpool_wait_activity_t,
boolean_t *, boolean_t *);
/*
* Library initialization
*/
@ -275,6 +279,8 @@ typedef struct trimflags {
extern int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
extern int zpool_initialize(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
extern int zpool_initialize_wait(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
extern int zpool_trim(zpool_handle_t *, pool_trim_func_t, nvlist_t *,
trimflags_t *);
@ -317,6 +323,7 @@ extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
size_t proplen, zprop_source_t *, boolean_t literal);
extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
zprop_source_t *);
extern int zpool_props_refresh(zpool_handle_t *);
extern const char *zpool_prop_to_name(zpool_prop_t);
extern const char *zpool_prop_values(zpool_prop_t);

View File

@ -130,6 +130,9 @@ int lzc_reopen(const char *, boolean_t);
int lzc_pool_checkpoint(const char *);
int lzc_pool_checkpoint_discard(const char *);
int lzc_wait(const char *, zpool_wait_activity_t, boolean_t *);
int lzc_wait_tag(const char *, zpool_wait_activity_t, uint64_t, boolean_t *);
#ifdef __cplusplus
}
#endif

View File

@ -1277,6 +1277,7 @@ typedef enum zfs_ioc {
ZFS_IOC_POOL_TRIM, /* 0x5a50 */
ZFS_IOC_REDACT, /* 0x5a51 */
ZFS_IOC_GET_BOOKMARK_PROPS, /* 0x5a52 */
ZFS_IOC_WAIT, /* 0x5a53 */
/*
* Linux - 3/64 numbers reserved.
@ -1340,6 +1341,17 @@ typedef enum {
SPA_LOAD_CREATE /* creation in progress */
} spa_load_state_t;
/*
 * Background activities that ZFS_IOC_WAIT can block on.
 * NOTE(review): 'zpool wait' indexes these by position from its -t subopt
 * table ("discard", "free", "initialize", ...) — keep the orders in sync.
 */
typedef enum {
	ZPOOL_WAIT_CKPT_DISCARD,
	ZPOOL_WAIT_FREE,
	ZPOOL_WAIT_INITIALIZE,
	ZPOOL_WAIT_REPLACE,
	ZPOOL_WAIT_REMOVE,
	ZPOOL_WAIT_RESILVER,
	ZPOOL_WAIT_SCRUB,
	ZPOOL_WAIT_NUM_ACTIVITIES	/* must be last: used as array size */
} zpool_wait_activity_t;
/*
* Bookmark name values.
*/
@ -1390,6 +1402,13 @@ typedef enum {
#define ZPOOL_TRIM_RATE "trim_rate"
#define ZPOOL_TRIM_SECURE "trim_secure"
/*
* The following are names used when invoking ZFS_IOC_POOL_WAIT.
*/
#define ZPOOL_WAIT_ACTIVITY "wait_activity"
#define ZPOOL_WAIT_TAG "wait_tag"
#define ZPOOL_WAIT_WAITED "wait_waited"
/*
* Flags for ZFS_IOC_VDEV_SET_STATE
*/

View File

@ -1204,6 +1204,14 @@ extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, nvlist_t *hist_nvl,
const char *name);
/* waiting for pool activities to complete */
extern int spa_wait(const char *pool, zpool_wait_activity_t activity,
boolean_t *waited);
extern int spa_wait_tag(const char *name, zpool_wait_activity_t activity,
uint64_t tag, boolean_t *waited);
extern void spa_notify_waiters(spa_t *spa);
extern void spa_wake_waiters(spa_t *spa);
#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \

View File

@ -413,6 +413,13 @@ struct spa {
uint64_t spa_leaf_list_gen; /* track leaf_list changes */
uint32_t spa_hostid; /* cached system hostid */
/* synchronization for threads in spa_wait */
kmutex_t spa_activities_lock;
kcondvar_t spa_activities_cv;
kcondvar_t spa_waiters_cv;
int spa_waiters; /* number of waiting threads */
boolean_t spa_waiters_cancel; /* waiters should return */
/*
* spa_refcount & spa_config_lock must be the last elements
* because zfs_refcount_t changes size based on compilation options.

View File

@ -85,6 +85,7 @@ extern void vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset,
uint64_t size);
extern void spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev,
uint64_t offset, uint64_t size, dmu_tx_t *tx);
extern boolean_t vdev_replace_in_progress(vdev_t *vdev);
extern void vdev_hold(vdev_t *);
extern void vdev_rele(vdev_t *);

View File

@ -101,7 +101,7 @@ zpool_get_all_props(zpool_handle_t *zhp)
return (0);
}
static int
int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
@ -2158,10 +2158,9 @@ xlate_init_err(int err)
* blocks) for the given vdevs in the given pool.
*/
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds, boolean_t wait)
{
char msg[1024];
int err;
nvlist_t *vdev_guids = fnvlist_alloc();
@ -2173,26 +2172,46 @@ zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &vd_errlist);
if (err == 0) {
err = lzc_initialize(zhp->zpool_name, cmd_type,
vdev_guids, &errlist);
if (err == 0) {
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
return (0);
if (err != 0) {
verify(vd_errlist != NULL);
goto list_errors;
}
err = lzc_initialize(zhp->zpool_name, cmd_type,
vdev_guids, &errlist);
if (err != 0) {
if (errlist != NULL) {
vd_errlist = fnvlist_lookup_nvlist(errlist,
ZPOOL_INITIALIZE_VDEVS);
goto list_errors;
}
(void) snprintf(msg, sizeof (msg),
(void) zpool_standard_error(zhp->zpool_hdl, err,
dgettext(TEXT_DOMAIN, "operation failed"));
} else {
verify(vd_errlist != NULL);
goto out;
}
if (wait) {
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_INITIALIZE, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting for '%s' to initialize"),
nvpair_name(elem));
goto out;
}
}
}
goto out;
list_errors:
for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
elem = nvlist_next_nvpair(vd_errlist, elem)) {
int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
@ -2206,15 +2225,28 @@ zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
"cannot initialize '%s'", path);
}
out:
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
if (vd_errlist != NULL) {
if (vd_errlist != NULL)
fnvlist_free(vd_errlist);
return (-1);
}
return (zpool_standard_error(zhp->zpool_hdl, err, msg));
return (err == 0 ? 0 : -1);
}
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}
static int
@ -4782,3 +4814,51 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
return (0);
}
/*
 * Wait while the specified activity is in progress in the pool. Unlike
 * zpool_wait_status(), a pool that no longer exists is reported as an error.
 */
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
	boolean_t missing = B_FALSE;
	int error;

	error = zpool_wait_status(zhp, activity, &missing, NULL);
	if (!missing)
		return (error);

	(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
	    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
	    zhp->zpool_name);
	return (ENOENT);
}
/*
 * Wait for the given activity and return the status of the wait (whether or not
 * any waiting was done) in the 'waited' parameter. Non-existent pools are
 * reported via the 'missing' parameter, rather than by printing an error
 * message. This is convenient when this function is called in a loop over a
 * long period of time (as it is, for example, by zpool's wait cmd). In that
 * scenario, a pool being exported or destroyed should be considered a normal
 * event, so we don't want to print an error when we find that the pool doesn't
 * exist.
 */
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
    boolean_t *missing, boolean_t *waited)
{
	int error = lzc_wait(zhp->zpool_name, activity, waited);

	/* ENOENT means the pool is gone; report that via 'missing' only. */
	if (error == ENOENT) {
		*missing = B_TRUE;
		return (0);
	}
	*missing = B_FALSE;

	if (error != 0) {
		(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
		    dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
		    zhp->zpool_name);
	}

	return (error);
}

View File

@ -1579,3 +1579,39 @@ lzc_redact(const char *snapshot, const char *bookname, nvlist_t *snapnv)
fnvlist_free(args);
return (error);
}
/*
 * Common implementation for lzc_wait() and lzc_wait_tag(): build the input
 * nvlist, issue ZFS_IOC_WAIT, and on success extract the "waited" result.
 */
static int
wait_common(const char *pool, zpool_wait_activity_t activity, boolean_t use_tag,
    uint64_t tag, boolean_t *waited)
{
	nvlist_t *result = NULL;
	nvlist_t *args = fnvlist_alloc();

	fnvlist_add_int32(args, ZPOOL_WAIT_ACTIVITY, activity);
	if (use_tag)
		fnvlist_add_uint64(args, ZPOOL_WAIT_TAG, tag);

	int error = lzc_ioctl(ZFS_IOC_WAIT, pool, args, &result);

	if (error == 0 && waited != NULL) {
		*waited = fnvlist_lookup_boolean_value(result,
		    ZPOOL_WAIT_WAITED);
	}

	fnvlist_free(args);
	fnvlist_free(result);

	return (error);
}
/*
 * Block until all activity of the given type in the pool has ceased. If
 * 'waited' is non-NULL, on success it is set to whether any waiting occurred.
 */
int
lzc_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
	return (wait_common(pool, activity, B_FALSE, 0, waited));
}
/*
 * Like lzc_wait(), but waits only for the single activity instance identified
 * by 'tag' (currently only valid for ZPOOL_WAIT_INITIALIZE, where the tag is
 * the guid of the vdev being initialized).
 */
int
lzc_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
    boolean_t *waited)
{
	return (wait_common(pool, activity, B_TRUE, tag, waited));
}

View File

@ -1968,6 +1968,18 @@ Pattern written to vdev free space by \fBzpool initialize\fR.
Default value: \fB16,045,690,984,833,335,022\fR (0xdeadbeefdeadbeee).
.RE
.sp
.ne 2
.na
\fBzfs_initialize_chunk_size\fR (ulong)
.ad
.RS 12n
Size of writes used by \fBzpool initialize\fR.
This option is used by the test suite to facilitate testing.
.sp
Default value: \fB1,048,576\fR
.RE
.sp
.ne 2
.na

View File

@ -27,7 +27,7 @@
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd May 2, 2019
.Dd August 9, 2019
.Dt ZPOOL 8 SMM
.Os Linux
.Sh NAME
@ -43,12 +43,12 @@
.Ar pool vdev Ns ...
.Nm
.Cm attach
.Op Fl f
.Op Fl fw
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool device new_device
.Nm
.Cm checkpoint
.Op Fl d, -discard
.Op Fl d, -discard Oo Fl w, -wait Oc
.Ar pool
.Nm
.Cm clear
@ -117,6 +117,7 @@
.Nm
.Cm initialize
.Op Fl c | Fl s
.Op Fl w
.Ar pool
.Op Ar device Ns ...
.Nm
@ -155,7 +156,7 @@
.Ar pool
.Nm
.Cm remove
.Op Fl np
.Op Fl npw
.Ar pool Ar device Ns ...
.Nm
.Cm remove
@ -163,7 +164,7 @@
.Ar pool
.Nm
.Cm replace
.Op Fl f
.Op Fl fw
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool Ar device Op Ar new_device
.Nm
@ -172,6 +173,7 @@
.Nm
.Cm scrub
.Op Fl s | Fl p
.Op Fl w
.Ar pool Ns ...
.Nm
.Cm trim
@ -211,6 +213,13 @@
.Op Fl V Ar version
.Fl a Ns | Ns Ar pool Ns ...
.Nm
.Cm wait
.Op Fl Hp
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl t Ar activity Ns Oo , Ns Ar activity Ns Oc Ns ...
.Ar pool
.Op Ar interval
.Nm
.Cm version
.Sh DESCRIPTION
The
@ -988,7 +997,7 @@ supported at the moment is ashift.
.It Xo
.Nm
.Cm attach
.Op Fl f
.Op Fl fw
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool device new_device
.Xc
@ -1019,6 +1028,10 @@ Forces use of
.Ar new_device ,
even if it appears to be in use.
Not all devices can be overridden in this manner.
.It Fl w
Waits until
.Ar new_device
has finished resilvering before returning.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties. See the
.Sx Properties
@ -1028,7 +1041,7 @@ supported at the moment is ashift.
.It Xo
.Nm
.Cm checkpoint
.Op Fl d, -discard
.Op Fl d, -discard Oo Fl w, -wait Oc
.Ar pool
.Xc
Checkpoints the current state of
@ -1057,6 +1070,8 @@ command reports how much space the checkpoint takes from the pool.
.It Fl d, -discard
Discards an existing checkpoint from
.Ar pool .
.It Fl w, -wait
Waits until the checkpoint has finished being discarded before returning.
.El
.It Xo
.Nm
@ -1687,6 +1702,7 @@ Will also set -o cachefile=none when not explicitly specified.
.Nm
.Cm initialize
.Op Fl c | Fl s
.Op Fl w
.Ar pool
.Op Ar device Ns ...
.Xc
@ -1708,6 +1724,8 @@ initialized, the command will fail and no suspension will occur on any device.
Initializing can then be resumed by running
.Nm zpool Cm initialize
with no flags on the relevant target devices.
.It Fl w, -wait
Waits until the devices have finished initializing before returning.
.El
.It Xo
.Nm
@ -2049,7 +2067,7 @@ result in partially resilvered devices unless a second scrub is performed.
.It Xo
.Nm
.Cm remove
.Op Fl np
.Op Fl npw
.Ar pool Ar device Ns ...
.Xc
Removes the specified device from the pool.
@ -2091,6 +2109,8 @@ This is nonzero only for top-level vdevs.
Used in conjunction with the
.Fl n
flag, displays numbers as parsable (exact) values.
.It Fl w
Waits until the removal has completed before returning.
.El
.It Xo
.Nm
@ -2102,7 +2122,7 @@ Stops and cancels an in-progress removal of a top-level vdev.
.It Xo
.Nm
.Cm replace
.Op Fl f
.Op Fl fw
.Op Fl o Ar property Ns = Ns Ar value
.Ar pool Ar device Op Ar new_device
.Xc
@ -2144,11 +2164,14 @@ Sets the given pool properties. See the
section for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .
.It Fl w
Waits until the replacement has completed before returning.
.El
.It Xo
.Nm
.Cm scrub
.Op Fl s | Fl p
.Op Fl w
.Ar pool Ns ...
.Xc
Begins a scrub or resumes a paused scrub.
@ -2198,6 +2221,8 @@ checkpointed to disk.
To resume a paused scrub issue
.Nm zpool Cm scrub
again.
.It Fl w
Waits until the scrub has completed before returning.
.El
.It Xo
.Nm
@ -2480,6 +2505,64 @@ supported legacy version number.
Displays the software version of the
.Nm
userland utility and the zfs kernel module.
.It Xo
.Nm
.Cm wait
.Op Fl Hp
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl t Ar activity Ns Oo , Ns Ar activity Ns Oc Ns ...
.Ar pool
.Op Ar interval
.Xc
Waits until all background activity of the given types has ceased in the given
pool.
The activity could cease because it has completed, or because it has been
paused or canceled by a user, or because the pool has been exported or
destroyed.
If no activities are specified, the command waits until background activity of
every type listed below has ceased.
If there is no activity of the given types in progress, the command returns
immediately.
.Pp
These are the possible values for
.Ar activity ,
along with what each one waits for:
.Bd -literal
discard Checkpoint to be discarded
free 'freeing' property to become 0
initialize All initializations to cease
replace All device replacements to cease
remove Device removal to cease
resilver Resilver to cease
scrub Scrub to cease
.Ed
.Pp
If an
.Ar interval
is provided, the amount of work remaining, in bytes, for each activity is
printed every
.Ar interval
seconds.
.Bl -tag -width Ds
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary
space.
.It Fl p
Display numbers in parsable (exact) values.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 2 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.El
.El
.Sh EXIT STATUS
The following exit values are returned:

View File

@ -203,11 +203,21 @@ bpobj_close(bpobj_t *bpo)
mutex_destroy(&bpo->bpo_lock);
}
/*
 * Return B_TRUE if the bpobj contains no block pointers (and, if it supports
 * subobjs, no subobjs either). Caller must hold bpo_lock; bpobj_is_empty()
 * is the self-locking wrapper.
 */
static boolean_t
bpobj_is_empty_impl(bpobj_t *bpo)
{
	ASSERT(MUTEX_HELD(&bpo->bpo_lock));
	return (bpo->bpo_phys->bpo_num_blkptrs == 0 &&
	    (!bpo->bpo_havesubobj || bpo->bpo_phys->bpo_num_subobjs == 0));
}
/*
 * Self-locking check for whether the bpobj is empty.
 */
boolean_t
bpobj_is_empty(bpobj_t *bpo)
{
	boolean_t empty;

	mutex_enter(&bpo->bpo_lock);
	empty = bpobj_is_empty_impl(bpo);
	mutex_exit(&bpo->bpo_lock);

	return (empty);
}
/*
@ -387,7 +397,7 @@ bpobj_iterate_impl(bpobj_t *initial_bpo, bpobj_itor_t func, void *arg,
* If there are no entries, there should
* be no bytes.
*/
if (bpobj_is_empty(bpo)) {
if (bpobj_is_empty_impl(bpo)) {
ASSERT0(bpo->bpo_phys->bpo_bytes);
ASSERT0(bpo->bpo_phys->bpo_comp);
ASSERT0(bpo->bpo_phys->bpo_uncomp);

View File

@ -899,6 +899,8 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
spa_notify_waiters(spa);
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", (u_longlong_t)spa_get_errlog_size(spa));
@ -1038,6 +1040,7 @@ dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
scn->scn_phys_cached.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
spa_notify_waiters(spa);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
@ -3361,6 +3364,8 @@ dsl_process_async_destroys(dsl_pool_t *dp, dmu_tx_t *tx)
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
spa_notify_waiters(spa);
EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ));

View File

@ -1541,6 +1541,8 @@ spa_unload(spa_t *spa)
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If the log space map feature is enabled and the pool is getting
* exported (but not destroyed), we want to spend some time flushing
@ -2470,6 +2472,7 @@ livelist_delete_sync(void *arg, dmu_tx_t *tx)
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
@ -6947,6 +6950,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
@ -9228,6 +9232,279 @@ spa_total_metaslabs(spa_t *spa)
return (m);
}
/*
 * Notify any waiting threads that some activity has switched from being in-
 * progress to not-in-progress so that the thread can wake up and determine
 * whether it is finished waiting.
 *
 * Callers may, but are not required to, hold the relevant activity-specific
 * lock when calling this (see the 'Locking for waiting threads' comment
 * below).
 */
void
spa_notify_waiters(spa_t *spa)
{
	/*
	 * Acquiring spa_activities_lock here prevents the cv_broadcast from
	 * happening between the waiting thread's check and cv_wait.
	 */
	mutex_enter(&spa->spa_activities_lock);
	cv_broadcast(&spa->spa_activities_cv);
	mutex_exit(&spa->spa_activities_lock);
}
/*
 * Notify any waiting threads that the pool is exporting, and then block until
 * they are finished using the spa_t.
 */
void
spa_wake_waiters(spa_t *spa)
{
	mutex_enter(&spa->spa_activities_lock);
	/* Tell waiters to stop waiting even though their activity is ongoing. */
	spa->spa_waiters_cancel = B_TRUE;
	cv_broadcast(&spa->spa_activities_cv);
	/* Block until every waiter has dropped out of its wait loop. */
	while (spa->spa_waiters != 0)
		cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
	spa->spa_waiters_cancel = B_FALSE;
	mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants is initializing. */
static boolean_t
spa_vdev_initializing_impl(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t initializing;

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));

	/*
	 * Temporarily drop spa_activities_lock while acquiring the
	 * activity-specific vdev_initialize_lock, to preserve lock ordering
	 * (see the 'Locking for waiting threads' comment below).
	 */
	mutex_exit(&spa->spa_activities_lock);
	mutex_enter(&vd->vdev_initialize_lock);
	mutex_enter(&spa->spa_activities_lock);

	initializing = (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE);
	mutex_exit(&vd->vdev_initialize_lock);

	if (initializing)
		return (B_TRUE);

	/* Recurse: any actively-initializing descendant counts. */
	for (int i = 0; i < vd->vdev_children; i++) {
		if (spa_vdev_initializing_impl(vd->vdev_child[i]))
			return (B_TRUE);
	}

	return (B_FALSE);
}
/*
 * If use_guid is true, this checks whether the vdev specified by guid is
 * being initialized. Otherwise, it checks whether any vdev in the pool is being
 * initialized. The caller must hold spa_activities_lock; the config lock is
 * acquired (and released) internally.
 *
 * Returns EINVAL if 'guid' does not name a leaf vdev, 0 otherwise, with the
 * answer in '*in_progress'.
 */
static int
spa_vdev_initializing(spa_t *spa, boolean_t use_guid, uint64_t guid,
    boolean_t *in_progress)
{
	/*
	 * Drop spa_activities_lock while taking the config lock to preserve
	 * the lock ordering described in 'Locking for waiting threads' below.
	 */
	mutex_exit(&spa->spa_activities_lock);
	spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	mutex_enter(&spa->spa_activities_lock);

	vdev_t *vd;
	if (use_guid) {
		vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		/* Only leaf vdevs can be initialized. */
		if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
			spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
			return (EINVAL);
		}
	} else {
		vd = spa->spa_root_vdev;
	}

	*in_progress = spa_vdev_initializing_impl(vd);

	spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
	return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
/*
 * Determine whether the given activity is currently in progress in the pool,
 * storing the answer in '*in_progress'. A nonzero error is returned only for
 * ZPOOL_WAIT_INITIALIZE with an invalid tag (see spa_vdev_initializing()).
 * Caller must hold spa_activities_lock.
 */
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
    boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
	switch (activity) {
	case ZPOOL_WAIT_CKPT_DISCARD:
		/*
		 * A discard is in progress iff the checkpoint feature is still
		 * active but the checkpoint's ZAP entry is already gone; a
		 * pool that merely has a checkpoint still has the entry.
		 */
		*in_progress =
		    (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
		    zap_contains(spa_meta_objset(spa),
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
		    ENOENT);
		break;
	case ZPOOL_WAIT_FREE:
		/*
		 * Freeing is ongoing while the free bpobj is non-empty, an
		 * async destroy is active, or livelists remain to be deleted.
		 */
		*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
		    !bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
		    spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
		    spa_livelist_delete_check(spa));
		break;
	case ZPOOL_WAIT_INITIALIZE:
		error = spa_vdev_initializing(spa, use_tag, tag, in_progress);
		break;
	case ZPOOL_WAIT_REPLACE:
		/* Drop and reacquire locks in the required order. */
		mutex_exit(&spa->spa_activities_lock);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
		mutex_enter(&spa->spa_activities_lock);

		*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		break;
	case ZPOOL_WAIT_REMOVE:
		*in_progress = (spa->spa_removing_phys.sr_state ==
		    DSS_SCANNING);
		break;
	case ZPOOL_WAIT_RESILVER:
	case ZPOOL_WAIT_SCRUB:
	{
		boolean_t scanning, paused, is_scrub;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;

		/* A paused scrub does not count as in progress. */
		is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
		scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
		paused = dsl_scan_is_paused_scrub(scn);
		*in_progress = (scanning && !paused &&
		    is_scrub == (activity == ZPOOL_WAIT_SCRUB));
		break;
	}
	default:
		panic("unrecognized value for activity %d", activity);
	}

	return (error);
}
/*
 * Block until the given activity ceases in the named pool, the wait is
 * canceled because the pool is being exported/destroyed, or the thread
 * receives a signal (EINTR). On success, '*waited' reports whether any
 * actual waiting was done.
 */
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
    boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
	/*
	 * The tag is used to distinguish between instances of an activity.
	 * 'initialize' is the only activity that we use this for. The other
	 * activities can only have a single instance in progress in a pool at
	 * one time, making the tag unnecessary.
	 *
	 * There can be multiple devices being replaced at once, but since they
	 * all finish once resilvering finishes, we don't bother keeping track
	 * of them individually, we just wait for them all to finish.
	 */
	if (use_tag && activity != ZPOOL_WAIT_INITIALIZE)
		return (EINVAL);

	if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
		return (EINVAL);

	spa_t *spa;
	int error = spa_open(pool, &spa, FTAG);
	if (error != 0)
		return (error);

	/*
	 * Increment the spa's waiter count so that we can call spa_close and
	 * still ensure that the spa_t doesn't get freed before this thread is
	 * finished with it when the pool is exported. We want to call spa_close
	 * before we start waiting because otherwise the additional ref would
	 * prevent the pool from being exported or destroyed throughout the
	 * potentially long wait.
	 */
	mutex_enter(&spa->spa_activities_lock);
	spa->spa_waiters++;
	spa_close(spa, FTAG);

	*waited = B_FALSE;
	for (;;) {
		boolean_t in_progress;
		error = spa_activity_in_progress(spa, activity, use_tag, tag,
		    &in_progress);

		/* Stop when done, when canceled by export, or on error. */
		if (!in_progress || spa->spa_waiters_cancel || error)
			break;

		*waited = B_TRUE;

		/* cv_wait_sig() returns 0 when interrupted by a signal. */
		if (cv_wait_sig(&spa->spa_activities_cv,
		    &spa->spa_activities_lock) == 0) {
			error = EINTR;
			break;
		}
	}

	spa->spa_waiters--;
	/* Wake spa_wake_waiters() if it's waiting for waiters to drain. */
	cv_signal(&spa->spa_waiters_cv);
	mutex_exit(&spa->spa_activities_lock);

	return (error);
}
/*
 * Wait for a particular instance of the specified activity to complete, where
 * the instance is identified by 'tag'. Only 'initialize' supports tags; the
 * tag is the guid of the vdev being initialized.
 */
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
    boolean_t *waited)
{
	return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
 * Wait for all instances of the specified activity to complete.
 */
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
	return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{

View File

@ -191,6 +191,7 @@ spa_checkpoint_discard_complete_sync(void *arg, dmu_tx_t *tx)
spa->spa_checkpoint_info.sci_timestamp = 0;
spa_feature_decr(spa, SPA_FEATURE_POOL_CHECKPOINT, tx);
spa_notify_waiters(spa);
spa_history_log_internal(spa, "spa discard checkpoint", tx,
"finished discarding checkpointed state from the pool");

View File

@ -650,12 +650,15 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
@ -767,6 +770,7 @@ spa_remove(spa_t *spa)
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
nvlist_free(spa->spa_config_splitting);
@ -818,6 +822,8 @@ spa_remove(spa_t *spa)
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
cv_destroy(&spa->spa_activities_cv);
cv_destroy(&spa->spa_waiters_cv);
mutex_destroy(&spa->spa_flushed_ms_lock);
mutex_destroy(&spa->spa_async_lock);
@ -832,6 +838,7 @@ spa_remove(spa_t *spa)
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
mutex_destroy(&spa->spa_activities_lock);
kmem_free(spa, sizeof (spa_t));
}

View File

@ -4764,6 +4764,35 @@ vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
physical_rs->rs_end = intermediate.rs_end;
}
/*
 * Look at the vdev tree and determine whether any devices are currently being
 * replaced.
 */
boolean_t
vdev_replace_in_progress(vdev_t *vdev)
{
	ASSERT(spa_config_held(vdev->vdev_spa, SCL_ALL, RW_READER) != 0);

	if (vdev->vdev_ops == &vdev_replacing_ops)
		return (B_TRUE);

	if (vdev->vdev_ops == &vdev_spare_ops) {
		/*
		 * A 'spare' vdev indicates that we have a replace in progress,
		 * unless it has exactly two children, and the second, the hot
		 * spare, has finished being resilvered.
		 */
		if (vdev->vdev_children > 2)
			return (B_TRUE);
		if (!vdev_dtl_empty(vdev->vdev_child[1], DTL_MISSING))
			return (B_TRUE);
	}

	for (int c = 0; c < vdev->vdev_children; c++) {
		if (vdev_replace_in_progress(vdev->vdev_child[c]))
			return (B_TRUE);
	}

	return (B_FALSE);
}
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);

View File

@ -46,7 +46,7 @@ unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL;
int zfs_initialize_limit = 1;
/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
uint64_t zfs_initialize_chunk_size = 1024 * 1024;
unsigned long zfs_initialize_chunk_size = 1024 * 1024;
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
@ -150,6 +150,9 @@ vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
}
dmu_tx_commit(tx);
if (new_state != VDEV_INITIALIZE_ACTIVE)
spa_notify_waiters(spa);
}
static void
@ -732,4 +735,7 @@ EXPORT_SYMBOL(vdev_initialize_restart);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, ULONG, ZMOD_RW,
"Value written during zpool initialize");
ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, ULONG, ZMOD_RW,
"Size in bytes of writes by zpool initialize");
/* END CSTYLED */

View File

@ -697,6 +697,7 @@ spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
spa_vdev_removal_destroy(svr);
spa_sync_removing_state(spa, tx);
spa_notify_waiters(spa);
vdev_config_dirty(spa->spa_root_vdev);
}

View File

@ -4069,6 +4069,56 @@ zfs_ioc_pool_trim(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
return (total_errors > 0 ? EINVAL : 0);
}
/*
* This ioctl waits for activity of a particular type to complete. If there is
* no activity of that type in progress, it returns immediately, and the
* returned value "waited" is false. If there is activity in progress, and no
* tag is passed in, the ioctl blocks until all activity of that type is
* complete, and then returns with "waited" set to true.
*
* If a tag is provided, it identifies a particular instance of an activity to
* wait for. Currently, this is only valid for use with 'initialize', because
* that is the only activity for which there can be multiple instances running
* concurrently. In the case of 'initialize', the tag corresponds to the guid of
* the vdev on which to wait.
*
* If a thread waiting in the ioctl receives a signal, the call will return
* immediately, and the return value will be EINTR.
*
* innvl: {
* "wait_activity" -> int32_t
* (optional) "wait_tag" -> uint64_t
* }
*
* outnvl: "waited" -> boolean_t
*/
/* Input nvlist keys accepted by zfs_ioc_wait(). */
static const zfs_ioc_key_t zfs_keys_pool_wait[] = {
	{ZPOOL_WAIT_ACTIVITY, DATA_TYPE_INT32, 0},
	{ZPOOL_WAIT_TAG, DATA_TYPE_UINT64, ZK_OPTIONAL},
};
/* Handler for ZFS_IOC_WAIT; see the interface comment above. */
static int
zfs_ioc_wait(const char *name, nvlist_t *innvl, nvlist_t *outnvl)
{
	int32_t activity;
	uint64_t tag;
	boolean_t waited;
	int error;

	if (nvlist_lookup_int32(innvl, ZPOOL_WAIT_ACTIVITY, &activity) != 0)
		return (EINVAL);

	/* A tag narrows the wait to a single activity instance. */
	boolean_t use_tag =
	    (nvlist_lookup_uint64(innvl, ZPOOL_WAIT_TAG, &tag) == 0);
	if (use_tag)
		error = spa_wait_tag(name, activity, tag, &waited);
	else
		error = spa_wait(name, activity, &waited);

	if (error == 0)
		fnvlist_add_boolean_value(outnvl, ZPOOL_WAIT_WAITED, waited);

	return (error);
}
/*
* fsname is name of dataset to rollback (to most recent snapshot)
*
@ -6894,6 +6944,11 @@ zfs_ioctl_init(void)
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE,
zfs_keys_pool_trim, ARRAY_SIZE(zfs_keys_pool_trim));
zfs_ioctl_register("wait", ZFS_IOC_WAIT,
zfs_ioc_wait, zfs_secpolicy_none, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE,
zfs_keys_pool_wait, ARRAY_SIZE(zfs_keys_pool_wait));
/* IOCTLS that use the legacy function signature */
zfs_ioctl_register_legacy(ZFS_IOC_POOL_FREEZE, zfs_ioc_pool_freeze,

View File

@ -488,6 +488,19 @@ tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_009_neg']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_root/zpool_wait]
tests = ['zpool_wait_discard', 'zpool_wait_freeing',
'zpool_wait_initialize_basic', 'zpool_wait_initialize_cancel',
'zpool_wait_initialize_flag', 'zpool_wait_multiple',
'zpool_wait_no_activity', 'zpool_wait_remove', 'zpool_wait_remove_cancel',
'zpool_wait_usage']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_root/zpool_wait/scan]
tests = ['zpool_wait_replace_cancel', 'zpool_wait_resilver', 'zpool_wait_scrub_cancel',
'zpool_wait_replace', 'zpool_wait_scrub_basic', 'zpool_wait_scrub_flag']
tags = ['functional', 'cli_root', 'zpool_wait']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
@ -503,7 +516,7 @@ tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg']
'arc_summary_001_pos', 'arc_summary_002_neg', 'zpool_wait_privilege']
user =
tags = ['functional', 'cli_user', 'misc']

View File

@ -719,6 +719,21 @@ test_get_bookmark_props(const char *bookmark)
IOC_INPUT_TEST(ZFS_IOC_GET_BOOKMARK_PROPS, bookmark, NULL, NULL, 0);
}
static void
test_wait(const char *pool)
{
nvlist_t *required = fnvlist_alloc();
nvlist_t *optional = fnvlist_alloc();
fnvlist_add_int32(required, "wait_activity", 2);
fnvlist_add_uint64(optional, "wait_tag", 0xdeadbeefdeadbeef);
IOC_INPUT_TEST(ZFS_IOC_WAIT, pool, required, optional, EINVAL);
nvlist_free(required);
nvlist_free(optional);
}
static void
zfs_ioc_input_tests(const char *pool)
{
@ -805,6 +820,8 @@ zfs_ioc_input_tests(const char *pool)
test_vdev_initialize(pool);
test_vdev_trim(pool);
test_wait(pool);
/*
* cleanup
*/
@ -954,6 +971,7 @@ validate_ioc_values(void)
CHECK(ZFS_IOC_BASE + 80 == ZFS_IOC_POOL_TRIM);
CHECK(ZFS_IOC_BASE + 81 == ZFS_IOC_REDACT);
CHECK(ZFS_IOC_BASE + 82 == ZFS_IOC_GET_BOOKMARK_PROPS);
CHECK(ZFS_IOC_BASE + 83 == ZFS_IOC_WAIT);
CHECK(LINUX_IOC_BASE + 1 == ZFS_IOC_EVENTS_NEXT);
CHECK(LINUX_IOC_BASE + 2 == ZFS_IOC_EVENTS_CLEAR);
CHECK(LINUX_IOC_BASE + 3 == ZFS_IOC_EVENTS_SEEK);

View File

@ -2130,7 +2130,7 @@ function check_pool_status # pool token keyword <verbose>
}
#
# These 6 following functions are instance of check_pool_status()
# The following functions are instances of check_pool_status()
# is_pool_resilvering - to check if the pool is resilver in progress
# is_pool_resilvered - to check if the pool is resilver completed
# is_pool_scrubbing - to check if the pool is scrub in progress
@ -2139,6 +2139,7 @@ function check_pool_status # pool token keyword <verbose>
# is_pool_scrub_paused - to check if the pool has scrub paused
# is_pool_removing - to check if the pool is removing a vdev
# is_pool_removed - to check if the pool is remove completed
# is_pool_discarding - to check if the pool has checkpoint being discarded
#
function is_pool_resilvering #pool <verbose>
{
@ -2188,6 +2189,12 @@ function is_pool_removed #pool
return $?
}
# Check whether the pool is currently discarding its checkpoint.
function is_pool_discarding #pool
{
	check_pool_status "$1" "checkpoint" "discarding"
	return $?
}
function wait_for_degraded
{
typeset pool=$1

View File

@ -59,4 +59,5 @@ SUBDIRS = \
zpool_status \
zpool_sync \
zpool_trim \
zpool_upgrade
zpool_upgrade \
zpool_wait

View File

@ -155,21 +155,12 @@ function check_livelist_exists
log_fail "zdb could not find Livelist"
}
# Wait for the deferred destroy livelists to be removed
function wait_for_deferred_destroy
{
sync
deleted=$(zdb -vvvvv $TESTPOOL | grep "Deleted Livelist")
while [[ "$deleted" != "" ]]; do
deleted=$(zdb -vvvvv $TESTPOOL | grep "Deleted Livelist")
done
}
# Check that a livelist has been removed, waiting for deferred destroy entries
# to be cleared from zdb.
function check_livelist_gone
{
	# Block until background freeing completes, then force a txg sync so
	# zdb sees the on-disk state. 'zpool sync' is checked with log_must
	# for consistency with the other commands in this function.
	log_must zpool wait -t free $TESTPOOL
	log_must zpool sync
	zdb -vvvvv $TESTPOOL | grep "Livelist" && \
	    log_fail "zdb found Livelist after the clone is deleted."
}

View File

@ -176,11 +176,7 @@ function do_testing #<clear type> <vdevs>
dd if=/dev/zero of=$fbase.$i seek=512 bs=1024 count=$wcount conv=notrunc \
> /dev/null 2>&1
log_must sync
log_must zpool scrub $TESTPOOL1
# Wait for the completion of scrub operation
while is_pool_scrubbing $TESTPOOL1; do
sleep 1
done
log_must zpool scrub -w $TESTPOOL1
check_err $TESTPOOL1 && \
log_fail "No error generated."

View File

@ -73,8 +73,6 @@ log_must is_pool_resilvering $TESTPOOL
log_mustnot zpool scrub $TESTPOOL
log_must set_tunable32 zfs_scan_suspend_progress 0
while ! is_pool_resilvered $TESTPOOL; do
sleep 1
done
log_must zpool wait -t resilver $TESTPOOL
log_pass "Resilver prevent scrub from starting until the resilver completes"

View File

@ -48,18 +48,10 @@ log_assert "When scrubbing, detach device should not break system."
log_must zpool scrub $TESTPOOL
log_must zpool detach $TESTPOOL $DISK2
log_must zpool attach $TESTPOOL $DISK1 $DISK2
while ! is_pool_resilvered $TESTPOOL; do
sleep 1
done
log_must zpool attach -w $TESTPOOL $DISK1 $DISK2
log_must zpool scrub $TESTPOOL
log_must zpool detach $TESTPOOL $DISK1
log_must zpool attach $TESTPOOL $DISK2 $DISK1
while ! is_pool_resilvered $TESTPOOL; do
sleep 1
done
log_must zpool attach -w $TESTPOOL $DISK2 $DISK1
log_pass "When scrubbing, detach device should not break system."

View File

@ -58,11 +58,7 @@ done
log_must zfs unmount $TESTPOOL/$TESTFS2
log_must zfs unload-key $TESTPOOL/$TESTFS2
log_must zpool scrub $TESTPOOL
while ! is_pool_scrubbed $TESTPOOL; do
sleep 1
done
log_must zpool scrub -w $TESTPOOL
log_must check_pool_status $TESTPOOL "scan" "with 0 errors"

View File

@ -0,0 +1,19 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_wait
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
zpool_wait_discard.ksh \
zpool_wait_freeing.ksh \
zpool_wait_initialize_basic.ksh \
zpool_wait_initialize_cancel.ksh \
zpool_wait_initialize_flag.ksh \
zpool_wait_multiple.ksh \
zpool_wait_no_activity.ksh \
zpool_wait_remove.ksh \
zpool_wait_remove_cancel.ksh \
zpool_wait_usage.ksh
dist_pkgdata_DATA = \
zpool_wait.kshlib
SUBDIRS = scan

View File

@ -0,0 +1,20 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
default_cleanup

View File

@ -0,0 +1,10 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_wait/scan
dist_pkgdata_SCRIPTS = \
setup.ksh \
cleanup.ksh \
zpool_wait_replace.ksh \
zpool_wait_replace_cancel.ksh \
zpool_wait_resilver.ksh \
zpool_wait_scrub_basic.ksh \
zpool_wait_scrub_cancel.ksh \
zpool_wait_scrub_flag.ksh

View File

@ -0,0 +1,20 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
default_cleanup

View File

@ -0,0 +1,32 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
verify_runnable "global"
verify_disk_count $DISKS 3
#
# Set up a pool for use in the tests that do scrubbing and resilvering. Each
# test leaves the pool in the same state as when it started, so it is safe to
# share the same setup.
#
log_must zpool create -f $TESTPOOL $DISK1
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=1k count=256k
log_pass

View File

@ -0,0 +1,71 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when a disk is being replaced.
#
# STRATEGY:
# 1. Attach a disk to pool to form two-way mirror.
# 2. Start a replacement of the new disk.
# 3. Start 'zpool wait'.
# 4. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
# 5. Repeat 2-4, except using the '-w' flag with 'zpool replace' instead of
# using 'zpool wait'.
#
# Undo everything the replace test may have done: clear the injected I/O
# delay, kill a background 'zpool wait'/'zpool replace -w' process if one is
# still running, and detach whichever of the two extra disks is still in the
# pool so the shared single-disk pool is left as the test found it.
function cleanup
{
remove_io_delay
kill_if_running $pid
get_disklist $TESTPOOL | grep $DISK2 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK2
get_disklist $TESTPOOL | grep $DISK3 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK3
}
# True while $TESTPOOL still shows a 'replacing-' vdev in its status output,
# i.e. while the device replacement has not yet completed or been canceled.
function in_progress
{
	zpool status "$TESTPOOL" | grep -q 'replacing-'
}
typeset pid
log_onexit cleanup
log_must zpool attach -w $TESTPOOL $DISK1 $DISK2
add_io_delay $TESTPOOL
# Test 'zpool wait -t replace'
log_must zpool replace $TESTPOOL $DISK2 $DISK3
log_bkgrnd zpool wait -t replace $TESTPOOL
pid=$!
check_while_waiting $pid in_progress
# Test 'zpool replace -w'
log_bkgrnd zpool replace -w $TESTPOOL $DISK3 $DISK2
pid=$!
while ! is_pool_resilvering $TESTPOOL && proc_exists $pid; do
log_must sleep .5
done
check_while_waiting $pid in_progress
log_pass "'zpool wait -t replace' and 'zpool replace -w' work."

View File

@ -0,0 +1,64 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when a replacing disk is detached before the replacement
# completes.
#
# STRATEGY:
# 1. Attach a disk to pool to form two-way mirror.
# 2. Modify tunable so that resilver won't complete while test is running.
# 3. Start a replacement of the new disk.
# 4. Start a process that waits for the replace.
# 5. Wait a few seconds and then check that the wait process is actually
# waiting.
# 6. Cancel the replacement by detaching the replacing disk.
# 7. Check that the wait process returns reasonably promptly.
#
# Re-enable scan progress (the test pauses it via zfs_scan_suspend_progress),
# kill any background waiter, and detach whichever extra disk is still
# attached so the shared pool is restored to its original state.
function cleanup
{
log_must set_tunable32 zfs_scan_suspend_progress 0
kill_if_running $pid
get_disklist $TESTPOOL | grep $DISK2 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK2
get_disklist $TESTPOOL | grep $DISK3 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK3
}
typeset pid
log_onexit cleanup
log_must zpool attach -w $TESTPOOL $DISK1 $DISK2
log_must set_tunable32 zfs_scan_suspend_progress 1
log_must zpool replace $TESTPOOL $DISK2 $DISK3
log_bkgrnd zpool wait -t replace $TESTPOOL
pid=$!
log_must sleep 3
proc_must_exist $pid
log_must zpool detach $TESTPOOL $DISK3
bkgrnd_proc_succeeded $pid
log_pass "'zpool wait -t replace' returns when replacing disk is detached."

View File

@ -0,0 +1,64 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for resilvering to complete.
#
# STRATEGY:
# 1. Attach a device to the pool so that resilvering starts.
# 2. Start 'zpool wait'.
# 3. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
# 4. Repeat 1-3, except using the '-w' flag with 'zpool attach' instead of using
# 'zpool wait'.
#
# Clear the injected I/O delay, kill any background waiter, and detach the
# mirror disk the resilver test attached, restoring the single-disk pool.
function cleanup
{
remove_io_delay
kill_if_running $pid
get_disklist $TESTPOOL | grep $DISK2 >/dev/null && \
log_must zpool detach $TESTPOOL $DISK2
}
typeset -r IN_PROGRESS_CHECK="is_pool_resilvering $TESTPOOL"
typeset pid
log_onexit cleanup
add_io_delay $TESTPOOL
# Test 'zpool wait -t resilver'
log_must zpool attach $TESTPOOL $DISK1 $DISK2
log_bkgrnd zpool wait -t resilver $TESTPOOL
pid=$!
check_while_waiting $pid "$IN_PROGRESS_CHECK"
log_must zpool detach $TESTPOOL $DISK2
# Test 'zpool attach -w'
log_bkgrnd zpool attach -w $TESTPOOL $DISK1 $DISK2
pid=$!
while ! is_pool_resilvering $TESTPOOL && proc_exists $pid; do
log_must sleep .5
done
check_while_waiting $pid "$IN_PROGRESS_CHECK"
log_pass "'zpool wait -t resilver' and 'zpool attach -w' work."

View File

@ -0,0 +1,49 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for a scrub to complete.
#
# STRATEGY:
# 1. Start a scrub.
# 2. Start 'zpool wait -t scrub'.
# 3. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
#
# Clear the injected I/O delay and kill the background 'zpool wait' process
# if it is still running.
function cleanup
{
remove_io_delay
kill_if_running $pid
}
typeset pid
log_onexit cleanup
# Slow down scrub so that we actually have something to wait for.
add_io_delay $TESTPOOL
log_must zpool scrub $TESTPOOL
log_bkgrnd zpool wait -t scrub $TESTPOOL
pid=$!
check_while_waiting $pid "is_pool_scrubbing $TESTPOOL"
log_pass "'zpool wait -t scrub' works."

View File

@ -0,0 +1,66 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when a scrub is paused or canceled.
#
# STRATEGY:
# 1. Modify tunable so that scrubs won't complete while test is running.
# 2. Start a scrub.
# 3. Start a process that waits for the scrub.
# 4. Wait a few seconds and then check that the wait process is actually
# waiting.
# 5. Pause the scrub.
# 6. Check that the wait process returns reasonably promptly.
# 7. Repeat 2-6, except stop the scrub instead of pausing it.
#
# Re-enable scan progress, kill any background waiter, and stop a scrub if
# one was left running so the pool is idle for the next test.
function cleanup
{
log_must set_tunable32 zfs_scan_suspend_progress 0
kill_if_running $pid
is_pool_scrubbing $TESTPOOL && log_must zpool scrub -s $TESTPOOL
}
# Start a scrub and a background 'zpool wait -t scrub', confirm the waiter
# is still blocked after a few seconds, then run $1 (a command that pauses or
# stops the scrub) and verify the waiter exits promptly with status 0.
function do_test
{
typeset stop_cmd=$1
log_must zpool scrub $TESTPOOL
log_bkgrnd zpool wait -t scrub $TESTPOOL
pid=$!
# Give the waiter time to start; it must still be alive afterwards.
log_must sleep 3
proc_must_exist $pid
log_must eval "$stop_cmd"
bkgrnd_proc_succeeded $pid
}
typeset pid
log_onexit cleanup
log_must set_tunable32 zfs_scan_suspend_progress 1
do_test "zpool scrub -p $TESTPOOL"
do_test "zpool scrub -s $TESTPOOL"
log_pass "'zpool wait -t scrub' works when scrub is canceled."

View File

@ -0,0 +1,52 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool scrub -w' waits while scrub is in progress.
#
# STRATEGY:
# 1. Start a scrub with the -w flag.
# 2. Wait a few seconds and then check that the wait process is actually
# waiting.
# 3. Stop the scrub, make sure that the command returns reasonably promptly.
#
# Re-enable scan progress and kill the background 'zpool scrub -w' process
# if it is still running.
function cleanup
{
log_must set_tunable32 zfs_scan_suspend_progress 0
kill_if_running $pid
}
typeset pid
log_onexit cleanup
log_must set_tunable32 zfs_scan_suspend_progress 1
log_bkgrnd zpool scrub -w $TESTPOOL
pid=$!
log_must sleep 3
proc_must_exist $pid
log_must zpool scrub -s $TESTPOOL
bkgrnd_proc_succeeded $pid
log_pass "'zpool scrub -w' works."

View File

@ -0,0 +1,23 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
verify_runnable "global"
verify_disk_count $DISKS 3
log_pass

View File

@ -0,0 +1,124 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
typeset -a disk_array=($(find_disks $DISKS))
typeset -r DISK1=${disk_array[0]}
typeset -r DISK2=${disk_array[1]}
typeset -r DISK3=${disk_array[2]}
#
# When the condition it is waiting for becomes true, 'zpool wait' should return
# promptly. We want to enforce this, but any check will be racy because it will
# take some small but indeterminate amount of time for the waiting thread to be
# woken up and for the process to exit.
#
# To deal with this, we provide a grace period after the condition becomes true
# during which 'zpool wait' can exit. If it hasn't exited by the time the grace
# period expires we assume something is wrong and fail the test. While there is
# no value that can really be correct, the idea is we choose something large
# enough that it shouldn't cause issues in practice.
#
typeset -r WAIT_EXIT_GRACE=2.0
# Slow down I/O to every disk in the given pool by injecting a zinject delay
# (-D20:1) on each one, so that background activity lasts long enough for the
# tests to observe it.
function add_io_delay # pool
{
for disk in $(get_disklist $1); do
log_must zinject -d $disk -D20:1 $1
done
}
# Clear all zinject handlers, removing any I/O delay added by add_io_delay.
function remove_io_delay
{
log_must zinject -c all
}
# Succeed (exit 0) iff a process with the given pid currently exists.
function proc_exists # pid
{
	typeset pid="$1"

	ps -p "$pid" >/dev/null
}
# Fail the test if the given pid is no longer running — i.e. the zpool
# process returned before the activity it was waiting on completed.
function proc_must_exist # pid
{
proc_exists $1 || log_fail "zpool process exited too soon"
}
# Fail the test if the given pid is still running — i.e. the zpool process
# did not return promptly after the activity it was waiting on completed.
function proc_must_not_exist # pid
{
proc_exists $1 && log_fail "zpool process took too long to exit"
}
# Print the current wall-clock time as HH:MM:SS, used to timestamp log notes.
function get_time
{
	date '+%H:%M:%S'
}
# Send SIGTERM to the given pid, but only if a pid was actually supplied and
# the process still exists (safe to call from cleanup unconditionally).
function kill_if_running
{
typeset pid=$1
[[ $pid ]] && proc_exists $pid && log_must kill -s TERM $pid
}
# Log a command with a timestamp and then start it running in the background.
# The caller retrieves the background pid via $! immediately afterwards.
function log_bkgrnd
{
log_note "$(get_time) Starting cmd in background '$@'"
"$@" &
}
# Check that a background process has completed and exited with a status of 0.
# Allows a grace period (WAIT_EXIT_GRACE) for the process to notice its wait
# condition and exit before declaring that it took too long.
function bkgrnd_proc_succeeded
{
typeset pid=$1
log_must sleep $WAIT_EXIT_GRACE
proc_must_not_exist $pid
# Reap the process; $? here is the exit status of 'wait', i.e. of $pid.
wait $pid || log_fail "zpool process exited with status $?"
log_note "$(get_time) wait completed successfully"
}
#
# Check that 'zpool wait' returns reasonably promptly after the condition
# waited for becomes true, and not before.
#
function check_while_waiting
{
# The pid of the waiting process
typeset wait_proc_pid=$1
# A check that should be true while the activity is in progress
typeset activity_check=$2
log_note "$(get_time) waiting for process $wait_proc_pid using" \
"activity check '$activity_check'"
# Poll until either the waiter exits or the activity finishes.
while proc_exists $wait_proc_pid && eval "$activity_check"; do
log_must sleep .5
done
#
# If the activity being waited on is still in progress, then zpool wait
# exited too soon.
#
log_mustnot eval "$activity_check"
# Otherwise the activity finished first; the waiter must now exit cleanly.
bkgrnd_proc_succeeded $wait_proc_pid
}
# Whether any vdev in the given pool has an initialization in progress
# ('zpool status -i' reports "initialized, started" for such vdevs).
function is_vdev_initializing # pool
{
	zpool status -i "$1" | grep -q 'initialized, started'
}

View File

@ -0,0 +1,87 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for checkpoint discard to complete.
#
# STRATEGY:
# 1. Create a pool.
# 2. Add some data to the pool.
# 3. Checkpoint the pool and delete the data so that the space is unique to the
# checkpoint.
# 4. Discard the checkpoint using the '-w' flag.
# 5. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
# 6. Repeat 2-5, but using 'zpool wait' instead of the '-w' flag.
#
# Clear injected I/O delays, destroy the test pool, kill any background
# waiter, and restore zfs_spa_discard_memory_limit if the test changed it.
function cleanup
{
log_must zinject -c all
poolexists $TESTPOOL && destroy_pool $TESTPOOL
kill_if_running $pid
[[ $default_mem_limit ]] && log_must set_tunable64 \
zfs_spa_discard_memory_limit $default_mem_limit
}
# Exercise waiting for checkpoint discard. If $1 is true, use
# 'zpool checkpoint -dw' as the waiter; otherwise start the discard and wait
# separately with 'zpool wait -t discard'. Either way, verify via
# check_while_waiting that the waiter returns neither too soon nor too late.
function do_test
{
typeset use_wait_flag=$1
log_must dd if=/dev/urandom of="$TESTFILE" bs=128k count=1k
log_must zpool checkpoint $TESTPOOL
# Make sure bulk of space is unique to checkpoint
log_must rm "$TESTFILE"
# Slow down I/O so the discard lasts long enough to observe.
log_must zinject -d $DISK1 -D20:1 $TESTPOOL
if $use_wait_flag; then
log_bkgrnd zpool checkpoint -dw $TESTPOOL
pid=$!
# Wait until the discard actually starts (or the waiter exits early).
while ! is_pool_discarding $TESTPOOL && proc_exists $pid; do
log_must sleep .5
done
else
log_must zpool checkpoint -d $TESTPOOL
log_bkgrnd zpool wait -t discard $TESTPOOL
pid=$!
fi
check_while_waiting $pid "is_pool_discarding $TESTPOOL"
log_must zinject -c all
}
typeset -r TESTFILE="/$TESTPOOL/testfile"
typeset pid default_mem_limit
log_onexit cleanup
default_mem_limit=$(get_tunable zfs_spa_discard_memory_limit)
log_must set_tunable64 zfs_spa_discard_memory_limit 32
log_must zpool create $TESTPOOL $DISK1
do_test true
do_test false
log_pass "'zpool wait -t discard' and 'zpool checkpoint -dw' work."

View File

@ -0,0 +1,112 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for background freeing to complete.
#
# STRATEGY:
# 1. Create a pool.
# 2. Modify tunables to make sure freeing is slow enough to observe.
# 3. Create a file system with some data.
# 4. Destroy the file system and call 'zpool wait'.
# 5. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
# 6. Repeat 3-5, except destroy a snapshot instead of a filesystem.
# 7. Repeat 3-5, except destroy a clone.
#
# Restore the three tunables the freeing test modified, destroy the test
# pool, and kill any background waiter still running.
function cleanup
{
log_must set_tunable64 zfs_async_block_max_blocks $default_async_block_max_blocks
log_must set_tunable64 zfs_livelist_max_entries $default_max_livelist_entries
log_must set_tunable64 zfs_livelist_min_percent_shared $default_min_pct_shared
poolexists $TESTPOOL && destroy_pool $TESTPOOL
kill_if_running $pid
}
# Start 'zpool wait -t free' in the background and verify it blocks exactly
# while the pool's 'freeing' property is nonzero.
function test_wait
{
log_bkgrnd zpool wait -t free $TESTPOOL
pid=$!
check_while_waiting $pid '[[ $(get_pool_prop freeing $TESTPOOL) != "0" ]]'
}
typeset -r FS="$TESTPOOL/$TESTFS1"
typeset -r SNAP="$FS@snap1"
typeset -r CLONE="$TESTPOOL/clone"
typeset pid default_max_livelist_entries default_min_pct_shared
typeset default_async_block_max_blocks
log_onexit cleanup
log_must zpool create $TESTPOOL $DISK1
#
# Limit the number of blocks that can be freed in a single txg. This slows down
# freeing so that we actually have something to wait for.
#
default_async_block_max_blocks=$(get_tunable zfs_async_block_max_blocks)
log_must set_tunable64 zfs_async_block_max_blocks 8
#
# Space from clones gets freed one livelist per txg instead of being controlled
# by zfs_async_block_max_blocks. Limit the rate at which space is freed by
# limiting the size of livelists so that we end up with a number of them.
#
default_max_livelist_entries=$(get_tunable zfs_livelist_max_entries)
log_must set_tunable64 zfs_livelist_max_entries 16
# Don't disable livelists, no matter how much clone diverges from snapshot
default_min_pct_shared=$(get_tunable zfs_livelist_min_percent_shared)
log_must set_tunable64 zfs_livelist_min_percent_shared -1
#
# Test waiting for space from destroyed filesystem to be freed
#
log_must zfs create "$FS"
log_must dd if=/dev/zero of="/$FS/testfile" bs=1M count=128
log_must zfs destroy "$FS"
test_wait
#
# Test waiting for space from destroyed snapshot to be freed
#
log_must zfs create "$FS"
log_must dd if=/dev/zero of="/$FS/testfile" bs=1M count=128
log_must zfs snapshot "$SNAP"
# Make sure bulk of space is unique to snapshot
log_must rm "/$FS/testfile"
log_must zfs destroy "$SNAP"
test_wait
#
# Test waiting for space from destroyed clone to be freed
#
log_must zfs snapshot "$SNAP"
log_must zfs clone "$SNAP" "$CLONE"
# Add some data to the clone
for i in {1..50}; do
log_must dd if=/dev/urandom of="/$CLONE/testfile$i" bs=1k count=512
# Force each new file to be tracked by a new livelist
log_must zpool sync $TESTPOOL
done
log_must zfs destroy "$CLONE"
test_wait
log_pass "'zpool wait -t freeing' works."

View File

@ -0,0 +1,63 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for devices to complete initializing
#
# STRATEGY:
# 1. Create a pool.
# 2. Modify a tunable to make sure initializing is slow enough to observe.
# 3. Start initializing the vdev in the pool.
# 4. Start 'zpool wait'.
# 5. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
#
# Kill any background waiter, destroy the file-vdev pool, remove the test
# directory, and restore zfs_initialize_chunk_size if it was changed.
function cleanup
{
kill_if_running $pid
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ -d "$TESTDIR" ]] && log_must rm -r "$TESTDIR"
[[ "$default_chunk_sz" ]] && \
log_must set_tunable64 zfs_initialize_chunk_size $default_chunk_sz
}
typeset -r FILE_VDEV="$TESTDIR/file_vdev"
typeset pid default_chunk_sz
log_onexit cleanup
default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
log_must set_tunable64 zfs_initialize_chunk_size 2048
log_must mkdir "$TESTDIR"
log_must mkfile 256M "$FILE_VDEV"
log_must zpool create -f $TESTPOOL "$FILE_VDEV"
log_must zpool initialize $TESTPOOL "$FILE_VDEV"
log_bkgrnd zpool wait -t initialize $TESTPOOL
pid=$!
check_while_waiting $pid "is_vdev_initializing $TESTPOOL"
log_pass "'zpool wait -t initialize' works."

View File

@ -0,0 +1,77 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when an initialization operation is canceled.
#
# STRATEGY:
# 1. Create a pool.
# 2. Modify a tunable to make sure initializing is slow enough that it won't
# complete before the test finishes.
# 3. Start initializing the vdev in the pool.
# 4. Start 'zpool wait'.
# 5. Wait a few seconds and then check that the wait process is actually
# waiting.
# 6. Cancel the initialization of the device.
# 7. Check that the wait process returns reasonably promptly.
# 8. Repeat 3-7, except pause the initialization instead of canceling it.
#
# Kill any background waiter, destroy the test pool, and restore
# zfs_initialize_chunk_size if the test changed it.
function cleanup
{
kill_if_running $pid
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ "$default_chunk_sz" ]] &&
log_must set_tunable64 zfs_initialize_chunk_size $default_chunk_sz
}
# Start initializing $DISK1, wait for it in the background, confirm the
# waiter is still blocked after a few seconds, then run $1 (a command that
# cancels or suspends the initialization) and verify the waiter exits
# promptly with status 0.
function do_test
{
typeset stop_cmd=$1
log_must zpool initialize $TESTPOOL $DISK1
log_bkgrnd zpool wait -t initialize $TESTPOOL
pid=$!
# Make sure that we are really waiting
log_must sleep 3
proc_must_exist $pid
# Stop initialization and make sure process returns
log_must eval "$stop_cmd"
bkgrnd_proc_succeeded $pid
}
typeset pid default_chunk_sz
log_onexit cleanup
# Make sure the initialization takes a while
default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
log_must set_tunable64 zfs_initialize_chunk_size 512
log_must zpool create $TESTPOOL $DISK1
do_test "zpool initialize -c $TESTPOOL $DISK1"
do_test "zpool initialize -s $TESTPOOL $DISK1"
log_pass "'zpool wait' works when initialization is stopped before completion."

View File

@ -0,0 +1,88 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# -w flag for 'zpool initialize' waits for the completion of all and only those
# initializations kicked off by that invocation.
#
# STRATEGY:
# 1. Create a pool with 3 disks.
# 2. Start initializing disks 1 and 2 with one invocation of
# 'zpool initialize -w'
# 3. Start initializing disk 3 with a second invocation of 'zpool initialize -w'
# 4. Cancel the initialization of disk 1. Check that neither waiting process
# exits.
# 5. Cancel the initialization of disk 3. Check that only the second waiting
# process exits.
# 6. Cancel the initialization of disk 2. Check that the first waiting process
# exits.
#
# Kill both background 'zpool initialize -w' processes if still running,
# destroy the test pool, and restore zfs_initialize_chunk_size.
function cleanup
{
kill_if_running $init12_pid
kill_if_running $init3_pid
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ "$default_chunk_sz" ]] &&
log_must set_tunable64 zfs_initialize_chunk_size $default_chunk_sz
}
typeset init12_pid init3_pid default_chunk_sz
log_onexit cleanup
log_must zpool create -f $TESTPOOL $DISK1 $DISK2 $DISK3
# Make sure the initialization takes a while
default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
log_must set_tunable64 zfs_initialize_chunk_size 512
log_bkgrnd zpool initialize -w $TESTPOOL $DISK1 $DISK2
init12_pid=$!
log_bkgrnd zpool initialize -w $TESTPOOL $DISK3
init3_pid=$!
# Make sure that we are really waiting
log_must sleep 3
proc_must_exist $init12_pid
proc_must_exist $init3_pid
#
# Cancel initialization of one of disks started by init12, make sure neither
# process exits
#
log_must zpool initialize -c $TESTPOOL $DISK1
proc_must_exist $init12_pid
proc_must_exist $init3_pid
#
# Cancel initialization started by init3, make sure that process exits, but
# init12 doesn't
#
log_must zpool initialize -c $TESTPOOL $DISK3
proc_must_exist $init12_pid
bkgrnd_proc_succeeded $init3_pid
# Cancel last initialization started by init12, make sure it returns.
log_must zpool initialize -c $TESTPOOL $DISK2
bkgrnd_proc_succeeded $init12_pid
log_pass "'zpool initialize -w' works."

View File

@ -0,0 +1,83 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for multiple activities.
#
# STRATEGY:
# 1. Create a pool with some data.
# 2. Alternate running two different activities (scrub and initialize),
# making sure that they overlap such that one of the two is always
# running.
# 3. Wait for both activities with a single invocation of zpool wait.
# 4. Check that zpool wait doesn't return until both activities have
# stopped.
#
# Kill any background waiter, destroy the test pool, and restore the two
# tunables (initialize chunk size, scan suspend) the test modified.
function cleanup
{
kill_if_running $pid
poolexists $TESTPOOL && destroy_pool $TESTPOOL
[[ "$default_chunk_sz" ]] && log_must set_tunable64 \
zfs_initialize_chunk_size $default_chunk_sz
log_must set_tunable32 zfs_scan_suspend_progress 0
}
typeset pid default_chunk_sz
log_onexit cleanup
log_must zpool create -f $TESTPOOL $DISK1
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=64k count=1k
default_chunk_sz=$(get_tunable zfs_initialize_chunk_size)
log_must set_tunable64 zfs_initialize_chunk_size 512
log_must set_tunable32 zfs_scan_suspend_progress 1
log_must zpool scrub $TESTPOOL
log_bkgrnd zpool wait -t scrub,initialize $TESTPOOL
pid=$!
log_must sleep 2
log_must zpool initialize $TESTPOOL $DISK1
log_must zpool scrub -s $TESTPOOL
log_must sleep 2
log_must zpool scrub $TESTPOOL
log_must zpool initialize -s $TESTPOOL $DISK1
log_must sleep 2
log_must zpool initialize $TESTPOOL $DISK1
log_must zpool scrub -s $TESTPOOL
log_must sleep 2
proc_must_exist $pid
# Cancel last activity, zpool wait should return
log_must zpool initialize -s $TESTPOOL $DISK1
bkgrnd_proc_succeeded $pid
log_pass "'zpool wait' works when waiting for mutliple activities."

View File

@ -0,0 +1,52 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' returns immediately when there is no activity in progress.
#
# STRATEGY:
# 1. Create an empty pool with no activity
# 2. Run zpool wait with various activities, make sure it always returns
# promptly
#
# Destroy the test pool created for the no-activity checks.
function cleanup {
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
typeset -r TIMEOUT_SECS=1
log_onexit cleanup
log_must zpool create $TESTPOOL $DISK1
# Wait for each activity
typeset activities=(free discard initialize replace remove resilver scrub)
for activity in ${activities[@]}; do
log_must timeout $TIMEOUT_SECS zpool wait -t $activity $TESTPOOL
done
# Wait for multiple activities at the same time
log_must timeout $TIMEOUT_SECS zpool wait -t scrub,initialize $TESTPOOL
log_must timeout $TIMEOUT_SECS zpool wait -t free,remove,discard $TESTPOOL
# Wait for all activities at the same time
log_must timeout $TIMEOUT_SECS zpool wait $TESTPOOL
log_pass "'zpool wait' returns immediately when no activity is in progress."

View File

@ -0,0 +1,85 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when waiting for a device to be removed.
#
# STRATEGY:
# 1. Create a pool with two disks and some data.
# 2. Modify a tunable to make sure removal doesn't make any progress.
# 3. Start removing one of the disks.
# 4. Start 'zpool wait'.
# 5. Sleep for a few seconds and check that the process is actually waiting.
# 6. Modify tunable to allow removal to complete.
# 7. Monitor the waiting process to make sure it returns neither too soon nor
# too late.
# 8. Repeat 1-7, except using the '-w' flag for 'zpool remove' instead of using
# 'zpool wait'.
#
function cleanup
{
kill_if_running $pid
log_must set_tunable32 zfs_removal_suspend_progress 0
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
function do_test
{
typeset use_flag=$1
log_must zpool create -f $TESTPOOL $DISK1 $DISK2
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=1k count=16k
# Start removal, but don't allow it to make any progress at first
log_must set_tunable32 zfs_removal_suspend_progress 1
if $use_flag; then
log_bkgrnd zpool remove -w $TESTPOOL $DISK1
pid=$!
while ! is_pool_removing $TESTPOOL && proc_exists $pid; do
log_must sleep .5
done
else
log_must zpool remove $TESTPOOL $DISK1
log_bkgrnd zpool wait -t remove $TESTPOOL
pid=$!
fi
# Make sure the 'zpool wait' is actually waiting
log_must sleep 3
proc_must_exist $pid
# Unpause removal, and wait for it to finish
log_must set_tunable32 zfs_removal_suspend_progress 0
check_while_waiting $pid "is_pool_removing $TESTPOOL"
log_must zpool destroy $TESTPOOL
}
log_onexit cleanup
typeset pid
do_test true
do_test false
log_pass "'zpool wait -t remove' and 'zpool remove -w' work."

View File

@ -0,0 +1,62 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' works when device removal is canceled.
#
# STRATEGY:
# 1. Create a pool with two disks and some data.
# 2. Modify a tunable to make sure removal won't complete while test is running.
# 3. Start removing one of the disks.
# 4. Start 'zpool wait'.
# 5. Sleep for a few seconds and check that the process is actually waiting.
# 6. Cancel the removal of the device.
# 7. Check that the wait process returns reasonably promptly.
#
function cleanup
{
kill_if_running $pid
log_must set_tunable32 zfs_removal_suspend_progress 0
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
typeset pid
log_must zpool create -f $TESTPOOL $DISK1 $DISK2
log_must dd if=/dev/urandom of="/$TESTPOOL/testfile" bs=1k count=16k
# Start removal, but don't allow it to make any progress
log_must set_tunable32 zfs_removal_suspend_progress 1
log_must zpool remove $TESTPOOL $DISK1
log_bkgrnd zpool wait -t remove $TESTPOOL
pid=$!
log_must sleep 3
proc_must_exist $pid
log_must zpool remove -s $TESTPOOL
bkgrnd_proc_succeeded $pid
log_pass "'zpool wait -t remove' works when removal is canceled."

View File

@ -0,0 +1,47 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zpool_wait/zpool_wait.kshlib
#
# DESCRIPTION:
# 'zpool wait' behaves sensibly when invoked incorrectly.
#
# STRATEGY:
# 1. Invoke 'zpool wait' incorrectly and check that it exits with a non-zero
# status.
# 2. Invoke 'zpool wait' with missing or bad arguments and check that it prints
# some sensible error message.
#
function cleanup {
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_must zpool create $TESTPOOL $DISK1
log_mustnot zpool wait
zpool wait 2>&1 | grep -i usage || \
log_fail "Usage message did not contain the word 'usage'."
zpool wait -t scrub fakepool 2>&1 | grep -i 'no such pool' || \
log_fail "Error message did not contain phrase 'no such pool'."
zpool wait -t foo $TESTPOOL 2>&1 | grep -i 'invalid activity' || \
log_fail "Error message did not contain phrase 'invalid activity'."
log_pass "'zpool wait' behaves sensibly when invoked incorrectly."

View File

@ -45,7 +45,8 @@ dist_pkgdata_SCRIPTS = \
zpool_upgrade_001_neg.ksh \
arcstat_001_pos.ksh \
arc_summary_001_pos.ksh \
arc_summary_002_neg.ksh
arc_summary_002_neg.ksh \
zpool_wait_privilege.ksh
dist_pkgdata_DATA = \
misc.cfg

View File

@ -0,0 +1,35 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2019 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
#
# zpool wait works when run as an unprivileged user
#
verify_runnable "global"
log_must zpool wait $TESTPOOL
# Make sure printing status works as unprivileged user.
output=$(zpool wait -H $TESTPOOL 1) || \
log_fail "'zpool wait -H $TESTPOOL 1' failed"
# There should be one line of status output in a pool with no activity.
log_must eval '[[ $(wc -l <<<$output) -ge 1 ]]'
log_pass "zpool wait works when run as a user"

View File

@ -81,9 +81,7 @@ log_must truncate -s 0 $ZED_DEBUG_LOG
# 4. Generate additional events.
log_must zpool offline $MPOOL $VDEV1
log_must zpool online $MPOOL $VDEV1
while ! is_pool_resilvered $MPOOL; do
sleep 1
done
log_must zpool wait -t resilver $MPOOL
log_must zpool scrub $MPOOL

View File

@ -90,10 +90,7 @@ while [[ $i -lt ${#disks[*]} ]]; do
log_must zpool online $TESTPOOL ${disks[$i]}
check_state $TESTPOOL ${disks[$i]} "online" || \
log_fail "Failed to set ${disks[$i]} online"
# Delay for resilver to complete
while ! is_pool_resilvered $TESTPOOL; do
log_must sleep 1
done
log_must zpool wait -t resilver $TESTPOOL
log_must zpool clear $TESTPOOL
while [[ $j -lt ${#disks[*]} ]]; do
if [[ $j -eq $i ]]; then
@ -125,10 +122,7 @@ while [[ $i -lt ${#disks[*]} ]]; do
log_must zpool online $TESTPOOL ${disks[$i]}
check_state $TESTPOOL ${disks[$i]} "online" || \
log_fail "Failed to set ${disks[$i]} online"
# Delay for resilver to complete
while ! is_pool_resilvered $TESTPOOL; do
log_must sleep 1
done
log_must zpool wait -t resilver $TESTPOOL
log_must zpool clear $TESTPOOL
fi
((i++))

View File

@ -229,14 +229,7 @@ function replace_missing_devs
log_must gnudd if=/dev/zero of=$vdev \
bs=1024k count=$(($MINDEVSIZE / (1024 * 1024))) \
oflag=fdatasync
log_must zpool replace -f $pool $vdev $vdev
while true; do
if ! is_pool_resilvered $pool ; then
log_must sleep 2
else
break
fi
done
log_must zpool replace -wf $pool $vdev $vdev
done
}

View File

@ -54,12 +54,7 @@ typeset -i cnt=$(random 2 5)
setup_test_env $TESTPOOL "" $cnt
damage_devs $TESTPOOL 1 "keep_label"
log_must zpool scrub $TESTPOOL
# Wait for the scrub to wrap, or is_healthy will be wrong.
while ! is_pool_scrubbed $TESTPOOL; do
sleep 1
done
log_must zpool scrub -w $TESTPOOL
log_mustnot is_healthy $TESTPOOL

View File

@ -28,9 +28,7 @@ function wait_for_removal # pool
typeset pool=$1
typeset callback=$2
while is_pool_removing $pool; do
sleep 1
done
log_must zpool wait -t remove $pool
#
# The pool state changes before the TXG finishes syncing; wait for

View File

@ -64,9 +64,7 @@ function wait_for_removing_cancel
{
typeset pool=$1
while is_pool_removing $pool; do
sleep 1
done
log_must zpool wait -t remove $pool
#
# The pool state changes before the TXG finishes syncing; wait for