OpenZFS 6865 - want zfs-tests cases for zpool labelclear command

Authored by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: John Kennedy <john.kennedy@delphix.com>
Approved by: Robert Mustacchi <rm@joyent.com>
Reviewed-by: loli10K <ezomori.nozomu@gmail.com>
Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>

Porting Notes:
- Updated 'zpool labelclear' and 'zdb -l' such that they attempt
  to find a vdev given solely its short name.  This behavior is
  consistent with the upstream OpenZFS code and the test cases
  depend on it.  The actual implementation differs slightly due
  to device naming conventions on Linux (see the first sketch
  after these notes).
- auto_online_001_pos, auto_replace_001_pos and add-o_ashift
  test cases updated to expect failure when no label exists.
- read_efi_label() and zpool_label_disk_check() are read-only
  operations and should use O_RDONLY at open time to enforce this.
- zpool_label_disk() and zpool_relabel_disk() write the partition
  information using O_DIRECT and follow up with fsync() and page
  cache invalidation to ensure a consistent view of the device
  (see the second sketch after these notes).
- dump_label() in zdb should invalidate the page cache in order
  to get the authoritative label from disk.
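
The short-name lookup composes three libzfs helpers that all appear
in the diffs below.  Here is a minimal sketch of that pattern, not
the committed code: it assumes this tree's libzfs.h (which also
declares strlcpy) and linking against -lzfs; the wrapper name
resolve_vdev_path() is hypothetical.

    #include <errno.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <libzfs.h>

    static int
    resolve_vdev_path(const char *arg, char *path, size_t len)
    {
            struct stat st;
            int error;

            /* Absolute paths, and names that already stat, are used as is. */
            (void) strlcpy(path, arg, len);
            if (arg[0] == '/' || stat(path, &st) == 0)
                    return (0);

            /* Try prepending the expected disk paths (/dev, /dev/disk/by-id, ...). */
            error = zfs_resolve_shortname(arg, path, len);

            /* Whole disks resolve to their ZFS partition, e.g. sda -> sda1. */
            if (error == 0 && zfs_dev_is_whole_disk(path)) {
                    if (zfs_append_partition(path, len) == -1)
                            error = ENOENT;
            }

            return ((error || stat(path, &st) != 0) ? ENOENT : 0);
    }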
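
A similarly minimal sketch of the page cache handling, using Linux's
standard BLKFLSBUF block-device ioctl (the same call used in the zdb
and libzfs hunks below); the program itself is illustrative only.
Read-only consumers such as 'zdb -l' invalidate before reading so
they see the authoritative on-disk label; writers such as
zpool_label_disk() additionally fsync() their O_RDWR|O_DIRECT
descriptor first.

    #include <errno.h>
    #include <fcntl.h>
    #include <linux/fs.h>           /* BLKFLSBUF */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
            int fd;

            if (argc != 2) {
                    (void) fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
                    return (1);
            }

            /* O_RDONLY suffices for reading; BLKFLSBUF still needs privilege. */
            if ((fd = open(argv[1], O_RDONLY)) < 0) {
                    (void) fprintf(stderr, "cannot open '%s': %s\n",
                        argv[1], strerror(errno));
                    return (1);
            }

            /* Drop cached pages so the next read hits the disk. */
            if (ioctl(fd, BLKFLSBUF) != 0)
                    (void) fprintf(stderr, "failed to invalidate cache "
                        "'%s': %s\n", argv[1], strerror(errno));

            (void) close(fd);
            return (0);
    }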

OpenZFS-issue: https://www.illumos.org/issues/6865
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/c95076c
Closes #5981
Commit dbb38f6605 (parent 047187c1bd), authored by Yuri Pankov
2017-01-13 09:25:15 -08:00, committed by Brian Behlendorf.
15 changed files with 359 additions and 114 deletions.

@@ -2515,13 +2515,37 @@ dump_label(const char *dev)
bzero(labels, sizeof (labels));
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (ioctl(fd, BLKFLSBUF) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));

@@ -790,7 +790,10 @@ zpool_do_remove(int argc, char **argv)
}
/*
* zpool labelclear <vdev>
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for the vdevs which are members of
* the exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
@@ -798,8 +801,11 @@ zpool_do_remove(int argc, char **argv)
int
zpool_do_labelclear(int argc, char **argv)
{
char *vdev, *name;
char vdev[MAXPATHLEN];
char *name = NULL;
struct stat st;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
@@ -822,90 +828,107 @@ zpool_do_labelclear(int argc, char **argv)
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev device name\n"));
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
vdev = argv[0];
if ((fd = open(vdev, O_RDWR)) < 0) {
(void) fprintf(stderr, gettext("Unable to open %s\n"), vdev);
return (B_FALSE);
}
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(vdev, argv[0], sizeof (vdev));
if (vdev[0] != '/' && stat(vdev, &st) != 0) {
int error;
name = NULL;
if (zpool_in_use(g_zfs, fd, &state, &name, &inuse) != 0) {
if (force)
goto wipe_label;
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
(void) fprintf(stderr,
gettext("Unable to determine pool state for %s\n"
"Use -f to force the clearing any label data\n"), vdev);
return (1);
}
if (inuse) {
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
(void) fprintf(stderr,
gettext("labelclear operation failed.\n"
"\tVdev %s is a member (%s), of pool \"%s\".\n"
"\tTo remove label information from this device, "
"export or destroy\n\tthe pool, or remove %s from "
"the configuration of this pool\n\tand retry the "
"labelclear operation.\n"),
vdev, zpool_pool_state_to_name(state), name, vdev);
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr,
gettext("labelclear operation failed.\n\tVdev "
"%s is a member of the exported pool \"%s\".\n"
"\tUse \"zpool labelclear -f %s\" to force the "
"removal of label\n\tinformation.\n"),
vdev, name, vdev);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr,
gettext("labelclear operation failed.\n"
"\tVdev %s is a member of the pool \"%s\".\n"
"\tThis pool is unknown to this system, but may "
"be active on\n\tanother system. Use "
"\'zpool labelclear -f %s\' to force the\n"
"\tremoval of label information.\n"),
vdev, name, vdev);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool... */
break;
if (error || (stat(vdev, &st) != 0)) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try specifying absolute "
"path instead\n"), argv[0]);
return (1);
}
}
wipe_label:
if (zpool_clear_label(fd) != 0) {
if ((fd = open(vdev, O_RDWR)) < 0) {
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
vdev, strerror(errno));
return (1);
}
if (zpool_read_label(fd, &config, NULL) != 0 || config == NULL) {
(void) fprintf(stderr,
gettext("Label clear failed on vdev %s\n"), vdev);
gettext("failed to check state for %s\n"), vdev);
return (1);
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
return (1);
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\"\n"),
vdev, zpool_pool_state_to_name(state), name);
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
close(fd);
if (name != NULL)
free(name);
free(name);
(void) close(fd);
return (ret);
}

@@ -506,31 +506,6 @@ check_device(const char *path, boolean_t force,
return (error);
}
/*
* By "whole disk" we mean an entire physical disk (something we can
* label, toggle the write cache on, etc.) as opposed to the full
* capacity of a pseudo-device such as lofi or did. We act as if we
* are labeling the disk, which should be a pretty good test of whether
* it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
* it isn't.
*/
static boolean_t
is_whole_disk(const char *path)
{
struct dk_gpt *label;
int fd;
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (B_FALSE);
if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
(void) close(fd);
return (B_FALSE);
}
efi_free(label);
(void) close(fd);
return (B_TRUE);
}
/*
* This may be a shorthand device path or it could be total gibberish.
* Check to see if it is a known device available in zfs_vdev_paths.
@@ -545,7 +520,7 @@ is_shorthand_path(const char *arg, char *path, size_t path_size,
error = zfs_resolve_shortname(arg, path, path_size);
if (error == 0) {
*wholedisk = is_whole_disk(path);
*wholedisk = zfs_dev_is_whole_disk(path);
if (*wholedisk || (stat64(path, statbuf) == 0))
return (0);
}
@@ -640,7 +615,7 @@ make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
/*
* Complete device or file path. Exact type is determined by
* examining the file descriptor afterwards. Symbolic links
* are resolved to their real paths for the is_whole_disk()
* are resolved to their real paths to determine whole disk
* and S_ISBLK/S_ISREG type checks. However, we are careful
* to store the given path as ZPOOL_CONFIG_PATH to ensure we
* can leverage udev's persistent device labels.
@@ -651,7 +626,7 @@ make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
return (NULL);
}
wholedisk = is_whole_disk(path);
wholedisk = zfs_dev_is_whole_disk(path);
if (!wholedisk && (stat64(path, &statbuf) != 0)) {
(void) fprintf(stderr,
gettext("cannot open '%s': %s\n"),
@@ -659,7 +634,7 @@ make_leaf_vdev(nvlist_t *props, const char *arg, uint64_t is_log)
return (NULL);
}
/* After is_whole_disk() check restore original passed path */
/* After whole disk check restore original passed path */
strlcpy(path, arg, sizeof (path));
} else {
err = is_shorthand_path(arg, path, sizeof (path),

@@ -213,6 +213,7 @@ AC_CONFIG_FILES([
tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_history/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile

@@ -286,6 +286,7 @@ extern int zpool_label_disk_wait(char *, int);
extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, char *);
int zfs_dev_is_dm(char *dev_name);
int zfs_dev_is_whole_disk(char *dev_name);
char *zfs_get_underlying_path(char *dev_name);
char *zfs_get_enclosure_sysfs_path(char *dev_name);

@@ -2361,12 +2361,18 @@ zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
* The module will do it for us in vdev_disk_open().
*/
error = efi_use_whole_disk(fd);
/* Flush the buffers to disk and invalidate the page cache. */
(void) fsync(fd);
(void) ioctl(fd, BLKFLSBUF);
(void) close(fd);
if (error && error != VT_ENOSPC) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"relabel '%s': unable to read disk capacity"), path);
return (zfs_error(hdl, EZFS_NOCAP, msg));
}
return (0);
}
@@ -4062,7 +4068,7 @@ read_efi_label(nvlist_t *config, diskaddr_t *sb)
(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
strrchr(path, '/'));
if ((fd = open(diskname, O_RDWR|O_DIRECT)) >= 0) {
if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
struct dk_gpt *vtoc;
if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
@@ -4114,7 +4120,7 @@ zpool_label_disk_check(char *path)
struct dk_gpt *vtoc;
int fd, err;
if ((fd = open(path, O_RDWR|O_DIRECT)) < 0)
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (errno);
if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
@@ -4246,13 +4252,21 @@ zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
vtoc->efi_parts[8].p_size = resv;
vtoc->efi_parts[8].p_tag = V_RESERVED;
if ((rval = efi_write(fd, vtoc)) != 0 || (rval = efi_rescan(fd)) != 0) {
/*
* Some block drivers (like pcata) may not support EFI
* GPT labels. Print out a helpful error message dir-
* ecting the user to manually label the disk and give
* a specific slice.
*/
rval = efi_write(fd, vtoc);
/* Flush the buffers to disk and invalidate the page cache. */
(void) fsync(fd);
(void) ioctl(fd, BLKFLSBUF);
if (rval == 0)
rval = efi_rescan(fd);
/*
* Some block drivers (like pcata) may not support EFI GPT labels.
* Print out a helpful error message directing the user to manually
* label the disk and give a specific slice.
*/
if (rval != 0) {
(void) close(fd);
efi_free(vtoc);
@@ -4371,6 +4385,34 @@ zfs_dev_is_dm(char *dev_name)
return (1);
}
/*
* By "whole disk" we mean an entire physical disk (something we can
* label, toggle the write cache on, etc.) as opposed to the full
* capacity of a pseudo-device such as lofi or did. We act as if we
* are labeling the disk, which should be a pretty good test of whether
* it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
* it isn't.
*/
int
zfs_dev_is_whole_disk(char *dev_name)
{
struct dk_gpt *label;
int fd;
if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
return (0);
if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
(void) close(fd);
return (0);
}
efi_free(label);
(void) close(fd);
return (1);
}
/*
* Lookup the underlying device for a device name
*

@@ -308,6 +308,11 @@ tests = ['zpool_import_001_pos',
'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos',
'zpool_import_rename_001_pos']
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported']
pre =
post =
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg']

@@ -37,6 +37,7 @@ SUBDIRS = \
zpool_get \
zpool_history \
zpool_import \
zpool_labelclear \
zpool_offline \
zpool_online \
zpool_remove \

@@ -100,7 +100,7 @@ do
log_mustnot zpool add -o ashift="$badval" $disk2
log_must zpool destroy $TESTPOOL
log_must zpool labelclear $disk1
log_must zpool labelclear $disk2
log_mustnot zpool labelclear $disk2
done
log_pass "zpool add -o ashift=<n>' works with different ashift values"

@@ -0,0 +1,5 @@
pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/cli_root/zpool_labelclear
dist_pkgdata_SCRIPTS = \
labelclear.cfg \
zpool_labelclear_active.ksh \
zpool_labelclear_exported.ksh

@@ -0,0 +1,26 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2016 Nexenta Systems, Inc.
#
. $STF_SUITE/include/libtest.shlib
typeset LABELCLEAR="zpool labelclear"
typeset LABELREAD="zdb -lq"
typeset disks=(${DISKS[*]})
typeset disk1=${disks[0]}
typeset disk2=${disks[1]}
typeset disk3=${disks[2]}

@@ -0,0 +1,68 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2016 Nexenta Systems, Inc.
#
. $STF_SUITE/tests/functional/cli_root/zpool_labelclear/labelclear.cfg
# DESCRIPTION:
# Check that zpool labelclear will refuse to clear the label
# (with or without -f) on any vdevs of the imported pool.
#
# STRATEGY:
# 1. Create the pool with log device.
# 2. Try clearing the label on data and log devices.
# 3. Add auxiliary (cache/spare) vdevs.
# 4. Try clearing the label on auxiliary vdevs.
# 5. Check that zpool labelclear will return non-zero and
# labels are intact.
verify_runnable "global"
function cleanup
{
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_assert "zpool labelclear will fail on all vdevs of imported pool"
# Create simple pool, skip any mounts
log_must zpool create -O mountpoint=none -f $TESTPOOL $disk1 log $disk2
# Check that labelclear [-f] will fail on ACTIVE pool vdevs
log_mustnot $LABELCLEAR $disk1
log_must $LABELREAD $disk1
log_mustnot $LABELCLEAR -f $disk1
log_must $LABELREAD $disk1
log_mustnot $LABELCLEAR $disk2
log_must $LABELREAD $disk2
log_mustnot $LABELCLEAR -f $disk2
log_must $LABELREAD $disk2
# Add a cache/spare to the pool, check that labelclear [-f] will fail
# on the vdev and will succeed once it's removed from pool config
for vdevtype in "cache" "spare"; do
log_must zpool add $TESTPOOL $vdevtype $disk3
log_mustnot $LABELCLEAR $disk3
log_must $LABELREAD $disk3
log_mustnot $LABELCLEAR -f $disk3
log_must $LABELREAD $disk3
log_must zpool remove $TESTPOOL $disk3
log_must $LABELCLEAR $disk3
log_mustnot $LABELREAD $disk3
done
log_pass "zpool labelclear will fail on all vdevs of imported pool"

@@ -0,0 +1,74 @@
#!/bin/ksh -p
#
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2016 Nexenta Systems, Inc.
#
. $STF_SUITE/tests/functional/cli_root/zpool_labelclear/labelclear.cfg
# DESCRIPTION:
# Check that zpool labelclear will refuse to clear the label
# on ACTIVE vdevs of exported pool without -f, and will succeed with -f.
#
# STRATEGY:
# 1. Create a pool with log device.
# 2. Export the pool.
# 3. Check that zpool labelclear returns non-zero when trying to
# clear the label on ACTIVE vdevs, and succeeds with -f.
# 4. Add auxiliary vdevs (cache/spare).
# 5. Check that zpool labelclear succeeds on auxiliary vdevs of
# exported pool.
verify_runnable "global"
function cleanup
{
poolexists $TESTPOOL && destroy_pool $TESTPOOL
}
log_onexit cleanup
log_assert "zpool labelclear will fail on ACTIVE vdevs of exported pool and" \
"succeed with -f"
for vdevtype in "" "cache" "spare"; do
# Create simple pool, skip any mounts
log_must zpool create -O mountpoint=none -f $TESTPOOL $disk1 log $disk2
# Add auxiliary vdevs (cache/spare)
if [[ -n $vdevtype ]]; then
log_must zpool add $TESTPOOL $vdevtype $disk3
fi
# Export the pool
log_must zpool export $TESTPOOL
# Check that labelclear will fail without -f
log_mustnot $LABELCLEAR $disk1
log_must $LABELREAD $disk1
log_mustnot $LABELCLEAR $disk2
log_must $LABELREAD $disk2
# Check that labelclear will succeed with -f
log_must $LABELCLEAR -f $disk1
log_mustnot $LABELREAD $disk1
log_must $LABELCLEAR -f $disk2
log_mustnot $LABELREAD $disk2
# Check that labelclear on auxiliary vdevs will succeed
if [[ -n $vdevtype ]]; then
log_must $LABELCLEAR $disk3
log_mustnot $LABELREAD $disk3
fi
done
log_pass "zpool labelclear will fail on ACTIVE vdevs of exported pool and" \
"succeed with -f"

@@ -76,7 +76,7 @@ fi
# Clear disk labels
for i in {0..2}
do
log_must zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
done
if is_loop_device $DISK1; then

@@ -87,7 +87,7 @@ log_onexit cleanup
# Clear disk labels
for i in {0..2}
do
log_must zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
zpool labelclear -f /dev/disk/by-id/"${devs_id[i]}"
done
setup