From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: LOLi <loli10K@users.noreply.github.com>
Date: Thu, 28 Dec 2017 19:15:32 +0100
Subject: [PATCH] Fix 'zpool add' handling of nested interior VDEVs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When replacing a faulted device which was previously handled by a spare,
multiple levels of nested interior VDEVs will be present in the pool
configuration; the following example illustrates one of the possible
situations:

   NAME                          STATE     READ WRITE CKSUM
   testpool                      DEGRADED     0     0     0
     raidz1-0                    DEGRADED     0     0     0
       spare-0                   DEGRADED     0     0     0
         replacing-0             DEGRADED     0     0     0
           /var/tmp/fault-dev    UNAVAIL      0     0     0  cannot open
           /var/tmp/replace-dev  ONLINE       0     0     0
         /var/tmp/spare-dev1     ONLINE       0     0     0
       /var/tmp/safe-dev         ONLINE       0     0     0
   spares
     /var/tmp/spare-dev1         INUSE     currently in use

This is safe and allowed, but get_replication() needs to handle this
situation gracefully to let zpool add new devices to the pool.
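
For reference, the nested configuration shown above can be reproduced
with a sequence along these lines (a sketch using hypothetical file
vdevs and pool name, mirroring the new test below; ZED must be running
so the hot spare is attached automatically):

   truncate -s 256M /var/tmp/fault-dev /var/tmp/safe-dev \
       /var/tmp/replace-dev /var/tmp/spare-dev1 /var/tmp/spare-dev2
   zpool create testpool raidz1 /var/tmp/fault-dev /var/tmp/safe-dev
   zpool add testpool spare /var/tmp/spare-dev1
   zinject -d /var/tmp/fault-dev -e nxio -T all -f 100 testpool
   zpool scrub testpool        # I/O errors fault the disk, spare kicks in
   zpool replace testpool /var/tmp/fault-dev /var/tmp/replace-dev
   zpool add testpool spare /var/tmp/spare-dev2  # previously failed here
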
Reviewed-by: George Melikov <mail@gmelikov.ru>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: loli10K <ezomori.nozomu@gmail.com>
Closes #6678
Closes #6996
(cherry picked from commit a8fa31b50b958306cd39c21e8518f776ee59f1b6)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 .../functional/cli_root/zpool_add/Makefile.am |   3 +-
 cmd/zpool/zpool_vdev.c                        |   6 +-
 tests/runfiles/linux.run                      |   2 +-
 tests/zfs-tests/include/libtest.shlib         |  66 +++++++++++-
 .../zpool_add/add_nested_replacing_spare.ksh  | 111 +++++++++++++++++++++
 5 files changed, 182 insertions(+), 6 deletions(-)
 create mode 100755 tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh

diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am b/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
index 4b6b533fe..062028299 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile.am
@@ -15,4 +15,5 @@ dist_pkgdata_SCRIPTS = \
 	zpool_add_009_neg.ksh \
 	zpool_add_010_pos.ksh \
 	add-o_ashift.ksh \
-	add_prop_ashift.ksh
+	add_prop_ashift.ksh \
+	add_nested_replacing_spare.ksh
diff --git a/cmd/zpool/zpool_vdev.c b/cmd/zpool/zpool_vdev.c
index 97faa5f9b..fd6bd9e76 100644
--- a/cmd/zpool/zpool_vdev.c
+++ b/cmd/zpool/zpool_vdev.c
@@ -860,9 +860,11 @@ get_replication(nvlist_t *nvroot, boolean_t fatal)
 
 				/*
 				 * If this is a replacing or spare vdev, then
-				 * get the real first child of the vdev.
+				 * get the real first child of the vdev: do this
+				 * in a loop because replacing and spare vdevs
+				 * can be nested.
 				 */
-				if (strcmp(childtype,
+				while (strcmp(childtype,
 				    VDEV_TYPE_REPLACING) == 0 ||
 				    strcmp(childtype, VDEV_TYPE_SPARE) == 0) {
 					nvlist_t **rchild;
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index f872c0cbf..303c27529 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -228,7 +228,7 @@ tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
     'zpool_add_004_pos', 'zpool_add_005_pos', 'zpool_add_006_pos',
     'zpool_add_007_neg', 'zpool_add_008_neg', 'zpool_add_009_neg',
     'zpool_add_010_pos',
-    'add-o_ashift', 'add_prop_ashift']
+    'add-o_ashift', 'add_prop_ashift', 'add_nested_replacing_spare']
 tags = ['functional', 'cli_root', 'zpool_add']
 
 [tests/functional/cli_root/zpool_attach]
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 345d1903d..86f172a6d 100644
--- a/tests/zfs-tests/include/libtest.shlib
+++ b/tests/zfs-tests/include/libtest.shlib
@@ -1988,6 +1988,31 @@ function check_hotspare_state # pool disk state{inuse,avail}
 	return 0
 }
 
+#
+# Wait until a hotspare transitions to a given state or times out.
+#
+# Return 0 when pool/disk matches expected state, 1 on timeout.
+#
+function wait_hotspare_state # pool disk state timeout
+{
+	typeset pool=$1
+	typeset disk=${2#$/DEV_DSKDIR/}
+	typeset state=$3
+	typeset timeout=${4:-60}
+	typeset -i i=0
+
+	while [[ $i -lt $timeout ]]; do
+		if check_hotspare_state $pool $disk $state; then
+			return 0
+		fi
+
+		i=$((i+1))
+		sleep 1
+	done
+
+	return 1
+}
+
 #
 # Verify a given slog disk is inuse or avail
 #
@@ -2026,6 +2051,31 @@ function check_vdev_state # pool disk state{online,offline,unavail}
 	return 0
 }
 
+#
+# Wait until a vdev transitions to a given state or times out.
+#
+# Return 0 when pool/disk matches expected state, 1 on timeout.
+#
+function wait_vdev_state # pool disk state timeout
+{
+	typeset pool=$1
+	typeset disk=${2#$/DEV_DSKDIR/}
+	typeset state=$3
+	typeset timeout=${4:-60}
+	typeset -i i=0
+
+	while [[ $i -lt $timeout ]]; do
+		if check_vdev_state $pool $disk $state; then
+			return 0
+		fi
+
+		i=$((i+1))
+		sleep 1
+	done
+
+	return 1
+}
+
 #
 # Check the output of 'zpool status -v <pool>',
 # and to see if the content of <token> contain the <keyword> specified.
@@ -3394,13 +3444,25 @@ function zed_stop
 	if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
 		zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
 		kill $zedpid
-		wait $zedpid
+		while ps -p $zedpid > /dev/null; do
+			sleep 1
+		done
 		rm -f ${ZEDLET_DIR}/zed.pid
 	fi
-
 	return 0
 }
 
+#
+# Drain all zevents
+#
+function zed_events_drain
+{
+	while [ $(zpool events -H | wc -l) -ne 0 ]; do
+		sleep 1
+		zpool events -c >/dev/null
+	done
+}
+
 #
 # Check is provided device is being active used as a swap device.
 #
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
new file mode 100755
index 000000000..b38079852
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh
@@ -0,0 +1,111 @@
+#!/bin/ksh -p
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or http://www.opensolaris.org/os/licensing.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib
+
+#
+# DESCRIPTION:
+#	'zpool add' works with nested replacing/spare vdevs
+#
+# STRATEGY:
+#	1. Create a redundant pool with a spare device
+#	2. Manually fault a device, wait for the hot-spare and then replace it:
+#	   this creates a situation where replacing and spare vdevs are nested.
+#	3. Verify 'zpool add' is able to add new devices to the pool.
+#

+verify_runnable "global"
+
+function cleanup
+{
+	zed_stop
+	zed_cleanup
+	log_must zinject -c all
+	destroy_pool $TESTPOOL
+	log_must rm -f $DATA_DEVS $SPARE_DEVS
+}
+
+log_assert "'zpool add' works with nested replacing/spare vdevs"
+log_onexit cleanup
+
+FAULT_DEV="$TEST_BASE_DIR/fault-dev"
+SAFE_DEV1="$TEST_BASE_DIR/safe-dev1"
+SAFE_DEV2="$TEST_BASE_DIR/safe-dev2"
+SAFE_DEV3="$TEST_BASE_DIR/safe-dev3"
+SAFE_DEVS="$SAFE_DEV1 $SAFE_DEV2 $SAFE_DEV3"
+REPLACE_DEV="$TEST_BASE_DIR/replace-dev"
+ADD_DEV="$TEST_BASE_DIR/add-dev"
+DATA_DEVS="$FAULT_DEV $SAFE_DEVS $REPLACE_DEV $ADD_DEV"
+SPARE_DEV1="$TEST_BASE_DIR/spare-dev1"
+SPARE_DEV2="$TEST_BASE_DIR/spare-dev2"
+SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"
+
+# We need ZED running to work with spares
+zed_setup
+zed_start
+# Clear events from previous runs
+zed_events_drain
+
+for type in "mirror" "raidz1" "raidz2" "raidz3"
+do
+	# 1. Create a redundant pool with a spare device
+	truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
+	log_must zpool create $TESTPOOL $type $FAULT_DEV $SAFE_DEVS
+	log_must zpool add $TESTPOOL spare $SPARE_DEV1
+
+	# 2.1 Fault a device, verify the spare is kicked in
+	log_must zinject -d $FAULT_DEV -e nxio -T all -f 100 $TESTPOOL
+	log_must zpool scrub $TESTPOOL
+	log_must wait_vdev_state $TESTPOOL $FAULT_DEV "UNAVAIL" 60
+	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
+	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
+	log_must check_state $TESTPOOL "" "DEGRADED"
+
+	# 2.2 Replace the faulted device: this creates a replacing vdev inside a
+	# spare vdev
+	log_must zpool replace $TESTPOOL $FAULT_DEV $REPLACE_DEV
+	log_must wait_vdev_state $TESTPOOL $REPLACE_DEV "ONLINE" 60
+	zpool status | awk -v poolname="$TESTPOOL" -v type="$type" 'BEGIN {s=""}
+	    $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
+	    END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }'
+	if [[ $? -ne 0 ]]; then
+		log_fail "Pool does not contain nested replacing/spare vdevs"
+	fi
+
+	# 3. Verify 'zpool add' is able to add new devices
+	log_must zpool add $TESTPOOL spare $SPARE_DEV2
+	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
+	log_must zpool add -f $TESTPOOL $ADD_DEV
+	log_must wait_vdev_state $TESTPOOL $ADD_DEV "ONLINE" 60
+
+	# Cleanup
+	log_must zinject -c all
+	destroy_pool $TESTPOOL
+	log_must rm -f $DATA_DEVS $SPARE_DEVS
+done
+
+log_pass "'zpool add' works with nested replacing/spare vdevs"
-- 
2.14.2
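
A quick usage sketch of the helpers this patch adds to libtest.shlib
(wait_hotspare_state, wait_vdev_state, zed_events_drain); the pool name
and device paths below are hypothetical, and ZED must be running for
the spare to engage:

   . $STF_SUITE/include/libtest.shlib

   zed_events_drain        # start from an empty zevent queue
   log_must zinject -d /var/tmp/fault-dev -e nxio -T all -f 100 testpool
   log_must zpool scrub testpool
   # poll (timeout defaults to 60s) until the states are reached
   log_must wait_vdev_state testpool /var/tmp/fault-dev "UNAVAIL" 60
   log_must wait_hotspare_state testpool /var/tmp/spare-dev1 "INUSE" 60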