OpenZFS 9245 - zfs-test failures: slog_013_pos and slog_014_pos

Test 13 would fail because of attempts to zpool destroy -f a pool that
was still busy. Changed those calls to destroy_pool, which retries in a
loop, and the problem is no longer reproducible. Also removed some
non-functional code in the test, which had effectively been commented
out by being placed after the call to log_pass.
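
destroy_pool is an existing helper in the suite's common test library
(libtest.shlib) and its implementation is not part of this diff. Below is
a minimal sketch of the retry-loop idea, assuming libtest.shlib
conventions (poolexists, log_note); the helper name, retry count, and
sleep interval are illustrative, not the real implementation:

# Hypothetical sketch only; not the actual destroy_pool implementation.
function destroy_pool_with_retry # pool
{
	typeset pool=$1
	typeset -i tries=0

	while (( tries < 10 )); do
		poolexists $pool || return 0	# pool already gone
		zpool destroy -f $pool && return 0
		log_note "$pool is busy; retrying zpool destroy."
		sleep 1
		(( tries += 1 ))
	done
	return 1
}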

Test 14 would fail because the check for a degraded pool could sometimes
complete before the pool had changed state. Changed the logic to poll in
a loop with a timeout, and the problem is no longer reproducible.
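
The wait_for_degraded helper that implements this loop is added in the
first hunk below. A typical call site, patterned on the slog_014_pos
change (the explicit 60-second timeout is an illustrative override of
the 30-second default):

# Offline a vdev, then poll until the pool actually reports DEGRADED
# instead of sampling zpool status output once.
log_must zpool offline $TESTPOOL $VDIR/a
log_must wait_for_degraded $TESTPOOL 60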

Authored by: John Wren Kennedy <john.kennedy@delphix.com>
Reviewed by: Matt Ahrens <matt@delphix.com>
Reviewed by: Chris Williamson <chris.williamson@delphix.com>
Reviewed by: Yuri Pankov <yuripv@yuripv.net>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Approved by: Dan McDonald <danmcd@joyent.com>
Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>

Porting Notes:
* Re-enabled slog_013_pos.ksh

OpenZFS-issue: https://illumos.org/issues/9245
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/8f323b5
Closes #7585


@@ -2152,6 +2152,25 @@ function is_pool_removed #pool
 	return $?
 }
 
+function wait_for_degraded
+{
+	typeset pool=$1
+	typeset timeout=${2:-30}
+	typeset t0=$SECONDS
+
+	while :; do
+		[[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break
+		log_note "$pool is not yet degraded."
+		sleep 1
+		if ((SECONDS - t0 > $timeout)); then
+			log_note "$pool not degraded after $timeout seconds."
+			return 1
+		fi
+	done
+
+	return 0
+}
+
 #
 # Use create_pool()/destroy_pool() to clean up the information in
 # in the given disk to avoid slice overlapping.


@@ -33,12 +33,8 @@
 function cleanup
 {
-	if datasetexists $TESTPOOL ; then
-		log_must zpool destroy -f $TESTPOOL
-	fi
-	if datasetexists $TESTPOOL2 ; then
-		log_must zpool destroy -f $TESTPOOL2
-	fi
+	poolexists $TESTPOOL && destroy_pool $TESTPOOL
+	poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2
 	rm -rf $TESTDIR
 }


@ -44,16 +44,9 @@
verify_runnable "global"
if ! $(is_physical_device $DISKS) ; then
log_unsupported "This directory cannot be run on raw files."
fi
function cleanup_testenv
{
cleanup
if datasetexists $TESTPOOL2 ; then
log_must zpool destroy -f $TESTPOOL2
fi
if [[ -n $lofidev ]]; then
if is_linux; then
losetup -d $lofidev
@@ -71,19 +64,19 @@ log_onexit cleanup_testenv
 dsk1=${DISKS%% *}
 log_must zpool create $TESTPOOL ${DISKS#$dsk1}
 
-# Add nomal disk
+# Add provided disk
 log_must zpool add $TESTPOOL log $dsk1
 log_must verify_slog_device $TESTPOOL $dsk1 'ONLINE'
 
-# Add nomal file
+# Add normal file
 log_must zpool add $TESTPOOL log $LDEV
 ldev=$(random_get $LDEV)
 log_must verify_slog_device $TESTPOOL $ldev 'ONLINE'
 
-# Add lofi device
+# Add loop back device
 if is_linux; then
 	lofidev=$(losetup -f)
-	lofidev=${lofidev##*/}
 	log_must losetup $lofidev ${LDEV2%% *}
+	lofidev=${lofidev##*/}
 else
 	lofidev=${LDEV2%% *}
 	log_must lofiadm -a $lofidev
@@ -94,13 +87,3 @@ log_must verify_slog_device $TESTPOOL $lofidev 'ONLINE'
 
 log_pass "Verify slog device can be disk, file, lofi device or any device " \
     "that presents a block interface."
-
-# Add file which reside in the itself
-mntpnt=$(get_prop mountpoint $TESTPOOL)
-log_must mkfile $MINVDEVSIZE $mntpnt/vdev
-log_must zpool add $TESTPOOL $mntpnt/vdev
-
-# Add ZFS volume
-vol=$TESTPOOL/vol
-log_must zpool create -V $MINVDEVSIZE $vol
-log_must zpool add $TESTPOOL ${ZVOL_DEVDIR}/$vol


@@ -45,10 +45,8 @@ verify_runnable "global"
 
 log_assert "log device can survive when one of the pool device get corrupted."
 
-for type in "mirror" "raidz" "raidz2"
-do
-	for spare in "" "spare"
-	do
+for type in "mirror" "raidz" "raidz2"; do
+	for spare in "" "spare"; do
 		log_must zpool create $TESTPOOL $type $VDEV $spare $SDEV \
 		    log $LDEV
@@ -64,14 +62,8 @@ do
 		fi
 
 		log_must zpool scrub $TESTPOOL
 		log_must display_status $TESTPOOL
-		log_must zpool status $TESTPOOL 2>&1 >/dev/null
 
 		log_must zpool offline $TESTPOOL $VDIR/a
+		log_must wait_for_degraded $TESTPOOL
 
-		zpool status -v $TESTPOOL | \
-		    grep "state: DEGRADED" 2>&1 >/dev/null
-		if (( $? != 0 )); then
-			log_fail "pool $TESTPOOL status should be DEGRADED"
-		fi
-
 		zpool status -v $TESTPOOL | grep logs | \
 		    grep "DEGRADED" 2>&1 >/dev/null