Fix typos in tests/
Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Reviewed-by: Richard Laager <rlaager@wiktel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Closes #9247
parent 7a7da11671
commit 36be89b8e5
@@ -90,7 +90,7 @@ function find_mnttab_dev
 }
 
 #
-# Save the systme current dump device configuration
+# Save the system current dump device configuration
 #
 function save_dump_dev
 {
@@ -146,7 +146,7 @@ function find_vfstab_dev
 }
 
 #
-# Save the systme current dump device configuration
+# Save the system current dump device configuration
 #
 function save_dump_dev
 {
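
Both hunks above touch only the comment over save_dump_dev(); the function body is not part of this diff. For orientation, a minimal sketch of what saving and restoring a dump device configuration can look like on illumos, where dumpadm(1M) exists (the parsing below is an assumption for illustration, not the library's actual implementation; dumpadm is not available on Linux):

# Capture the currently configured dump device so it can be restored later.
# This parses the "Dump device:" line of plain `dumpadm` output.
saved_dump_dev=$(dumpadm | awk -F: '/Dump device/ {print $2}' | awk '{print $1}')
echo "saved dump device: $saved_dump_dev"

# Restore it once the test is done re-pointing dumps (kept commented out,
# since changing the dump device is intrusive):
# dumpadm -d "$saved_dump_dev"
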
@@ -95,7 +95,7 @@ do
 	log_must zpool create $opt $TESTPOOL ${pooltype[i]} \
 		$file.1 $file.2 $file.3
 	! poolexists $TESTPOOL && \
-		log_fail "Createing pool with $opt fails."
+		log_fail "Creating pool with $opt fails."
 	mpt=`zfs mount | egrep "^$TESTPOOL[^/]" | awk '{print $2}'`
 	(( ${#mpt} == 0 )) && \
 		log_fail "$TESTPOOL created with $opt is not mounted."
@@ -105,12 +105,12 @@ do
 		from the output of zfs mount"
 	if [[ "$opt" == "-m $TESTDIR1" ]]; then
 		[[ ! -d $TESTDIR1 ]] && \
-			log_fail "$TESTDIR1 is not created auotmatically."
+			log_fail "$TESTDIR1 is not created automatically."
 		[[ "$mpt" != "$TESTDIR1" ]] && \
 			log_fail "$TESTPOOL is not mounted on $TESTDIR1."
 	elif [[ "$opt" == "-R $TESTDIR1" ]]; then
 		[[ ! -d $TESTDIR1/$TESTPOOL ]] && \
-			log_fail "$TESTDIR1/$TESTPOOL is not created auotmatically."
+			log_fail "$TESTDIR1/$TESTPOOL is not created automatically."
 		[[ "$mpt" != "$TESTDIR1/$TESTPOOL" ]] && \
 			log_fail "$TESTPOOL is not mounted on $TESTDIR1/$TESTPOOL."
 	else
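
The assertions above exercise the difference between 'zpool create -m <dir>' and '-R <dir>'. A minimal stand-alone sketch of that behaviour, using an illustrative pool name and a file-backed vdev instead of the suite's $TESTPOOL/$TESTDIR1 variables (run as root where a scratch pool is acceptable):

# Scratch file vdev for a throwaway pool.
truncate -s 512M /var/tmp/vdev0

# -m: the root dataset is mounted directly at the given directory,
# which ZFS creates automatically if it does not exist.
zpool create -m /mnt/explicit demo_pool /var/tmp/vdev0
zfs get -H -o value mountpoint demo_pool    # -> /mnt/explicit
zpool destroy demo_pool

# -R: the default mountpoint (/demo_pool) is kept but prefixed with an
# altroot, so the dataset ends up mounted at /mnt/altroot/demo_pool.
zpool create -R /mnt/altroot demo_pool /var/tmp/vdev0
zpool get -H -o value altroot demo_pool     # -> /mnt/altroot
zfs mount | grep demo_pool                  # -> ... /mnt/altroot/demo_pool
zpool destroy demo_pool
rm /var/tmp/vdev0
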
@@ -41,7 +41,7 @@
 # STRATEGY:
 # 1. delete all devices in the swap
 # 2. create a zpool
-# 3. Verify the creation is successed.
+# 3. Verify the creation was successful
 #
 
 verify_runnable "global"
@@ -34,7 +34,7 @@ log_assert "'zpool events -c' should successfully clear events."
 # 1. Clear all ZFS events
 # This is needed because we may already over the max number or events queued
 # (zfs_zevent_len_max) generated by previous tests: generating $EVENTS_NUM new
-# events and then counting them is racy and leads to failues, so start from 0.
+# events and then counting them is racy and leads to failures, so start from 0.
 log_must zpool events -c
 
 # 2. Generate some new ZFS events
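
The corrected comment is the point of this test's first step: event counts are only deterministic when you start from an empty queue. A hedged sketch of the same clear-then-generate-then-count pattern outside the test framework (pool name is illustrative; a scrub is just one convenient event source):

# Start from an empty event queue so later counts are meaningful.
zpool events -c

# Generate some events: scrubbing posts start/finish events for the pool.
zpool scrub demo_pool
sleep 2                              # small pool: give the scrub time to finish

# -H prints one tab-separated event per line, so wc -l counts them.
zpool events -H demo_pool | wc -l
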
@@ -38,7 +38,7 @@
 #
 # STRATEGY:
 # 1. Create pool, volume & snap
-# 2. Verify 'zpool history' can cope with incorret arguments.
+# 2. Verify 'zpool history' can cope with incorrect arguments.
 #
 
 verify_runnable "global"
@@ -115,7 +115,7 @@ function test_common
 	# further than the time that we took the checkpoint.
 	#
 	# Note that, ideally we would want to take a checkpoint
-	# right after we recond the txg we plan to rewind to.
+	# right after we record the txg we plan to rewind to.
 	# But since we can't attach, detach or remove devices
 	# while having a checkpoint, we take it after the
 	# operation that changes the config.
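
The corrected comment rests on an ordering constraint: devices cannot be attached, detached or removed while a checkpoint exists, so the test records the txg first and takes the checkpoint after the config change. For reference, a minimal sketch of the plain checkpoint/rewind workflow (pool and dataset names are illustrative, not taken from test_common):

# Freeze the current on-disk state of the pool.
zpool checkpoint demo_pool

# Make changes that may need to be undone later.
zfs create demo_pool/scratch

# To throw the changes away, re-import the pool rewound to the checkpoint
# (add -d <dir> if the pool lives on file-backed vdevs).
zpool export demo_pool
zpool import --rewind-to-checkpoint demo_pool

# If the changes had turned out fine, the checkpoint would instead be
# discarded in place:
# zpool checkpoint -d demo_pool
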
@@ -54,7 +54,7 @@
 # 3. Export the test pool.
 # 4. Move one or more device files to other directory
 # 5. Verify 'zpool import -d' with the new directory
-#    will handle moved files successfullly.
+#    will handle moved files successfully.
 #    Using the various combinations.
 #        - Regular import
 #        - Alternate Root Specified
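
The strategy works because 'zpool import -d <dir>' searches the given directory for labelled devices instead of /dev. A small stand-alone sketch with illustrative paths:

# Build a pool on a file-backed vdev, then relocate the vdev file.
mkdir -p /var/tmp/vdevs /var/tmp/moved
truncate -s 512M /var/tmp/vdevs/disk0
zpool create demo_pool /var/tmp/vdevs/disk0

zpool export demo_pool
mv /var/tmp/vdevs/disk0 /var/tmp/moved/

# Point the label search at the new directory; the pool is found by name.
zpool import -d /var/tmp/moved demo_pool
zpool destroy demo_pool
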
@@ -24,8 +24,8 @@
 # STRATEGY:
 # 1. Create the pool with log device.
 # 2. Try clearing the label on data and log devices.
-# 3. Add auxilary (cache/spare) vdevs.
-# 4. Try clearing the label on auxilary vdevs.
+# 3. Add auxiliary (cache/spare) vdevs.
+# 4. Try clearing the label on auxiliary vdevs.
 # 5. Check that zpool labelclear will return non-zero and
 #    labels are intact.
 
@@ -26,8 +26,8 @@
 # 2. Export the pool.
 # 3. Check that zpool labelclear returns non-zero when trying to
 #    clear the label on ACTIVE vdevs, and succeeds with -f.
-# 4. Add auxilary vdevs (cache/spare).
-# 5. Check that zpool labelclear succeeds on auxilary vdevs of
+# 4. Add auxiliary vdevs (cache/spare).
+# 5. Check that zpool labelclear succeeds on auxiliary vdevs of
 #    exported pool.
 
 verify_runnable "global"
@@ -44,7 +44,7 @@ log_assert "zpool labelclear will fail on ACTIVE vdevs of exported pool and" \
 for vdevtype in "" "cache" "spare"; do
 	# Create simple pool, skip any mounts
 	log_must zpool create -O mountpoint=none -f $TESTPOOL $disk1 log $disk2
-	# Add auxilary vdevs (cache/spare)
+	# Add auxiliary vdevs (cache/spare)
 	if [[ -n $vdevtype ]]; then
 		log_must zpool add $TESTPOOL $vdevtype $disk3
 	fi
@@ -63,7 +63,7 @@ for vdevtype in "" "cache" "spare"; do
 	log_must zpool labelclear -f $disk2
 	log_mustnot zdb -lq $disk2
 
-	# Check that labelclear on auxilary vdevs will succeed
+	# Check that labelclear on auxiliary vdevs will succeed
 	if [[ -n $vdevtype ]]; then
 		log_must zpool labelclear $disk3
 		log_mustnot zdb -lq $disk3
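
Taken together, the labelclear hunks test one pattern: data and log vdevs of an exported pool refuse labelclear unless -f is given, auxiliary (cache/spare) vdevs clear without it, and zdb -lq is used to check whether a label survived. A hedged stand-alone sketch of that pattern with file-backed vdevs and an illustrative pool name:

# Throwaway pool with a spare acting as the auxiliary vdev.
truncate -s 512M /var/tmp/d0 /var/tmp/d1
zpool create -O mountpoint=none demo_pool /var/tmp/d0 spare /var/tmp/d1
zpool export demo_pool

# Data vdevs still carry the pool's label: labelclear refuses without -f ...
zpool labelclear /var/tmp/d0 || echo "refused, as the test expects"
zdb -lq /var/tmp/d0 && echo "label still present"

# ... and wipes it when forced.
zpool labelclear -f /var/tmp/d0

# Auxiliary vdevs of the exported pool clear without forcing.
zpool labelclear /var/tmp/d1
zdb -lq /var/tmp/d1 || echo "spare label cleared"
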
@@ -42,7 +42,7 @@
 #
 # STRATEGY:
 # 1. Import pools of all versions
-# 2. Setup a test enviorment over the old pools.
+# 2. Setup a test environment over the old pools.
 # 3. Verify the commands related to 'zfs upgrade' succeed as expected.
 #
 
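
For orientation, these are the commands the upgrade strategy revolves around; the filesystem name below is illustrative rather than the test's own setup:

# Versions/feature flags the installed tools support.
zpool upgrade -v
zfs upgrade -v

# Filesystems still on an older on-disk version, and upgrading one of them.
zfs upgrade
zfs upgrade demo_pool/fs
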
@@ -34,7 +34,7 @@
 
 # This setup script is moderately complex, as it creates scenarios for all
 # of the tests included in this directory. Usually we'd want each test case
-# to setup/teardown it's own configuration, but this would be time consuming
+# to setup/teardown its own configuration, but this would be time consuming
 # given the nature of these tests. However, as a side-effect, one test
 # leaving the system in an unknown state could impact other test cases.
 
@@ -49,7 +49,7 @@ function check_for_online
 	    | grep ONLINE )
 	if [ -n "$RESULT" ]
 	then
-		log_fail "A disk was brough online!"
+		log_fail "A disk was brought online!"
 	fi
 }
 
@@ -68,7 +68,7 @@ for i in $files ; do
 	test_zpool_script "$i" "$testpool" "zpool iostat -Pv -c"
 done
 
-# Test that we can run multiple scripts separated with a commma by running
+# Test that we can run multiple scripts separated with a comma by running
 # all the scripts in a single -c line.
 allscripts="$(echo $scripts | sed -r 's/[[:blank:]]+/,/g')"
 test_zpool_script "$allscripts" "$testpool" "zpool iostat -Pv -c"
@@ -68,7 +68,7 @@ for i in $files ; do
 	test_zpool_script "$i" "$testpool" "zpool status -P -c"
 done
 
-# Test that we can run multiple scripts separated with a commma by running
+# Test that we can run multiple scripts separated with a comma by running
 # all the scripts in a single -c line.
 allscripts="$(echo $scripts | sed -r 's/[[:blank:]]+/,/g')"
 test_zpool_script "$allscripts" "$testpool" "zpool status -P -c"
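
Both of these hunks exercise the same mechanism: -c takes a comma-separated list of zpool.d scripts, which the tests build by squashing whitespace with sed. A hedged sketch against a few scripts that ship with OpenZFS (the set actually installed under /etc/zfs/zpool.d may differ; pool name is illustrative):

# A whitespace-separated list of zpool.d scripts ...
scripts="media model size"

# ... joined into a single comma-separated -c argument, as the tests do.
allscripts="$(echo $scripts | sed -r 's/[[:blank:]]+/,/g')"

zpool iostat -Pv -c "$allscripts" demo_pool
zpool status -P -c "$allscripts" demo_pool
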