Fix typos in tests/

Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Reviewed-by: Richard Laager <rlaager@wiktel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Closes #9250
Author:    Andrea Gelmini <andrea.gelmini@gelma.net>
Date:      2019-09-03 03:14:53 +02:00
Committer: Brian Behlendorf
Parent:    cb14aa4ca9
Commit:    c6e457dffb

14 changed files with 24 additions and 24 deletions

@@ -43,7 +43,7 @@
  * is hold) occurred, zfs_dirty_inode open a txg failed, and wait previous
  * txg "n" completed.
  * 3. context #1 call uiomove to write, however page fault is occurred in
- * uiomove, which means it need mm_sem, but mm_sem is hold by
+ * uiomove, which means it needs mm_sem, but mm_sem is hold by
  * context #2, so it stuck and can't complete, then txg "n" will not
  * complete.
  *

@@ -131,7 +131,7 @@ function is_loop_device #disk
 }
 #
-# Check if the given device is a multipath device and if there is a sybolic
+# Check if the given device is a multipath device and if there is a symbolic
 # link to a device mapper and to a disk
 # Currently no support for dm devices alone without multipath
 #

@@ -869,7 +869,7 @@ function zero_partitions #<whole_disk_name>
 # Size should be specified with units as per
 # the `format` command requirements eg. 100mb 3gb
 #
-# NOTE: This entire interface is problematic for the Linux parted utilty
+# NOTE: This entire interface is problematic for the Linux parted utility
 # which requires the end of the partition to be specified. It would be
 # best to retire this interface and replace it with something more flexible.
 # At the moment a best effort is made.
@@ -1072,7 +1072,7 @@ function partition_disk #<slice_size> <whole_disk_name> <total_slices>
 # dirnum: the maximum number of subdirectories to use, -1 no limit
 # filenum: the maximum number of files per subdirectory
 # bytes: number of bytes to write
-# num_writes: numer of types to write out bytes
+# num_writes: number of types to write out bytes
 # data: the data that will be written
 #
 # E.g.
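
The parameter list above documents what is presumably the fill_fs helper in libtest.shlib. A minimal usage sketch, with the leading destination-directory argument and all values assumed here rather than taken from the hunk:

    # Hypothetical call: fill /$TESTPOOL/$TESTFS with up to 20 subdirectories,
    # 25 files per subdirectory, writing 8192 bytes twice per file, data byte 0.
    log_must fill_fs /$TESTPOOL/$TESTFS 20 25 8192 2 0
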
@@ -2859,7 +2859,7 @@ function labelvtoc
 #
 # check if the system was installed as zfsroot or not
-# return: 0 ture, otherwise false
+# return: 0 if zfsroot, non-zero if not
 #
 function is_zfsroot
 {

@@ -34,7 +34,7 @@
 #
 # Get the given file/directory access mode
 #
-# $1 object -- file or directroy
+# $1 object -- file or directory
 #
 function get_mode #<obj>
 {
@@ -49,7 +49,7 @@ function get_mode #<obj>
 #
 # Get the given file/directory ACL
 #
-# $1 object -- file or directroy
+# $1 object -- file or directory
 #
 function get_acl #<obj>
 {
@@ -64,7 +64,7 @@ function get_acl #<obj>
 #
 # Get the given file/directory ACL
 #
-# $1 object -- file or directroy
+# $1 object -- file or directory
 #
 function get_compact_acl #<obj>
 {
@@ -243,12 +243,12 @@ function usr_exec #<commands> [...]
 #
 # Count how many ACEs for the specified file or directory.
 #
-# $1 file or directroy name
+# $1 file or directory name
 #
 function count_ACE #<file or dir name>
 {
 if [[ ! -e $1 ]]; then
-log_note "Need input file or directroy name."
+log_note "Need input file or directory name."
 return 1
 fi
@@ -399,7 +399,7 @@ function rwx_node #user node acl_spec|access
 #
 # Get the given file/directory xattr
 #
-# $1 object -- file or directroy
+# $1 object -- file or directory
 #
 function get_xattr #<obj>
 {

@@ -38,9 +38,9 @@
 # Verify set, export and destroy when cachefile is set on pool.
 #
 # STRATEGY:
-# 1. Create two pools with one same cahcefile1.
+# 1. Create two pools with one same cachefile1.
 # 2. Set cachefile of the two pools to another same cachefile2.
-# 3. Verify cachefile1 not exist.
+# 3. Verify cachefile1 does not exist.
 # 4. Export the two pools.
 # 5. Verify cachefile2 not exist.
 # 6. Import the two pools and set cachefile to cachefile2.
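
As a rough illustration of steps 1-5 above, a hedged sketch; pool names, devices, and cachefile paths are hypothetical, and only the standard zpool create/set/export syntax is assumed:

    # Hypothetical sketch: both pools share cachefile1, then move to cachefile2.
    log_must zpool create -o cachefile=$cachefile1 pool1 $DISK1
    log_must zpool create -o cachefile=$cachefile1 pool2 $DISK2
    log_must zpool set cachefile=$cachefile2 pool1
    log_must zpool set cachefile=$cachefile2 pool2
    log_mustnot [ -f $cachefile1 ]          # old cache file should be gone
    log_must zpool export pool1 pool2
    log_mustnot [ -f $cachefile2 ]          # removed once no pool references it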

@@ -19,7 +19,7 @@
 # DESCRIPTION:
 # For the filesystem with casesensitivity=insensitive, normalization=formD,
-# check that lookup succeds using any name form.
+# check that lookup succeeds using any name form.
 #
 # STRATEGY:
 # For each c/n name form:
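
To make "any name form" concrete, a hedged sketch of the kind of lookup being verified; the dataset and file names are made up, and it assumes a UTF-8 locale and ksh93 $'...' escapes:

    # Hypothetical sketch: a name created in one Unicode form must also be
    # found via the other normalization form and via a different case.
    log_must zfs create -o casesensitivity=insensitive -o normalization=formD \
        $TESTPOOL/norm
    mntpnt=$(get_prop mountpoint $TESTPOOL/norm)
    log_must touch "$mntpnt/"$'\xc3\xa4'"file"    # precomposed U+00E4 (NFC)
    log_must ls "$mntpnt/"$'a\xcc\x88'"file"      # 'a' + combining U+0308 (NFD)
    log_must ls "$mntpnt/"$'\xc3\x84'"file"       # upper-case U+00C4, case-folded match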

@@ -144,4 +144,4 @@ log_must_program $TESTPOOL - <<-EOF
 return 0
 EOF
-log_pass "Listing zfs user properies should work correctly."
+log_pass "Listing zfs user properties should work correctly."

@@ -90,7 +90,7 @@ snap_count=$(zfs list -t snapshot | grep $TESTPOOL | wc -l)
 log_note "$snap_count snapshots created by ZCP"
 if [ "$snap_count" -eq 0 ]; then
-log_fail "Channel progam failed to run."
+log_fail "Channel program failed to run."
 elif [ "$snap_count" -gt 50 ]; then
 log_fail "Too many snapshots after a cancel ($snap_count)."
 else

@@ -150,7 +150,7 @@ log_note "Verify zfs clone property for multiple clones"
 names=$(zfs list -rt all -o name $TESTPOOL)
 log_must verify_clones 3 0
-log_note "verfify clone property for clone deletion"
+log_note "verify clone property for clone deletion"
 i=1
 for ds in $datasets; do
 log_must zfs destroy $ds/$TESTCLONE.$i

@@ -92,7 +92,7 @@ for val in 1 2 3; do
 check_used $used $val
 done
-log_note "Verify df(1M) can corectly display the space charged."
+log_note "Verify df(1M) can correctly display the space charged."
 for val in 1 2 3; do
 used=`df -F zfs -k /$TESTPOOL/fs_$val/$FILE | grep $TESTPOOL/fs_$val \
 | awk '{print $3}'`

@@ -33,7 +33,7 @@
 #
 # DESCRIPTION:
-# 'zfs create -p' should work as expecteed
+# 'zfs create -p' should work as expected
 #
 # STRATEGY:
 # 1. To create $newdataset with -p option, first make sure the upper level
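
For reference, the -p behavior exercised by this test is the usual "create missing ancestors" semantics; a minimal hedged example with made-up dataset names:

    # Hypothetical example: -p also creates the missing ancestors a and a/b.
    log_must zfs create -p $TESTPOOL/a/b/c
    log_must datasetexists $TESTPOOL/a $TESTPOOL/a/b $TESTPOOL/a/b/c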

@@ -53,7 +53,7 @@ verify_runnable "both"
 # run 'zfs destroy $opt <dataset>'. 3rd, check the system status.
 #
 # $1 option of 'zfs destroy'
-# $2 dataset will be destroied.
+# $2 dataset will be destroyed.
 #
 function test_n_check
 {

@@ -145,8 +145,8 @@ if is_global_zone; then
 check_dataset datasetexists $CTR $VOL
 check_dataset datasetnonexists $VOLSNAP $VOLCLONE
-# Due to recusive destroy being a best-effort operation,
-# all of the non-busy datasets bellow should be gone now.
+# Due to recursive destroy being a best-effort operation,
+# all of the non-busy datasets below should be gone now.
 check_dataset datasetnonexists $FS $FSSNAP $FSCLONE
 fi

@@ -24,7 +24,7 @@
 #
 # DESCRIPTION:
 # 'zfs destroy -R <snapshot>' can destroy all the child
-# snapshots and preserves all the nested datasetss.
+# snapshots and preserves all the nested datasets.
 #
 # STRATEGY:
 # 1. Create nested datasets in the storage pool.
@@ -57,7 +57,7 @@ for ds in $datasets; do
 datasetexists $ds || log_fail "Create $ds dataset fail."
 done
-# create recursive nestedd snapshot
+# create recursive nested snapshot
 log_must zfs snapshot -r $TESTPOOL/$TESTFS1@snap
 for ds in $datasets; do
 datasetexists $ds@snap || log_fail "Create $ds@snap snapshot fail."
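
To spell out the behavior described above, a hedged sketch of the check that would follow the recursive snapshot; it reuses the variables from this hunk, the rest is assumed. Destroying the recursive snapshot removes every per-dataset snapshot while leaving the datasets themselves intact.

    # Hypothetical sketch: after 'zfs destroy -R' of the recursive snapshot,
    # the snapshots are gone but the nested datasets still exist.
    log_must zfs destroy -R $TESTPOOL/$TESTFS1@snap
    for ds in $datasets; do
        datasetnonexists $ds@snap || log_fail "$ds@snap still exists."
        datasetexists $ds || log_fail "$ds was destroyed unexpectedly."
    done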