tests: clean out more temporary files

What remains is a bunch of anonymous, untraceable /tmp/tmp.XXXXXXXXXX files;
bak.root.receive.staff1.3835 from an error branch; testdir.1 and testdir.3;
and testroot454470 (with children) in testroot.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #13259
наб 2022-03-26 11:19:17 +01:00 committed by Brian Behlendorf
parent 22ddc7f5ff
commit 6ef2151c80
18 changed files with 47 additions and 47 deletions
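
Most of the hunks below apply the same hygiene rule: anything a test creates under /tmp or $TEST_BASE_DIR gets an explicit rm in the cleanup routine registered with log_onexit, so a run no longer leaves stray files or empty mountpoint directories behind. A minimal sketch of that pattern, using hypothetical file names (log_onexit, log_must, and log_pass are existing test-suite helpers):

	function cleanup
	{
		# -f tolerates operands that were never created, -d also removes
		# the now-empty mountpoint directory
		log_must rm -df "$TEST_BASE_DIR/scratch.$$" "/tmp/mnt$$"
	}
	log_onexit cleanup

	log_must mkdir "/tmp/mnt$$"                     # hypothetical mountpoint
	log_must touch "$TEST_BASE_DIR/scratch.$$"      # hypothetical scratch file
	# ... exercise the feature under test ...
	log_pass "test left no temporary artifacts behind"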

View File

@@ -1903,6 +1903,8 @@ class ZFSTest(unittest.TestCase):
with self.assertRaises(lzc_exc.StreamIOError) as ctx:
lzc.lzc_send(snap, None, fd)
os.close(fd)
os.unlink(output.name)
self.assertEqual(ctx.exception.errno, errno.EBADF)
def test_recv_full(self):

View File

@@ -747,7 +747,7 @@ if [ -d "$RESULTS_DIR" ]; then
cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results"
fi
rm -f "$RESULTS_FILE" "$REPORT_FILE"
rm -f "$RESULTS_FILE" "$REPORT_FILE" "$TEST_LIST" "$TEMP_RESULTS_FILE"
if [ -n "$SINGLETEST" ]; then
rm -f "$RUNFILES" >/dev/null 2>&1

View File

@@ -145,7 +145,6 @@ function histo_check_test_pool
typeset -i this_rs
typeset -i this_ri
typeset -i sum_filesizes=0
typeset stripped
let histo_check_pool_size=$(get_pool_prop size ${pool})
if [[ ! ${histo_check_pool_size} =~ ${re_number} ]]; then
@@ -243,6 +242,8 @@ function histo_check_test_pool
fi
fi
done < ${stripped}
rm "${stripped}"
if [ ${fail_value} -gt 0 ]; then
if [ ${error_count} -eq 1 ]; then
log_note "hctp: There was ${error_count} error"

View File

@@ -62,6 +62,8 @@ function cleanup
bkmarkexists "$DATASET#$TESTBMCOPY" && \
destroy_dataset "$DATASET#$TESTBMCOPY"
log_must rm -f "$TEST_BASE_DIR/zfstest_datastream.$$"
}
log_assert "'zfs bookmark' should work only when passed valid arguments."

View File

@@ -49,6 +49,7 @@ verify_runnable "both"
function cleanup
{
snapexists $SNAPFS && destroy_dataset $SNAPFS -Rf
log_must rm -df "/tmp/mnt$$"
}
log_onexit cleanup
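
Here and in several later cleanups, rm gains the -d flag and the mountpoint or altroot directory as an operand: -f ignores operands that do not exist, and -d lets the same rm call remove the now-empty directory as well as regular files. A small illustration with a hypothetical test body:

	log_must mkdir "/tmp/mnt$$"      # temporary mountpoint used by the test
	# ... mount something on it, run the checks, unmount ...
	log_must rm -df "/tmp/mnt$$"     # -d removes the empty directory; -f keeps this from
	                                 # failing if it was never created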

View File

@@ -49,24 +49,23 @@ verify_runnable "both"
function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && destroy_dataset $TESTPOOL/$TESTFS1 -f
log_must rm -df "/tmp/mnt$$"
}
log_onexit cleanup
log_assert "'zfs create -o property=value filesystem' can successfully create \
a ZFS filesystem with correct property set."
log_assert "'zfs create -o property=value filesystem' can successfully create" \
"a ZFS filesystem with correct property set."
typeset -i i=0
while (( $i < ${#RW_FS_PROP[*]} )); do
log_must zfs create -o ${RW_FS_PROP[$i]} $TESTPOOL/$TESTFS1
datasetexists $TESTPOOL/$TESTFS1 || \
log_fail "zfs create $TESTPOOL/$TESTFS1 fail."
propertycheck $TESTPOOL/$TESTFS1 ${RW_FS_PROP[i]} || \
log_fail "${RW_FS_PROP[i]} is failed to set."
log_must datasetexists $TESTPOOL/$TESTFS1
log_must propertycheck $TESTPOOL/$TESTFS1 ${RW_FS_PROP[i]}
log_must_busy zfs destroy -f $TESTPOOL/$TESTFS1
(( i = i + 1 ))
done
log_pass "'zfs create -o property=value filesystem' can successfully create \
a ZFS filesystem with correct property set."
log_pass "'zfs create -o property=value filesystem' can successfully create" \
"a ZFS filesystem with correct property set."

View File

@@ -48,15 +48,16 @@ verify_runnable "both"
function cleanup
{
datasetexists $TESTPOOL/$TESTFS1 && \
datasetexists $TESTPOOL/$TESTFS1 &&
destroy_dataset $TESTPOOL/$TESTFS1 -f
log_must rm -df "/tmp/mnt$$"
}
log_onexit cleanup
log_assert "'zfs create -o property=value filesystem' can successfully create \
a ZFS filesystem with multiple properties set."
log_assert "'zfs create -o property=value filesystem' can successfully create" \
"a ZFS filesystem with multiple properties set."
typeset -i i=0
typeset opts=""
@@ -69,17 +70,15 @@ while (( $i < ${#RW_FS_PROP[*]} )); do
done
log_must zfs create $opts $TESTPOOL/$TESTFS1
datasetexists $TESTPOOL/$TESTFS1 || \
log_fail "zfs create $TESTPOOL/$TESTFS1 fail."
log_must datasetexists $TESTPOOL/$TESTFS1
i=0
while (( $i < ${#RW_FS_PROP[*]} )); do
if [[ ${RW_FS_PROP[$i]} != *"checksum"* ]]; then
propertycheck $TESTPOOL/$TESTFS1 ${RW_FS_PROP[i]} || \
log_fail "${RW_FS_PROP[i]} is failed to set."
log_must propertycheck $TESTPOOL/$TESTFS1 ${RW_FS_PROP[i]}
fi
(( i = i + 1 ))
done
log_pass "'zfs create -o property=value filesystem' can successfully create \
a ZFS filesystem with multiple properties set."
log_pass "'zfs create -o property=value filesystem' can successfully create" \
"a ZFS filesystem with multiple properties set."

View File

@@ -49,12 +49,13 @@ verify_runnable "global"
function cleanup
{
datasetexists $TESTPOOL && destroy_pool $TESTPOOL
log_must rm -df "/tmp/mnt$$"
}
log_onexit cleanup
log_assert "'zpool create -O property=value pool' can successfully create a pool \
with correct filesystem property set."
log_assert "'zpool create -O property=value pool' can successfully create a pool" \
"with correct filesystem property set."
set -A RW_FS_PROP "quota=536870912" \
"reservation=536870912" \
@@ -80,14 +81,11 @@ fi
typeset -i i=0
while (( $i < ${#RW_FS_PROP[*]} )); do
log_must zpool create -O ${RW_FS_PROP[$i]} -f $TESTPOOL $DISKS
datasetexists $TESTPOOL || \
log_fail "zpool create $TESTPOOL fail."
propertycheck $TESTPOOL ${RW_FS_PROP[i]} || \
log_fail "${RW_FS_PROP[i]} is failed to set."
log_must datasetexists $TESTPOOL
log_must propertycheck $TESTPOOL ${RW_FS_PROP[i]}
log_must zpool destroy $TESTPOOL
(( i = i + 1 ))
done
log_pass "'zpool create -O property=value pool' can successfully create a pool \
with correct filesystem property set."
log_pass "'zpool create -O property=value pool' can successfully create a pool" \
"with correct filesystem property set."

View File

@@ -49,12 +49,13 @@ verify_runnable "global"
function cleanup
{
poolexists $TESTPOOL && destroy_pool $TESTPOOL
log_must rm -df "/tmp/mnt$$"
}
log_onexit cleanup
log_assert "'zpool create -O property=value pool' can successfully create a pool \
with multiple filesystem properties set."
log_assert "'zpool create -O property=value pool' can successfully create a pool" \
"with multiple filesystem properties set."
set -A RW_FS_PROP "quota=536870912" \
"reservation=536870912" \
@@ -81,15 +82,13 @@ while (( $i < ${#RW_FS_PROP[*]} )); do
done
log_must zpool create $opts -f $TESTPOOL $DISKS
datasetexists $TESTPOOL || log_fail "zpool create $TESTPOOL fail."
log_must datasetexists $TESTPOOL
i=0
while (( $i < ${#RW_FS_PROP[*]} )); do
propertycheck $TESTPOOL ${RW_FS_PROP[i]} || \
log_fail "${RW_FS_PROP[i]} is failed to set."
log_must propertycheck $TESTPOOL ${RW_FS_PROP[i]}
(( i = i + 1 ))
done
log_pass "'zpool create -O property=value pool' can successfully create a pool \
with multiple filesystem properties set."
log_pass "'zpool create -O property=value pool' can successfully create a pool" \
"with multiple filesystem properties set."

View File

@@ -64,7 +64,7 @@ function cleanup
if poolexists $POOL ; then
zpool export $POOL
fi
log_must rm -f $VDEV1 $VDEV2 $VDEV3
log_must rm -fd $VDEV1 $VDEV2 $VDEV3 $SUPPLY $MOUNTDIR
}
function damage_and_repair
@@ -130,4 +130,3 @@ else
log_note observed $errcnt new checksum errors after a scrub
log_pass "$assertion"
fi

View File

@@ -56,7 +56,7 @@ function cleanup
if poolexists $POOL ; then
destroy_pool $POOL
fi
log_must rm -f $VDEV1 $VDEV2
log_must rm -fd $VDEV1 $VDEV2 $MOUNTDIR
}
log_assert "Duplicate I/O ereport errors are not posted"

View File

@@ -59,7 +59,7 @@ function cleanup
if poolexists $POOL ; then
log_must destroy_pool $POOL
fi
log_must rm -f $VDEV1 $VDEV2 $VDEV3
log_must rm -fd $VDEV1 $VDEV2 $VDEV3 $MOUNTDIR
}
log_assert "Check that the number of zpool errors match the number of events"

View File

@@ -33,7 +33,7 @@ function cleanup
{
destroy_pool $TESTPOOL
destroy_pool $TESTPOOL2
rm -f $FILEDEV_PREFIX*
rm -fd $FILEDEV_PREFIX* $altroot
}
function setup_mirror # <conf>

View File

@@ -43,7 +43,7 @@ function cleanup
if poolexists $TESTPOOL2 ; then
destroy_pool $TESTPOOL2
fi
rm -f $VDEV_TEMP $VDEV_M1 $VDEV_M2
rm -fd $VDEV_TEMP $VDEV_M1 $VDEV_M2 $altroot
}
log_onexit cleanup
@@ -64,6 +64,6 @@ log_must zpool remove $TESTPOOL $VDEV_TEMP
log_must wait_for_removal $TESTPOOL
log_must zpool split -R $altroot $TESTPOOL $TESTPOOL2
log_must poolexists $TESTPOOL2
log_must test "$(get_pool_prop 'altroot' $TESTPOOL2)" == "$altroot"
log_must test "$(get_pool_prop 'altroot' $TESTPOOL2)" = "$altroot"
log_pass "'zpool split' works on pools with indirect VDEVs."

View File

@@ -33,7 +33,7 @@ function cleanup
{
destroy_pool $TESTPOOL
destroy_pool $TESTPOOL2
rm -f $FILEDEV_PREFIX*
rm -fd $FILEDEV_PREFIX* $altroot
}
#

View File

@@ -44,7 +44,7 @@ function cleanup
destroy_pool $TESTPOOL
destroy_pool $TESTPOOL2
unload_scsi_debug
rm -f "$FILE_DEVICE"
rm -fd "$FILE_DEVICE" "$ALTROOT"
}
function setup_mirror

View File

@@ -31,7 +31,7 @@
. $STF_SUITE/tests/functional/xattr/xattr_common.kshlib
USES_NIS=$(<$TEST_BASE_DIR/zfs-xattr-test-nis.txt)
rm $TEST_BASE_DIR/zfs-xattr-test-nis.txt
rm $TEST_BASE_DIR/zfs-xattr-test-nis.txt $TEST_BASE_DIR/zfs-xattr-test-user.txt
if [ "${USES_NIS}" == "true" ]
then

View File

@@ -50,8 +50,8 @@ verify_runnable "global"
function cleanup {
if ismounted /tmp/$NEWFS_DEFAULT_FS.$$ $NEWFS_DEFAULT_FS; then
log_must umount /tmp/$NEWFS_DEFAULT_FS.$$
log_must rm -rf /tmp/$NEWFS_DEFAULT_FS.$$
fi
log_must rm -rf /tmp/$NEWFS_DEFAULT_FS.$$
}
log_assert "Files from $NEWFS_DEFAULT_FS with xattrs copied to zfs retain xattr info."