Mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-26 19:19:32 +03:00)

commit 2320e6eb43 (parent 861dca065e)

Add zfs-test facility to automatically rerun failing tests

This was a project proposed as part of the Quality theme for the hackathon
at the 2021 OpenZFS Developer Summit. The idea is to improve the usability
of the automated tests that run when a PR is created by automatically
rerunning failing tests, making flaky tests less impactful.

Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: Tony Nguyen <tony.nguyen@delphix.com>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #12740
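At a high level, the change is a handshake between the three pieces touched
below: the CI workflows pass a new -R flag to zfs-tests.sh, zts-report.py
learns to grade a run strictly and to signal "only known-flaky tests failed"
via its exit status, and test-runner.py learns to rerun just the tests named
in a previous log. A condensed sketch of the flow (file names such as
results.log and failed-tests.txt are illustrative, not from the patch):

    /usr/share/zfs/zfs-tests.sh -vR -s 3G     # CI entry point; -R enables reruns
    zts-report.py --no-maybes results.log     # exit 0: all results expected
                                              # exit 1: real failures
                                              # exit 2: only known-flaky failures
    test-runner.py ... -l failed-tests.txt    # on exit 2: rerun only the failures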
.github/workflows/zfs-tests-functional.yml (vendored, 2 lines changed)
@@ -64,7 +64,7 @@ jobs:
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
       - name: Tests
         run: |
-          /usr/share/zfs/zfs-tests.sh -v -s 3G
+          /usr/share/zfs/zfs-tests.sh -vR -s 3G
       - name: Prepare artifacts
         if: failure()
         run: |
.github/workflows/zfs-tests-sanity.yml (vendored, 2 lines changed)
@@ -60,7 +60,7 @@ jobs:
           sudo rm -rf "$AGENT_TOOLSDIRECTORY"
       - name: Tests
         run: |
-          /usr/share/zfs/zfs-tests.sh -v -s 3G -r sanity
+          /usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity
      - name: Prepare artifacts
         if: failure()
         run: |
scripts/zfs-tests.sh
@@ -21,6 +21,10 @@
 # CDDL HEADER END
 #
 
+#
+# Copyright 2020 OmniOS Community Edition (OmniOSce) Association.
+#
+
 BASE_DIR=$(dirname "$0")
 SCRIPT_COMMON=common.sh
 if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then
@@ -48,6 +52,7 @@ ITERATIONS=1
 ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh"
 ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh"
 UNAME=$(uname -s)
+RERUN=""
 
 # Override some defaults if on FreeBSD
 if [ "$UNAME" = "FreeBSD" ] ; then
@@ -322,6 +327,7 @@ OPTIONS:
     -f          Use files only, disables block device tests
     -S          Enable stack tracer (negative performance impact)
     -c          Only create and populate constrained path
+    -R          Automatically rerun failing tests
     -n NFSFILE  Use the nfsfile to determine the NFS configuration
     -I NUM      Number of iterations
     -d DIR      Use DIR for files and loopback devices
@@ -348,7 +354,7 @@ $0 -x
 EOF
 }
 
-while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
+while getopts 'hvqxkfScRn:d:s:r:?t:T:u:I:' OPTION; do
     case $OPTION in
     h)
         usage
@@ -376,6 +382,9 @@ while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do
         constrain_path
         exit
         ;;
+    R)
+        RERUN="yes"
+        ;;
     n)
         nfsfile=$OPTARG
         [ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile"
@@ -694,12 +703,35 @@ ${TEST_RUNNER} ${QUIET:+-q} \
     -i "${STF_SUITE}" \
     -I "${ITERATIONS}" \
     2>&1 | tee "$RESULTS_FILE"
+
 #
 # Analyze the results.
 #
-${ZTS_REPORT} "$RESULTS_FILE" >"$REPORT_FILE"
+${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE"
 RESULT=$?
+
+if [ "$RESULT" -eq "2" ] && [ -n "$RERUN" ]; then
+    MAYBES="$($ZTS_REPORT --list-maybes)"
+    TEMP_RESULTS_FILE=$(mktemp -u -t zts-results-tmp.XXXXX -p "$FILEDIR")
+    TEST_LIST=$(mktemp -u -t test-list.XXXXX -p "$FILEDIR")
+    grep "^Test:.*\[FAIL\]" "$RESULTS_FILE" >"$TEMP_RESULTS_FILE"
+    for test_name in $MAYBES; do
+        grep "$test_name " "$TEMP_RESULTS_FILE" >>"$TEST_LIST"
+    done
+    ${TEST_RUNNER} ${QUIET:+-q} \
+        -c "${RUNFILES}" \
+        -T "${TAGS}" \
+        -i "${STF_SUITE}" \
+        -I "${ITERATIONS}" \
+        -l "${TEST_LIST}" \
+        2>&1 | tee "$RESULTS_FILE"
+    #
+    # Analyze the results.
+    #
+    ${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE"
+    RESULT=$?
+fi
 
 cat "$REPORT_FILE"
 
 RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE")
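In the hunk above, the first pass is unchanged except that, when -R was
given, the report is generated with --no-maybes, so known-flaky ("maybe")
results are graded as unexpected and can yield exit status 2. Only that
status, combined with -R, triggers the second pass. A minimal sketch of the
control flow, with the two runner invocations abbreviated as hypothetical
helpers:

    run_suite                      # hypothetical: first ${TEST_RUNNER} pass
    ${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE"
    if [ $? -eq 2 ] && [ -n "$RERUN" ]; then
        rerun_flaky_failures       # hypothetical: grep [FAIL] lines, pass -l
        ${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE"
    fi

Because the rerun pipes through `tee "$RESULTS_FILE"` and regenerates
$REPORT_FILE, the second pass's outcome replaces the first and decides the
final result.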
tests/test-runner/bin/test-runner.py
@@ -27,6 +27,7 @@ except ImportError:
 import os
 import sys
 import ctypes
+import re
 
 from datetime import datetime
 from optparse import OptionParser
@@ -495,6 +496,9 @@ Tags: %s
             self.timeout, self.user, self.pre, pre_user, self.post, post_user,
             self.failsafe, failsafe_user, self.tags)
 
+    def filter(self, keeplist):
+        self.tests = [x for x in self.tests if x in keeplist]
+
     def verify(self):
         """
         Check the pre/post/failsafe scripts, user and tests in this TestGroup.
@@ -656,6 +660,24 @@ class TestRun(object):
 
             testgroup.verify()
 
+    def filter(self, keeplist):
+        for group in list(self.testgroups.keys()):
+            if group not in keeplist:
+                del self.testgroups[group]
+                continue
+
+            g = self.testgroups[group]
+
+            if g.pre and os.path.basename(g.pre) in keeplist[group]:
+                continue
+
+            g.filter(keeplist[group])
+
+        for test in list(self.tests.keys()):
+            directory, base = os.path.split(test)
+            if directory not in keeplist or base not in keeplist[directory]:
+                del self.tests[test]
+
     def read(self, options):
         """
         Read in the specified runfiles, and apply the TestRun properties
@@ -743,10 +765,18 @@ class TestRun(object):
 
         for test in sorted(self.tests.keys()):
             config.add_section(test)
+            for prop in Test.props:
+                if prop not in self.props:
+                    config.set(test, prop,
+                               getattr(self.tests[test], prop))
 
         for testgroup in sorted(self.testgroups.keys()):
             config.add_section(testgroup)
             config.set(testgroup, 'tests', self.testgroups[testgroup].tests)
+            for prop in TestGroup.props:
+                if prop not in self.props:
+                    config.set(testgroup, prop,
+                               getattr(self.testgroups[testgroup], prop))
 
         try:
             with open(options.template, 'w') as f:
@@ -796,7 +826,7 @@ class TestRun(object):
             return
 
         global LOG_FILE_OBJ
-        if options.cmd != 'wrconfig':
+        if not options.template:
             try:
                 old = os.umask(0)
                 os.makedirs(self.outputdir, mode=0o777)
@@ -939,17 +969,37 @@ def find_tests(testrun, options):
             testrun.addtest(p, options)
 
 
+def filter_tests(testrun, options):
+    try:
+        fh = open(options.logfile, "r")
+    except Exception as e:
+        fail('%s' % e)
+
+    failed = {}
+    while True:
+        line = fh.readline()
+        if not line:
+            break
+        m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line)
+        if not m:
+            continue
+        group, test = m.group(1, 2)
+        try:
+            failed[group].append(test)
+        except KeyError:
+            failed[group] = [test]
+    fh.close()
+
+    testrun.filter(failed)
+
+
 def fail(retstr, ret=1):
     print('%s: %s' % (sys.argv[0], retstr))
     exit(ret)
 
 
 def options_cb(option, opt_str, value, parser):
-    path_options = ['outputdir', 'template', 'testdir']
+    path_options = ['outputdir', 'template', 'testdir', 'logfile']
 
-    if option.dest == 'runfiles' and '-w' in parser.rargs or \
-       option.dest == 'template' and '-c' in parser.rargs:
-        fail('-c and -w are mutually exclusive.')
-
     if opt_str in parser.rargs:
         fail('%s may only be specified once.' % opt_str)
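filter_tests() keys off the `Test: ... [FAIL]` lines that the suite prints.
A worked example of what the regular expression extracts (the log line here
is illustrative):

    line='Test: /usr/share/zfs/zfs-tests/tests/functional/cli_root/zfs_set/setup (run as root) [00:00] [FAIL]'
    # group 1 -> tests/functional/cli_root/zfs_set
    # group 2 -> setup
    # so filter_tests builds: failed = {'tests/functional/cli_root/zfs_set': ['setup']}

TestRun.filter() (added earlier in this file) then drops every group and test
absent from that dictionary, with one special case: if a group's pre script
(such as setup above) is among the failures, the group is kept whole,
presumably because its tests cannot pass without a successful setup.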
@@ -957,8 +1007,6 @@ def options_cb(option, opt_str, value, parser):
     if option.dest == 'runfiles':
         parser.values.cmd = 'rdconfig'
         value = set(os.path.abspath(p) for p in value.split(','))
-    if option.dest == 'template':
-        parser.values.cmd = 'wrconfig'
     if option.dest == 'tags':
         value = [x.strip() for x in value.split(',')]
 
@@ -975,6 +1023,10 @@ def parse_args():
                       help='Specify tests to run via config files.')
     parser.add_option('-d', action='store_true', default=False, dest='dryrun',
                       help='Dry run. Print tests, but take no other action.')
+    parser.add_option('-l', action='callback', callback=options_cb,
+                      default=None, dest='logfile', metavar='logfile',
+                      type='string',
+                      help='Read logfile and re-run tests which failed.')
     parser.add_option('-g', action='store_true', default=False,
                       dest='do_groups', help='Make directories TestGroups.')
     parser.add_option('-o', action='callback', callback=options_cb,
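The new -l flag is what zfs-tests.sh uses for its rerun pass, but it can also
be driven by hand. A hypothetical session (the runfile name and log path are
illustrative):

    test-runner.py -c linux.run                 # full run; log lands in outputdir
    test-runner.py -c linux.run -l /var/tmp/test_results/20211021T101245/log
                                                # rerun only the [FAIL] tests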
@@ -1021,9 +1073,6 @@ def parse_args():
                       help='Number of times to run the test run.')
     (options, pathnames) = parser.parse_args()
 
-    if not options.runfiles and not options.template:
-        options.cmd = 'runtests'
-
     if options.runfiles and len(pathnames):
         fail('Extraneous arguments.')
 
@@ -1034,18 +1083,20 @@ def parse_args():
 
 def main():
     options = parse_args()
 
     testrun = TestRun(options)
 
-    if options.cmd == 'runtests':
-        find_tests(testrun, options)
-    elif options.cmd == 'rdconfig':
+    if options.runfiles:
         testrun.read(options)
-    elif options.cmd == 'wrconfig':
+    else:
         find_tests(testrun, options)
+
+    if options.logfile:
+        filter_tests(testrun, options)
+
+    if options.template:
         testrun.write(options)
         exit(0)
-    else:
-        fail('Unknown command specified')
 
     testrun.complete_outputdirs()
     testrun.run(options)
tests/test-runner/bin/zts-report.py
@@ -21,6 +21,7 @@
 import os
 import re
 import sys
+import argparse
 
 #
 # This script parses the stdout of zfstest, which has this format:
@@ -381,10 +382,33 @@ def process_results(pathname):
     return d
 
 
+class ListMaybesAction(argparse.Action):
+    def __init__(self,
+                 option_strings,
+                 dest="SUPPRESS",
+                 default="SUPPRESS",
+                 help="list flaky tests and exit"):
+        super(ListMaybesAction, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            default=default,
+            nargs=0,
+            help=help)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        for test in maybe:
+            print(test)
+        sys.exit(0)
+
+
 if __name__ == "__main__":
-    if len(sys.argv) != 2:
-        usage('usage: %s <pathname>' % sys.argv[0])
-    results = process_results(sys.argv[1])
+    parser = argparse.ArgumentParser(description='Analyze ZTS logs')
+    parser.add_argument('logfile')
+    parser.add_argument('--list-maybes', action=ListMaybesAction)
+    parser.add_argument('--no-maybes', action='store_false', dest='maybes')
+    args = parser.parse_args()
+
+    results = process_results(args.logfile)
 
     if summary['total'] == 0:
         print("\n\nNo test results were found.")
@@ -393,6 +417,7 @@ if __name__ == "__main__":
 
     expected = []
     unexpected = []
+    all_maybes = True
 
     for test in list(results.keys()):
         if results[test] == "PASS":
@@ -405,11 +430,16 @@ if __name__ == "__main__":
         if setup in maybe and maybe[setup][0] == "SKIP":
             continue
 
-        if ((test not in known or results[test] not in known[test][0]) and
-                (test not in maybe or results[test] not in maybe[test][0])):
-            unexpected.append(test)
-        else:
+        if (test in known and results[test] in known[test][0]):
             expected.append(test)
+        elif test in maybe and results[test] in maybe[test][0]:
+            if results[test] == 'SKIP' or args.maybes:
+                expected.append(test)
+            elif not args.maybes:
+                unexpected.append(test)
+        else:
+            unexpected.append(test)
+            all_maybes = False
 
     print("\nTests with results other than PASS that are expected:")
     for test in sorted(expected):
@@ -455,5 +485,7 @@ if __name__ == "__main__":
 
     if len(unexpected) == 0:
         sys.exit(0)
+    elif not args.maybes and all_maybes:
+        sys.exit(2)
     else:
         sys.exit(1)
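Taken together, zts-report.py now exposes a small CLI contract that scripts
can branch on; a sketch of a caller (results.log is illustrative):

    zts-report.py --no-maybes results.log >report.txt
    case $? in
        0) echo "all results were expected" ;;
        2) echo "only known-flaky tests failed; worth a rerun" ;;
        *) echo "unexpected failures" ;;
    esac

Note that --list-maybes prints the known-flaky test names and exits from
inside argument parsing, which is why zfs-tests.sh can invoke it without
supplying a log file at all.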