#!/usr/bin/python

#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#

import os
import re
import sys

#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#

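# The script takes a single argument: the path to a file containing the
# saved output of a zfstest run. It exits with a non-zero status only when
# unexpected failures are present.
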
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'

#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing in-tree, this can be resolved
# by granting group read access:
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'

#
# Some tests require that the DISKS provided can be partitioned. This is
# normally not an issue because loopback devices are used for DISKS and
# they can be partitioned. There is one notable exception: the CentOS 6.x
# kernel is old enough that it does not support partitioning loopback
# devices.
#
disk_reason = 'Partitionable DISKS required'

#
# Some tests require a minimum Python version of 3.5 and will be skipped when
# the default system version is too old. There may also be tests which require
# additional Python modules to be installed, for example python-cffi is
# required by the pyzfs tests.
#
python_reason = 'Python v3.5 or newer required'
python_deps_reason = 'Python modules missing: python-cffi'

#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'

#
# Some tests may depend on udev change events being generated when block
# devices change capacity. This functionality wasn't available until the
# 2.6.38 kernel.
#
udev_reason = 'Kernel block device udev change events required'

#
# Some tests require that the NFS client and server utilities be installed.
#
share_reason = 'NFS client and server utilities required'

#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'

#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'

#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'

#
# Some tests may be structured in a way that relies on exact knowledge
# of how much free space is available in a pool. These tests cannot be
# made completely reliable because the internal details of how free space
# is managed are not exposed to user space.
#
enospc_reason = 'Exact free space reporting is not guaranteed'

#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'

#
# Some tests are not applicable to Linux or need to be updated to operate
# in the manner required by Linux. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "N/A on Linux"

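# Running totals and the log file location, populated by process_results()
# as the saved zfstest output is parsed.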
summary = {
    'total': float(0),
    'passed': float(0),
    'logfile': "Could not determine logfile location."
}

#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
    'casenorm/sensitive_none_lookup': ['FAIL', '7633'],
    'casenorm/sensitive_none_delete': ['FAIL', '7633'],
    'casenorm/sensitive_formd_lookup': ['FAIL', '7633'],
    'casenorm/sensitive_formd_delete': ['FAIL', '7633'],
    'casenorm/insensitive_none_lookup': ['FAIL', '7633'],
    'casenorm/insensitive_none_delete': ['FAIL', '7633'],
    'casenorm/insensitive_formd_lookup': ['FAIL', '7633'],
    'casenorm/insensitive_formd_delete': ['FAIL', '7633'],
    'casenorm/mixed_none_lookup': ['FAIL', '7633'],
    'casenorm/mixed_none_lookup_ci': ['FAIL', '7633'],
    'casenorm/mixed_none_delete': ['FAIL', '7633'],
    'casenorm/mixed_formd_lookup': ['FAIL', '7633'],
    'casenorm/mixed_formd_lookup_ci': ['FAIL', '7633'],
    'casenorm/mixed_formd_delete': ['FAIL', '7633'],
    'cli_root/zfs_receive/zfs_receive_004_neg': ['FAIL', known_reason],
    'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
    'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
    'cli_root/zpool_create/zpool_create_016_pos': ['SKIP', na_reason],
    'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
    'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
    'inuse/inuse_001_pos': ['SKIP', na_reason],
    'inuse/inuse_003_pos': ['SKIP', na_reason],
    'inuse/inuse_006_pos': ['SKIP', na_reason],
    'inuse/inuse_007_pos': ['SKIP', na_reason],
    'privilege/setup': ['SKIP', na_reason],
    'refreserv/refreserv_004_pos': ['FAIL', known_reason],
    'removal/removal_condense_export': ['SKIP', known_reason],
    'removal/removal_with_zdb': ['SKIP', known_reason],
    'rootpool/setup': ['SKIP', na_reason],
    'rsend/rsend_008_pos': ['SKIP', '6066'],
    'snapshot/rollback_003_pos': ['SKIP', '6143'],
    'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
    'xattr/xattr_008_pos': ['SKIP', na_reason],
    'xattr/xattr_009_neg': ['SKIP', na_reason],
    'xattr/xattr_010_neg': ['SKIP', na_reason],
    'zvol/zvol_misc/zvol_misc_001_neg': ['SKIP', na_reason],
    'zvol/zvol_misc/zvol_misc_003_neg': ['SKIP', na_reason],
    'zvol/zvol_misc/zvol_misc_004_pos': ['SKIP', na_reason],
    'zvol/zvol_misc/zvol_misc_005_neg': ['SKIP', na_reason],
    'zvol/zvol_misc/zvol_misc_006_pos': ['SKIP', na_reason],
    'zvol/zvol_swap/zvol_swap_003_pos': ['SKIP', na_reason],
    'zvol/zvol_swap/zvol_swap_005_pos': ['SKIP', na_reason],
    'zvol/zvol_swap/zvol_swap_006_pos': ['SKIP', na_reason],
}

#
# These tests may occasionally fail or be skipped. We want their failures
# to be reported but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
maybe = {
    'cache/setup': ['SKIP', disk_reason],
    'cache/cache_010_neg': ['FAIL', known_reason],
    'chattr/setup': ['SKIP', exec_reason],
    'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
    'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
    'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', '5479'],
    'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', '6415'],
    'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', '6416'],
    'cli_root/zfs_share/setup': ['SKIP', share_reason],
    'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
    'cli_root/zfs_unshare/setup': ['SKIP', share_reason],
    'cli_root/zpool_add/setup': ['SKIP', disk_reason],
    'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
    'cli_root/zpool_create/setup': ['SKIP', disk_reason],
    'cli_root/zpool_create/zpool_create_008_pos': ['FAIL', known_reason],
    'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', '6145'],
    'cli_root/zpool_expand/setup': ['SKIP', udev_reason],
    'cli_root/zpool_export/setup': ['SKIP', disk_reason],
    'cli_root/zpool_import/setup': ['SKIP', disk_reason],
    'cli_root/zpool_import/import_rewind_device_replaced':
        ['FAIL', rewind_reason],
    'cli_root/zpool_import/import_rewind_config_changed':
        ['FAIL', rewind_reason],
    'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', '6839'],
    'cli_root/zpool_remove/setup': ['SKIP', disk_reason],
    'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', '6141'],
    'cli_user/misc/arc_summary3_001_pos': ['SKIP', python_reason],
    'delegate/setup': ['SKIP', exec_reason],
    'fault/auto_online_001_pos': ['SKIP', disk_reason],
    'fault/auto_replace_001_pos': ['SKIP', disk_reason],
    'history/history_004_pos': ['FAIL', '7026'],
    'history/history_005_neg': ['FAIL', '6680'],
    'history/history_006_neg': ['FAIL', '5657'],
    'history/history_008_pos': ['FAIL', known_reason],
    'history/history_010_pos': ['SKIP', exec_reason],
    'inuse/inuse_005_pos': ['SKIP', disk_reason],
    'inuse/inuse_008_pos': ['SKIP', disk_reason],
    'inuse/inuse_009_pos': ['SKIP', disk_reason],
    'io/mmap': ['SKIP', fio_reason],
    'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
    'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
    'no_space/enospc_002_pos': ['FAIL', enospc_reason],
    'projectquota/setup': ['SKIP', exec_reason],
    'redundancy/redundancy_004_neg': ['FAIL', '7290'],
    'reservation/reservation_008_pos': ['FAIL', '7741'],
    'reservation/reservation_018_pos': ['FAIL', '5642'],
    'rsend/rsend_019_pos': ['FAIL', '6086'],
    'rsend/rsend_020_pos': ['FAIL', '6446'],
    'rsend/rsend_021_pos': ['FAIL', '6446'],
    'rsend/rsend_024_pos': ['FAIL', '5665'],
    'rsend/send-c_volume': ['FAIL', '6087'],
    'snapshot/clone_001_pos': ['FAIL', known_reason],
    'snapshot/snapshot_009_pos': ['FAIL', '7961'],
    'snapshot/snapshot_010_pos': ['FAIL', '7961'],
    'snapused/snapused_004_pos': ['FAIL', '5513'],
    'tmpfile/setup': ['SKIP', tmpfile_reason],
    'threadsappend/threadsappend_001_pos': ['FAIL', '6136'],
    'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
    'user_namespace/setup': ['SKIP', user_ns_reason],
    'userquota/setup': ['SKIP', exec_reason],
    'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'],
    'write_dirs/setup': ['SKIP', disk_reason],
    'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'],
}


def usage(s):
    print(s)
    sys.exit(1)


def process_results(pathname):
    try:
        f = open(pathname)
    except IOError as e:
        print('Error opening file: %s' % e)
        sys.exit(1)

    prefix = '/zfs-tests/tests/functional/'
    pattern = \
        r'^Test:\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' \
        % prefix
    pattern_log = r'^\s*Log directory:\s*(\S*)'

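    # For example (illustrative), a results line such as
    #   Test: /var/tmp/zfs-tests/tests/functional/cli_root/setup \
    #       (run as root) [00:00] [PASS]
    # is parsed into the groups ('cli_root/setup', 'root', '00:00', 'PASS').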
    d = {}
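    # Map each test's path (relative to the prefix) to its result, and
    # remember the log directory for the final summary.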
    for l in f.readlines():
        m = re.match(pattern, l)
        if m and len(m.groups()) == 4:
            summary['total'] += 1
            if m.group(4) == "PASS":
                summary['passed'] += 1
            d[m.group(1)] = m.group(4)
            continue

        m = re.match(pattern_log, l)
        if m:
            summary['logfile'] = m.group(1)

    return d


if __name__ == "__main__":
    if len(sys.argv) != 2:
        usage('usage: %s <pathname>' % sys.argv[0])
    results = process_results(sys.argv[1])

    if summary['total'] == 0:
        print("\n\nNo test results were found.")
        print("Log directory: %s" % summary['logfile'])
        sys.exit(0)

    expected = []
    unexpected = []

    for test in list(results.keys()):
        if results[test] == "PASS":
            continue

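        # A SKIP result is expected when the test group's setup was itself
        # skipped and that skip is listed in the known or maybe tables.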
        setup = test.replace(os.path.basename(test), "setup")
        if results[test] == "SKIP" and test != setup:
            if setup in known and known[setup][0] == "SKIP":
                continue
            if setup in maybe and maybe[setup][0] == "SKIP":
                continue

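        # Note: the 'in' checks below compare result strings by substring
        # containment (e.g. 'FAIL' in 'FAIL'), not by list membership.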
        if ((test not in known or results[test] not in known[test][0]) and
                (test not in maybe or results[test] not in maybe[test][0])):
            unexpected.append(test)
        else:
            expected.append(test)

    print("\nTests with results other than PASS that are expected:")
    for test in sorted(expected):
        issue_url = 'https://github.com/zfsonlinux/zfs/issues/'

        # Include the reason why the result is expected, given the following:
        # 1. Suppress test results which set the "N/A on Linux" reason.
        # 2. Numerical reasons are assumed to be GitHub issue numbers.
        # 3. When an entire test group is skipped only report the setup reason.
        setup = test.replace(os.path.basename(test), "setup")
        if test in known:
            if known[test][1] == na_reason:
                continue
            elif known[test][1].isdigit():
                expect = issue_url + known[test][1]
            else:
                expect = known[test][1]
        elif test in maybe:
            if maybe[test][1].isdigit():
                expect = issue_url + maybe[test][1]
            else:
                expect = maybe[test][1]
        elif setup in known and known[setup][0] == "SKIP" and setup != test:
            continue
        elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
            continue
        else:
            expect = "UNKNOWN REASON"
        print("    %s %s (%s)" % (results[test], test, expect))

    print("\nTests with result of PASS that are unexpected:")
    for test in sorted(known.keys()):
        # We probably should not be silently ignoring the case
        # where "test" is not in "results".
        if test not in results or results[test] != "PASS":
            continue
        print("    %s %s (expected %s)" % (results[test], test,
                                           known[test][0]))

    print("\nTests with results other than PASS that are unexpected:")
    for test in sorted(unexpected):
        expect = "PASS" if test not in known else known[test][0]
        print("    %s %s (expected %s)" % (results[test], test, expect))

    if len(unexpected) == 0:
        sys.exit(0)
    else:
        sys.exit(1)