Merge branch 'issue-2094'

This patch stack was designed to accomplish the following:

* Cleanly address the pool incompatibility which was accidentally
  introduced post-0.6.2 but pre-0.6.3.  This required adding
  infrastructure which can be used from now on to notify system
  administrators of errata which affect their pool; an example of
  the new report is shown below this list.

* Add additional automated regression testing to ensure this type
  of compatibility issue is caught prior to the change being merged.
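
For illustration, with these changes a pool affected by the scrub
variant of the errata reports approximately the following from
'zpool status' (the strings come from the zpool_main.c hunks below;
errata #1 is ZPOOL_ERRATA_ZOL_2094_SCRUB):

    status: Errata #1 detected.
    action: To correct the issue run 'zpool scrub'.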

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #2094
commit 3965d2e6ee
Author: Brian Behlendorf
Date:   2014-02-21 12:12:20 -08:00

15 changed files with 641 additions and 14 deletions

.gitmodules (new file)

@@ -0,0 +1,3 @@
+[submodule "scripts/zfs-images"]
+	path = scripts/zfs-images
+	url = https://github.com/zfsonlinux/zfs-images

cmd/zpool/zpool_main.c

@@ -1608,7 +1608,8 @@ show_import(nvlist_t *config)
 	uint64_t guid;
 	char *msgid;
 	nvlist_t *nvroot;
-	int reason;
+	zpool_status_t reason;
+	zpool_errata_t errata;
 	const char *health;
 	uint_t vsc;
 	int namewidth;
@@ -1627,7 +1628,7 @@ show_import(nvlist_t *config)
 	    (uint64_t **)&vs, &vsc) == 0);
 	health = zpool_state_to_name(vs->vs_state, vs->vs_aux);

-	reason = zpool_import_status(config, &msgid);
+	reason = zpool_import_status(config, &msgid, &errata);

 	(void) printf(gettext(" pool: %s\n"), name);
 	(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
@@ -1715,6 +1716,11 @@ show_import(nvlist_t *config)
 		    "resilvered.\n"));
 		break;

+	case ZPOOL_STATUS_ERRATA:
+		(void) printf(gettext(" status: Errata #%d detected.\n"),
+		    errata);
+		break;
+
 	default:
 		/*
 		 * No other status can be seen when importing pools.
@@ -1736,6 +1742,34 @@ show_import(nvlist_t *config)
 		(void) printf(gettext(" action: The pool can be "
 		    "imported using its name or numeric "
 		    "identifier and\n\tthe '-f' flag.\n"));
+	} else if (reason == ZPOOL_STATUS_ERRATA) {
+		switch (errata) {
+		case ZPOOL_ERRATA_NONE:
+			break;
+
+		case ZPOOL_ERRATA_ZOL_2094_SCRUB:
+			(void) printf(gettext(" action: The pool can "
+			    "be imported using its name or numeric "
+			    "identifier,\n\thowever there is a compat"
+			    "ibility issue which should be corrected"
+			    "\n\tby running 'zpool scrub'\n"));
+			break;
+
+		case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
+			(void) printf(gettext(" action: The pool can"
+			    "not be imported with this version of ZFS "
+			    "due to\n\tan active asynchronous destroy. "
+			    "Revert to an earlier version\n\tand "
+			    "allow the destroy to complete before "
+			    "updating.\n"));
+			break;
+
+		default:
+			/*
+			 * All errata must contain an action message.
+			 */
+			assert(0);
+		}
 	} else {
 		(void) printf(gettext(" action: The pool can be "
 		    "imported using its name or numeric "
@@ -4125,13 +4159,14 @@ status_callback(zpool_handle_t *zhp, void *data)
 	status_cbdata_t *cbp = data;
 	nvlist_t *config, *nvroot;
 	char *msgid;
-	int reason;
+	zpool_status_t reason;
+	zpool_errata_t errata;
 	const char *health;
 	uint_t c;
 	vdev_stat_t *vs;

 	config = zpool_get_config(zhp, NULL);
-	reason = zpool_get_status(zhp, &msgid);
+	reason = zpool_get_status(zhp, &msgid, &errata);
 	cbp->cb_count++;
@@ -4349,6 +4384,28 @@ status_callback(zpool_handle_t *zhp, void *data)
 		    "'zpool clear'.\n"));
 		break;

+	case ZPOOL_STATUS_ERRATA:
+		(void) printf(gettext("status: Errata #%d detected.\n"),
+		    errata);
+
+		switch (errata) {
+		case ZPOOL_ERRATA_NONE:
+			break;
+
+		case ZPOOL_ERRATA_ZOL_2094_SCRUB:
+			(void) printf(gettext("action: To correct the issue "
+			    "run 'zpool scrub'.\n"));
+			break;
+
+		default:
+			/*
+			 * All errata which allow the pool to be imported
+			 * must contain an action message.
+			 */
+			assert(0);
+		}
+		break;
+
 	default:
 		/*
 		 * The remaining errors can't actually be generated, yet.

include/libzfs.h

@@ -312,6 +312,7 @@ typedef enum {
 	ZPOOL_STATUS_IO_FAILURE_WAIT,	/* failed I/O, failmode 'wait' */
 	ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
 	ZPOOL_STATUS_BAD_LOG,		/* cannot read log chain(s) */
+	ZPOOL_STATUS_ERRATA,		/* informational errata available */

 	/*
 	 * If the pool has unsupported features but can still be opened in
@@ -347,8 +348,10 @@ typedef enum {
 	ZPOOL_STATUS_OK
 } zpool_status_t;

-extern zpool_status_t zpool_get_status(zpool_handle_t *, char **);
-extern zpool_status_t zpool_import_status(nvlist_t *, char **);
+extern zpool_status_t zpool_get_status(zpool_handle_t *, char **,
+    zpool_errata_t *);
+extern zpool_status_t zpool_import_status(nvlist_t *, char **,
+    zpool_errata_t *);
 extern void zpool_dump_ddt(const ddt_stat_t *dds, const ddt_histogram_t *ddh);

 /*

include/sys/dsl_scan.h

@@ -72,6 +72,8 @@ typedef enum dsl_scan_flags {
 	DSF_VISIT_DS_AGAIN = 1<<0,
 } dsl_scan_flags_t;

+#define	DSL_SCAN_FLAGS_MASK	(DSF_VISIT_DS_AGAIN)
+
 /*
  * Every pool will have one dsl_scan_t and this structure will contain
  * in-memory information about the scan and a pointer to the on-disk

include/sys/fs/zfs.h

@@ -548,6 +548,7 @@ typedef struct zpool_rewind_policy {
 #define	ZPOOL_CONFIG_CAN_RDONLY	"can_rdonly"	/* not stored on disk */
 #define	ZPOOL_CONFIG_FEATURES_FOR_READ	"features_for_read"
 #define	ZPOOL_CONFIG_FEATURE_STATS	"feature_stats"	/* not stored on disk */
+#define	ZPOOL_CONFIG_ERRATA	"errata"	/* not stored on disk */

 /*
  * The persistent vdev state is stored as separate values rather than a single
  * 'vdev_state' entry. This is because a device can be in multiple states, such
@@ -704,6 +705,17 @@ typedef enum dsl_scan_state {
 	DSS_NUM_STATES
 } dsl_scan_state_t;

+/*
+ * Errata described by http://zfsonlinux.org/msg/ZFS-8000-ER.  The ordering
+ * of this enum must be maintained to ensure the errata identifiers map to
+ * the correct documentation.  New errata may only be appended to the list
+ * and must contain corresponding documentation at the above link.
+ */
+typedef enum zpool_errata {
+	ZPOOL_ERRATA_NONE,
+	ZPOOL_ERRATA_ZOL_2094_SCRUB,
+	ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY,
+} zpool_errata_t;
+
 /*
  * Vdev statistics. Note: all fields should be 64-bit because this
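
To illustrate the append-only rule stated above: a hypothetical future
erratum (ZPOOL_ERRATA_ZOL_9999_EXAMPLE is invented here, not part of this
change) would have to be added after the existing entries so that earlier
identifiers keep their numeric values:

	typedef enum zpool_errata {
		ZPOOL_ERRATA_NONE,
		ZPOOL_ERRATA_ZOL_2094_SCRUB,
		ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY,
		ZPOOL_ERRATA_ZOL_9999_EXAMPLE,	/* hypothetical */
	} zpool_errata_t;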

include/sys/spa_impl.h

@@ -236,6 +236,7 @@ struct spa {
 	uint64_t	spa_deadman_calls;	/* number of deadman calls */
 	hrtime_t	spa_sync_starttime;	/* starting time of spa_sync */
 	uint64_t	spa_deadman_synctime;	/* deadman expiration timer */
+	uint64_t	spa_errata;		/* errata issues detected */
 	spa_stats_t	spa_stats;		/* assorted spa statistics */

 	/*

include/sys/zio.h

@@ -260,7 +260,6 @@ struct zbookmark {
 	uint64_t	zb_object;
 	int64_t		zb_level;
 	uint64_t	zb_blkid;
-	char		*zb_func;
 };

 #define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
@@ -269,7 +268,6 @@ struct zbookmark {
 	(zb)->zb_object = object;	\
 	(zb)->zb_level = level;		\
 	(zb)->zb_blkid = blkid;		\
-	(zb)->zb_func = FTAG;		\
 }

 #define	ZB_DESTROYED_OBJSET	(-1ULL)

lib/libzfs/libzfs_status.c

@@ -67,6 +67,7 @@ static char *zfs_msgid_table[] = {
 	"ZFS-8000-HC",
 	"ZFS-8000-JQ",
 	"ZFS-8000-K4",
+	"ZFS-8000-ER",
 };

 #define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
@@ -182,7 +183,7 @@ find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
  * only picks the most damaging of all the current errors to report.
  */
 static zpool_status_t
-check_status(nvlist_t *config, boolean_t isimport)
+check_status(nvlist_t *config, boolean_t isimport, zpool_errata_t *erratap)
 {
 	nvlist_t *nvroot;
 	vdev_stat_t *vs;
@@ -193,6 +194,7 @@ check_status(nvlist_t *config, boolean_t isimport)
 	uint64_t stateval;
 	uint64_t suspended;
 	uint64_t hostid = 0;
+	uint64_t errata = 0;
 	unsigned long system_hostid = gethostid() & 0xffffffff;

 	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
@@ -356,13 +358,22 @@ check_status(nvlist_t *config, boolean_t isimport)
 		}
 	}

+	/*
+	 * Informational errata available.
+	 */
+	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
+	if (errata) {
+		*erratap = errata;
+		return (ZPOOL_STATUS_ERRATA);
+	}
+
 	return (ZPOOL_STATUS_OK);
 }

 zpool_status_t
-zpool_get_status(zpool_handle_t *zhp, char **msgid)
+zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
 {
-	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
+	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata);

 	if (ret >= NMSGID)
 		*msgid = NULL;
@@ -373,9 +384,9 @@ zpool_get_status(zpool_handle_t *zhp, char **msgid)
 }

 zpool_status_t
-zpool_import_status(nvlist_t *config, char **msgid)
+zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
 {
-	zpool_status_t ret = check_status(config, B_TRUE);
+	zpool_status_t ret = check_status(config, B_TRUE, errata);

 	if (ret >= NMSGID)
 		*msgid = NULL;
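
For reference, a minimal consumer of the extended interface, modeled on
status_callback() above (the helper name print_errata() is invented here
for illustration):

	#include <stdio.h>
	#include <libzfs.h>

	/* Print any errata advisory for an already-open pool handle. */
	static void
	print_errata(zpool_handle_t *zhp)
	{
		char *msgid;
		zpool_errata_t errata;

		if (zpool_get_status(zhp, &msgid, &errata) ==
		    ZPOOL_STATUS_ERRATA)
			(void) printf("status: Errata #%d detected.\n",
			    errata);
	}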

module/zfs/dsl_scan.c

@@ -123,6 +123,42 @@ dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
 	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
 	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
 	    &scn->scn_phys);
+
+	/*
+	 * Detect if the pool contains the signature of #2094.  If it
+	 * does properly update the scn->scn_phys structure and notify
+	 * the administrator by setting an errata for the pool.
+	 */
+	if (err == EOVERFLOW) {
+		uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
+
+		VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
+		VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
+		    (23 * sizeof (uint64_t)));
+
+		err = zap_lookup(dp->dp_meta_objset,
+		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
+		    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
+		if (err == 0) {
+			uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
+
+			if (overflow & ~DSL_SCAN_FLAGS_MASK ||
+			    scn->scn_async_destroying) {
+				spa->spa_errata =
+				    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
+				return (EOVERFLOW);
+			}
+
+			bcopy(zaptmp, &scn->scn_phys,
+			    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
+			scn->scn_phys.scn_flags = overflow;
+
+			/* Required scrub already in progress. */
+			if (scn->scn_phys.scn_state == DSS_FINISHED ||
+			    scn->scn_phys.scn_state == DSS_CANCELED)
+				spa->spa_errata =
+				    ZPOOL_ERRATA_ZOL_2094_SCRUB;
+		}
+	}
+
 	if (err == ENOENT)
 		return (0);
 	else if (err)
@@ -319,6 +355,9 @@ dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
 	}

 	scn->scn_phys.scn_end_time = gethrestime_sec();
+
+	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
+		spa->spa_errata = 0;
 }

 /* ARGSUSED */
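
For context: zap_lookup() fails with EOVERFLOW when the stored integer
array holds more integers than the caller asked for, and that is the
#2094 signature being probed here.  Affected builds persisted
SCAN_PHYS_NUMINTS + 1 (25) uint64s for DMU_POOL_SCAN, so the expected
24-word read fails; the hunk above then re-reads the entry at the larger
size and folds the extra word back into scn_flags after checking it
against DSL_SCAN_FLAGS_MASK.  A condensed sketch of the probe (error
handling elided, names as in the hunk above):

	uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];

	/* A 24-word read of the 25-word entry fails with EOVERFLOW... */
	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys);
	if (err == EOVERFLOW) {
		/* ...so retry at 25 words and recover the trailing flags. */
		err = zap_lookup(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
		    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
	}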

module/zfs/spa.c

@@ -4083,6 +4083,8 @@ spa_tryimport(nvlist_t *tryconfig)
 		    spa->spa_uberblock.ub_timestamp) == 0);
 		VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
 		    spa->spa_load_info) == 0);
+		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
+		    spa->spa_errata) == 0);

 		/*
 		 * If the bootfs property exists on this pool then we

module/zfs/spa_config.c

@@ -365,6 +365,8 @@ spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
 	    txg) == 0);
 	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
 	    spa_guid(spa)) == 0);
+	VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
+	    spa->spa_errata) == 0);
 	VERIFY(spa->spa_comment == NULL || nvlist_add_string(config,
 	    ZPOOL_CONFIG_COMMENT, spa->spa_comment) == 0);

module/zfs/spa_stats.c

@@ -207,7 +207,6 @@ spa_read_history_add(spa_t *spa, const zbookmark_t *zb, uint32_t aflags)
 		return;

 	srh = kmem_zalloc(sizeof (spa_read_history_t), KM_PUSHPAGE);
-	strlcpy(srh->origin, zb->zb_func, sizeof (srh->origin));
 	strlcpy(srh->comm, getcomm(), sizeof (srh->comm));
 	srh->start = gethrtime();
 	srh->objset = zb->zb_objset;

scripts/Makefile.am

@@ -7,6 +7,7 @@ dist_pkgdata_SCRIPTS = \
 	$(top_builddir)/scripts/common.sh \
 	$(top_srcdir)/scripts/zconfig.sh \
 	$(top_srcdir)/scripts/zfault.sh \
+	$(top_srcdir)/scripts/zimport.sh \
 	$(top_srcdir)/scripts/zfs.sh \
 	$(top_srcdir)/scripts/zpool-create.sh \
 	$(top_srcdir)/scripts/zpios.sh \
@@ -17,6 +18,7 @@ dist_pkgdata_SCRIPTS = \
 ZFS=$(top_builddir)/scripts/zfs.sh
 ZCONFIG=$(top_builddir)/scripts/zconfig.sh
 ZFAULT=$(top_builddir)/scripts/zfault.sh
+ZIMPORT=$(top_builddir)/scripts/zimport.sh
 ZTEST=$(top_builddir)/cmd/ztest/ztest
 ZPIOS_SANITY=$(top_builddir)/scripts/zpios-sanity.sh

scripts/zfs-images (new submodule)

@@ -0,0 +1 @@
+Subproject commit 3331601f6dc50ef2c9779c1656218701b48b276c

scripts/zimport.sh (new executable file, 495 lines)
#!/bin/sh
#
# Verify that an assortment of known good reference pools can be imported
# using different versions of the ZoL code.
#
# By default, reference pools for the major ZFS implementations will be
# checked against the most recent ZoL tags and the master development branch.
# Alternate tags or branches may be verified with the '-s <src-tag>' option.
# Passing the keyword "installed" will instruct the script to test whatever
# version is installed.
#
# Preferentially, a reference pool is used for all tests.  However, if one
# does not exist and the pool-tag matches one of the src-tags then a new
# reference pool will be created using binaries from that source build.
# This is particularly useful when you need to test your changes before
# opening a pull request.  The keyword 'all' can be used as shorthand to
# refer to all available reference pools.
#
# New reference pools may be added by placing a bzip2 compressed tarball
# of the pool in the scripts/zfs-images directory and then passing
# the -p <pool-tag> option.  To increase test coverage, reference pools
# should be collected for all the major ZFS implementations.  Having these
# pools easily available is also helpful to the developers.
#
# Care should be taken to run these tests with a kernel supported by all
# the listed tags.  Otherwise build failures will cause false positives.
#
#
# EXAMPLES:
#
# The following example will verify the zfs-0.6.2 tag, the master branch,
# and the installed zfs version can correctly import the listed pools.
# Note there is no reference pool available for master and installed but
# because binaries are available one is automatically constructed. The
# working directory is also preserved between runs (-k) preventing the
# need to rebuild from source for multiple runs.
#
# zimport.sh -k -f /var/tmp/zimport \
# -s "zfs-0.6.2 master installed" \
# -p "zevo-1.1.1 zol-0.6.2 zol-0.6.2-173 master installed"
#
# --------------------- ZFS on Linux Source Versions --------------
# zfs-0.6.2 master 0.6.2-175_g36eb554
# -----------------------------------------------------------------
# Clone SPL Local Local Skip
# Clone ZFS Local Local Skip
# Build SPL Pass Pass Skip
# Build ZFS Pass Pass Skip
# -----------------------------------------------------------------
# zevo-1.1.1 Pass Pass Pass
# zol-0.6.2 Pass Pass Pass
# zol-0.6.2-173 Fail Pass Pass
# master Pass Pass Pass
# installed Pass Pass Pass
#
basedir="$(dirname $0)"
SCRIPT_COMMON=common.sh
if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then
	. "${basedir}/${SCRIPT_COMMON}"
else
	echo "Missing helper script ${SCRIPT_COMMON}" && exit 1
fi
PROG=zimport.sh
SRC_TAGS="zfs-0.6.1 zfs-0.6.2 master"
POOL_TAGS="all master"
TEST_DIR=`mktemp -u -d -p /var/tmp zimport.XXXXXXXX`
KEEP=0
VERBOSE=0
COLOR=1
REPO="https://github.com/zfsonlinux"
IMAGES_DIR="$SCRIPTDIR/zfs-images/"
IMAGES_TAR="https://github.com/zfsonlinux/zfs-images/tarball/master"
CPUS=`grep -c ^processor /proc/cpuinfo`
ERROR=0
usage() {
cat << EOF
USAGE:
zimport.sh [-hvck] [-r repo] [-s src-tag] [-i pool-dir] [-p pool-tag] [-f path]

DESCRIPTION:
	ZPOOL import verification tests

OPTIONS:
	-h		Show this message
	-v		Verbose
	-c		No color
	-k		Keep temporary directory
	-r <repo>	Source repository ($REPO)
	-s <src-tag>...	Verify ZoL versions with the listed tags
	-i <pool-dir>	Pool image directory
	-p <pool-tag>...	Verify pools created with the listed tags
	-f <path>	Temporary directory to use

EOF
}
while getopts 'hvckr:s:i:p:f:?' OPTION; do
	case $OPTION in
	h)
		usage
		exit 1
		;;
	v)
		VERBOSE=1
		;;
	c)
		COLOR=0
		;;
	k)
		KEEP=1
		;;
	r)
		REPO="$OPTARG"
		;;
	s)
		SRC_TAGS="$OPTARG"
		;;
	i)
		IMAGES_DIR="$OPTARG"
		;;
	p)
		POOL_TAGS="$OPTARG"
		;;
	f)
		TEST_DIR="$OPTARG"
		;;
	?)
		usage
		exit
		;;
	esac
done
# Initialize the test suite
init
check_modules || die "ZFS modules must be unloaded"
SRC_DIR="$TEST_DIR/src"
SRC_DIR_SPL="$SRC_DIR/spl"
SRC_DIR_ZFS="$SRC_DIR/zfs"
if [ $COLOR -eq 0 ]; then
	COLOR_GREEN=""
	COLOR_BROWN=""
	COLOR_RED=""
	COLOR_RESET=""
fi
pass_nonewline() {
	echo -n -e "${COLOR_GREEN}Pass${COLOR_RESET}\t\t"
}

skip_nonewline() {
	echo -n -e "${COLOR_BROWN}Skip${COLOR_RESET}\t\t"
}

fail_nonewline() {
	echo -n -e "${COLOR_RED}Fail${COLOR_RESET}\t\t"
}
#
# Set several helper variables which are derived from a source tag.
#
# SPL_TAG - The tag zfs-x.y.z is translated to spl-x.y.z.
# SPL_DIR - The spl directory name.
# SPL_URL - The spl github URL to fetch the tarball.
# ZFS_TAG - The passed zfs-x.y.z tag
# ZFS_DIR - The zfs directory name
# ZFS_URL - The zfs github URL to fetch the tarball
#
src_set_vars() {
	local TAG=$1

	SPL_TAG=`echo $TAG | sed -e 's/zfs/spl/'`
	SPL_DIR=$SRC_DIR_SPL/$SPL_TAG
	SPL_URL=$REPO/spl/tarball/$SPL_TAG

	ZFS_TAG=$TAG
	ZFS_DIR=$SRC_DIR_ZFS/$ZFS_TAG
	ZFS_URL=$REPO/zfs/tarball/$ZFS_TAG

	if [ "$TAG" = "installed" ]; then
		ZPOOL_CMD=`which zpool`
		ZFS_CMD=`which zfs`
		ZFS_SH="/usr/share/zfs/zfs.sh"
		ZPOOL_CREATE="/usr/share/zfs/zpool-create.sh"
	else
		ZPOOL_CMD="./cmd/zpool/zpool"
		ZFS_CMD="./cmd/zfs/zfs"
		ZFS_SH="./scripts/zfs.sh"
		ZPOOL_CREATE="./scripts/zpool-create.sh"
	fi
}
#
# Set several helper variables which are derived from a pool name such
# as zol-0.6.x, zevo-1.1.1, etc. These refer to example pools from various
# ZFS implementations which are used to verify compatibility.
#
# POOL_TAG - The example pools name in scripts/zfs-images/.
# POOL_BZIP - The full path to the example bzip2 compressed pool.
# POOL_DIR - The top level test path for this pool.
# POOL_DIR_PRISTINE - The directory containing a pristine version of the pool.
# POOL_DIR_COPY - The directory containing a working copy of the pool.
# POOL_DIR_SRC - Location of a source build if it exists for this pool.
#
pool_set_vars() {
	local TAG=$1

	POOL_TAG=$TAG
	POOL_BZIP=$IMAGES_DIR/$POOL_TAG.tar.bz2
	POOL_DIR=$TEST_DIR/pools/$POOL_TAG
	POOL_DIR_PRISTINE=$POOL_DIR/pristine
	POOL_DIR_COPY=$POOL_DIR/copy
	POOL_DIR_SRC=`echo -n "$SRC_DIR_ZFS/"; \
	    echo "$POOL_TAG" | sed -e 's/zol/zfs/'`
}
#
# Construct a non-trivial pool given a specific version of the source.  More
# interesting pools provide better test coverage, so this function should be
# extended as needed to create more realistic pools.
#
pool_create() {
	pool_set_vars $1
	src_set_vars $1

	if [ "$POOL_TAG" != "installed" ]; then
		cd $POOL_DIR_SRC
	fi

	$ZFS_SH zfs="spa_config_path=$POOL_DIR_PRISTINE" || fail

	# Create a file vdev RAIDZ pool.
	FILEDIR="$POOL_DIR_PRISTINE" $ZPOOL_CREATE \
	    -c file-raidz -p $POOL_TAG -v >/dev/null || fail

	# Create a pool/fs filesystem with some random contents.
	$ZFS_CMD create $POOL_TAG/fs || fail
	populate /$POOL_TAG/fs/ 10 100

	# Snapshot that filesystem, clone it, remove the files/dirs,
	# replace them with new files/dirs.
	$ZFS_CMD snap $POOL_TAG/fs@snap || fail
	$ZFS_CMD clone $POOL_TAG/fs@snap $POOL_TAG/clone || fail
	rm -Rf /$POOL_TAG/clone/* || fail
	populate /$POOL_TAG/clone/ 10 100

	# Scrub the pool, delay slightly, then export it.  It is now
	# somewhat interesting for testing purposes.
	$ZPOOL_CMD scrub $POOL_TAG || fail
	sleep 10
	$ZPOOL_CMD export $POOL_TAG || fail

	$ZFS_SH -u || fail
}
# If the zfs-images directory doesn't exist fetch a copy from Github then
# cache it in the $TEST_DIR and update $IMAGES_DIR.
if [ ! -d $IMAGES_DIR ]; then
	IMAGES_DIR="$TEST_DIR/zfs-images"
	mkdir -p $IMAGES_DIR
	curl -sL $IMAGES_TAR | \
	    tar -xz -C $IMAGES_DIR --strip-components=1 || fail
fi
# Given the available images in the zfs-images directory substitute the
# list of available images for the reserved keyword 'all'.
for TAG in $POOL_TAGS; do
	if [ "$TAG" = "all" ]; then
		ALL_TAGS=`ls $IMAGES_DIR | grep "tar.bz2" | \
		    sed 's/.tar.bz2//' | tr '\n' ' '`
		NEW_TAGS="$NEW_TAGS $ALL_TAGS"
	else
		NEW_TAGS="$NEW_TAGS $TAG"
	fi
done
POOL_TAGS="$NEW_TAGS"
if [ $VERBOSE -ne 0 ]; then
	echo "---------------------------- Options ----------------------------"
	echo "VERBOSE=$VERBOSE"
	echo "KEEP=$KEEP"
	echo "REPO=$REPO"
	echo "SRC_TAGS="$SRC_TAGS""
	echo "POOL_TAGS="$POOL_TAGS""
	echo "PATH=$TEST_DIR"
	echo
fi

if [ ! -d $TEST_DIR ]; then
	mkdir -p $TEST_DIR
fi
# Print a header for all tags which are being tested.
echo "--------------------- ZFS on Linux Source Versions --------------"
printf "%-16s" " "
for TAG in $SRC_TAGS; do
	src_set_vars $TAG

	if [ "$TAG" = "installed" ]; then
		ZFS_VERSION=`modinfo zfs | awk '/version:/ { print $2; exit }'`
		if [ -n "$ZFS_VERSION" ]; then
			printf "%-16s" $ZFS_VERSION
		else
			echo -e "ZFS is not installed\n"
			fail
		fi
	else
		printf "%-16s" $TAG
	fi
done
echo -e "\n-----------------------------------------------------------------"
#
# Attempt to generate the tarball from your local git repository, if that
# fails then attempt to download the tarball from Github.
#
printf "%-16s" "Clone SPL"
for TAG in $SRC_TAGS; do
	src_set_vars $TAG

	if [ -d $SPL_DIR ]; then
		skip_nonewline
	elif [ "$SPL_TAG" = "installed" ]; then
		skip_nonewline
	else
		cd $SPLSRC

		if [ ! -d $SRC_DIR_SPL ]; then
			mkdir -p $SRC_DIR_SPL
		fi

		git archive --format=tar --prefix=$SPL_TAG/ $SPL_TAG \
		    -o $SRC_DIR_SPL/$SPL_TAG.tar &>/dev/null || \
		    rm $SRC_DIR_SPL/$SPL_TAG.tar
		if [ -s $SRC_DIR_SPL/$SPL_TAG.tar ]; then
			tar -xf $SRC_DIR_SPL/$SPL_TAG.tar -C $SRC_DIR_SPL
			rm $SRC_DIR_SPL/$SPL_TAG.tar
			echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
		else
			mkdir -p $SPL_DIR || fail
			curl -sL $SPL_URL | tar -xz -C $SPL_DIR \
			    --strip-components=1 || fail
			echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
		fi
	fi
done
printf "\n"
#
# Attempt to generate the tarball from your local git repository, if that
# fails then attempt to download the tarball from Github.
#
printf "%-16s" "Clone ZFS"
for TAG in $SRC_TAGS; do
	src_set_vars $TAG

	if [ -d $ZFS_DIR ]; then
		skip_nonewline
	elif [ "$ZFS_TAG" = "installed" ]; then
		skip_nonewline
	else
		cd $SRCDIR

		if [ ! -d $SRC_DIR_ZFS ]; then
			mkdir -p $SRC_DIR_ZFS
		fi

		git archive --format=tar --prefix=$ZFS_TAG/ $ZFS_TAG \
		    -o $SRC_DIR_ZFS/$ZFS_TAG.tar &>/dev/null || \
		    rm $SRC_DIR_ZFS/$ZFS_TAG.tar
		if [ -s $SRC_DIR_ZFS/$ZFS_TAG.tar ]; then
			tar -xf $SRC_DIR_ZFS/$ZFS_TAG.tar -C $SRC_DIR_ZFS
			rm $SRC_DIR_ZFS/$ZFS_TAG.tar
			echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t"
		else
			mkdir -p $ZFS_DIR || fail
			curl -sL $ZFS_URL | tar -xz -C $ZFS_DIR \
			    --strip-components=1 || fail
			echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t"
		fi
	fi
done
printf "\n"
# Build the listed tags
printf "%-16s" "Build SPL"
for TAG in $SRC_TAGS; do
	src_set_vars $TAG

	if [ -f $SPL_DIR/module/spl/spl.ko ]; then
		skip_nonewline
	elif [ "$SPL_TAG" = "installed" ]; then
		skip_nonewline
	else
		cd $SPL_DIR
		make distclean &>/dev/null
		sh ./autogen.sh &>/dev/null || fail
		./configure &>/dev/null || fail
		make -s -j$CPUS &>/dev/null || fail
		pass_nonewline
	fi
done
printf "\n"
# Build the listed tags
printf "%-16s" "Build ZFS"
for TAG in $SRC_TAGS; do
	src_set_vars $TAG

	if [ -f $ZFS_DIR/module/zfs/zfs.ko ]; then
		skip_nonewline
	elif [ "$ZFS_TAG" = "installed" ]; then
		skip_nonewline
	else
		cd $ZFS_DIR
		make distclean &>/dev/null
		sh ./autogen.sh &>/dev/null || fail
		./configure --with-spl=$SPL_DIR &>/dev/null || fail
		make -s -j$CPUS &>/dev/null || fail
		pass_nonewline
	fi
done
printf "\n"
echo "-----------------------------------------------------------------"
# Either create a new pool using 'zpool create', or alternately restore an
# existing pool from another ZFS implementation for compatibility testing.
for TAG in $POOL_TAGS; do
	pool_set_vars $TAG
	SKIP=0

	printf "%-16s" $POOL_TAG
	rm -Rf $POOL_DIR
	mkdir -p $POOL_DIR_PRISTINE

	# Use the existing compressed image if available.
	if [ -f $POOL_BZIP ]; then
		tar -xjf $POOL_BZIP -C $POOL_DIR_PRISTINE \
		    --strip-components=1 || fail
	# Use the installed version to create the pool.
	elif [ "$TAG" = "installed" ]; then
		pool_create $TAG
	# A source build is available to create the pool.
	elif [ -d $POOL_DIR_SRC ]; then
		pool_create $TAG
	else
		SKIP=1
	fi

	# Verify 'zpool import' works for all listed source versions.
	for TAG in $SRC_TAGS; do
		if [ $SKIP -eq 1 ]; then
			skip_nonewline
			continue
		fi

		src_set_vars $TAG
		if [ "$TAG" != "installed" ]; then
			cd $ZFS_DIR
		fi
		$ZFS_SH zfs="spa_config_path=$POOL_DIR_COPY"

		cp -a --sparse=always $POOL_DIR_PRISTINE $POOL_DIR_COPY || fail
		POOL_NAME=`$ZPOOL_CMD import -d $POOL_DIR_COPY | \
		    awk '/pool:/ { print $2; exit 0 }'`

		$ZPOOL_CMD import -N -d $POOL_DIR_COPY $POOL_NAME &>/dev/null
		if [ $? -ne 0 ]; then
			fail_nonewline
			ERROR=1
		else
			$ZPOOL_CMD export $POOL_NAME || fail
			pass_nonewline
		fi

		rm -Rf $POOL_DIR_COPY
		$ZFS_SH -u || fail
	done
	printf "\n"
done
if [ $KEEP -eq 0 ]; then
	rm -Rf $TEST_DIR
fi
exit $ERROR