mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2024-11-17 01:51:00 +03:00
Various ZED fixes
* Teach ZED to handle spares usingi the configured ashift: if the zpool 'ashift' property is set then ZED should use its value when kicking in a hotspare; with this change 512e disks can be used as spares for VDEVs that were created with ashift=9, even if ZFS natively detects them as 4K block devices. * Introduce an additional auto_spare test case which verifies that in the face of multiple device failures an appropiate number of spares are kicked in. * Fix zed_stop() in "libtest.shlib" which did not correctly wait the target pid. * Fix ZED crashing on startup caused by a race condition in libzfs when used in multi-threaded context. * Convert ZED over to using the tpool library which is already present in the Illumos FMA code. Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov> Signed-off-by: loli10K <ezomori.nozomu@gmail.com> Closes #2562 Closes #6858
This commit is contained in:
parent
3ab3166347
commit
4e9b156960
@ -350,19 +350,3 @@ zfs_agent_fini(void)
|
|||||||
|
|
||||||
g_zfs_hdl = NULL;
|
g_zfs_hdl = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* In ZED context, all the FMA agents run in the same thread
|
|
||||||
* and do not require a unique libzfs instance. Modules should
|
|
||||||
* use these stubs.
|
|
||||||
*/
|
|
||||||
libzfs_handle_t *
|
|
||||||
__libzfs_init(void)
|
|
||||||
{
|
|
||||||
return (g_zfs_hdl);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
|
||||||
__libzfs_fini(libzfs_handle_t *hdl)
|
|
||||||
{
|
|
||||||
}
|
|
||||||
|
@ -39,13 +39,6 @@ extern int zfs_slm_init(void);
|
|||||||
extern void zfs_slm_fini(void);
|
extern void zfs_slm_fini(void);
|
||||||
extern void zfs_slm_event(const char *, const char *, nvlist_t *);
|
extern void zfs_slm_event(const char *, const char *, nvlist_t *);
|
||||||
|
|
||||||
/*
|
|
||||||
* In ZED context, all the FMA agents run in the same thread
|
|
||||||
* and do not require a unique libzfs instance.
|
|
||||||
*/
|
|
||||||
extern libzfs_handle_t *__libzfs_init(void);
|
|
||||||
extern void __libzfs_fini(libzfs_handle_t *);
|
|
||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -919,27 +919,27 @@ _zfs_diagnosis_init(fmd_hdl_t *hdl)
|
|||||||
{
|
{
|
||||||
libzfs_handle_t *zhdl;
|
libzfs_handle_t *zhdl;
|
||||||
|
|
||||||
if ((zhdl = __libzfs_init()) == NULL)
|
if ((zhdl = libzfs_init()) == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",
|
if ((zfs_case_pool = uu_list_pool_create("zfs_case_pool",
|
||||||
sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),
|
sizeof (zfs_case_t), offsetof(zfs_case_t, zc_node),
|
||||||
NULL, UU_LIST_POOL_DEBUG)) == NULL) {
|
NULL, UU_LIST_POOL_DEBUG)) == NULL) {
|
||||||
__libzfs_fini(zhdl);
|
libzfs_fini(zhdl);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((zfs_cases = uu_list_create(zfs_case_pool, NULL,
|
if ((zfs_cases = uu_list_create(zfs_case_pool, NULL,
|
||||||
UU_LIST_DEBUG)) == NULL) {
|
UU_LIST_DEBUG)) == NULL) {
|
||||||
uu_list_pool_destroy(zfs_case_pool);
|
uu_list_pool_destroy(zfs_case_pool);
|
||||||
__libzfs_fini(zhdl);
|
libzfs_fini(zhdl);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
|
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
|
||||||
uu_list_destroy(zfs_cases);
|
uu_list_destroy(zfs_cases);
|
||||||
uu_list_pool_destroy(zfs_case_pool);
|
uu_list_pool_destroy(zfs_case_pool);
|
||||||
__libzfs_fini(zhdl);
|
libzfs_fini(zhdl);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -975,5 +975,5 @@ _zfs_diagnosis_fini(fmd_hdl_t *hdl)
|
|||||||
uu_list_pool_destroy(zfs_case_pool);
|
uu_list_pool_destroy(zfs_case_pool);
|
||||||
|
|
||||||
zhdl = fmd_hdl_getspecific(hdl);
|
zhdl = fmd_hdl_getspecific(hdl);
|
||||||
__libzfs_fini(zhdl);
|
libzfs_fini(zhdl);
|
||||||
}
|
}
|
||||||
|
@ -64,7 +64,6 @@
|
|||||||
* trigger the FMA fault that we skipped earlier.
|
* trigger the FMA fault that we skipped earlier.
|
||||||
*
|
*
|
||||||
* ZFS on Linux porting notes:
|
* ZFS on Linux porting notes:
|
||||||
* In lieu of a thread pool, just spawn a thread on demmand.
|
|
||||||
* Linux udev provides a disk insert for both the disk and the partition
|
* Linux udev provides a disk insert for both the disk and the partition
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
@ -83,6 +82,7 @@
|
|||||||
#include <sys/sunddi.h>
|
#include <sys/sunddi.h>
|
||||||
#include <sys/sysevent/eventdefs.h>
|
#include <sys/sysevent/eventdefs.h>
|
||||||
#include <sys/sysevent/dev.h>
|
#include <sys/sysevent/dev.h>
|
||||||
|
#include <thread_pool.h>
|
||||||
#include <pthread.h>
|
#include <pthread.h>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include "zfs_agents.h"
|
#include "zfs_agents.h"
|
||||||
@ -97,12 +97,12 @@ typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
|
|||||||
libzfs_handle_t *g_zfshdl;
|
libzfs_handle_t *g_zfshdl;
|
||||||
list_t g_pool_list; /* list of unavailable pools at initialization */
|
list_t g_pool_list; /* list of unavailable pools at initialization */
|
||||||
list_t g_device_list; /* list of disks with asynchronous label request */
|
list_t g_device_list; /* list of disks with asynchronous label request */
|
||||||
|
tpool_t *g_tpool;
|
||||||
boolean_t g_enumeration_done;
|
boolean_t g_enumeration_done;
|
||||||
pthread_t g_zfs_tid;
|
pthread_t g_zfs_tid; /* zfs_enum_pools() thread */
|
||||||
|
|
||||||
typedef struct unavailpool {
|
typedef struct unavailpool {
|
||||||
zpool_handle_t *uap_zhp;
|
zpool_handle_t *uap_zhp;
|
||||||
pthread_t uap_enable_tid; /* dataset enable thread if activated */
|
|
||||||
list_node_t uap_node;
|
list_node_t uap_node;
|
||||||
} unavailpool_t;
|
} unavailpool_t;
|
||||||
|
|
||||||
@ -135,7 +135,6 @@ zfs_unavail_pool(zpool_handle_t *zhp, void *data)
|
|||||||
unavailpool_t *uap;
|
unavailpool_t *uap;
|
||||||
uap = malloc(sizeof (unavailpool_t));
|
uap = malloc(sizeof (unavailpool_t));
|
||||||
uap->uap_zhp = zhp;
|
uap->uap_zhp = zhp;
|
||||||
uap->uap_enable_tid = 0;
|
|
||||||
list_insert_tail((list_t *)data, uap);
|
list_insert_tail((list_t *)data, uap);
|
||||||
} else {
|
} else {
|
||||||
zpool_close(zhp);
|
zpool_close(zhp);
|
||||||
@ -512,19 +511,14 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
|
|||||||
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
|
(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
void
|
||||||
zfs_enable_ds(void *arg)
|
zfs_enable_ds(void *arg)
|
||||||
{
|
{
|
||||||
unavailpool_t *pool = (unavailpool_t *)arg;
|
unavailpool_t *pool = (unavailpool_t *)arg;
|
||||||
|
|
||||||
assert(pool->uap_enable_tid = pthread_self());
|
|
||||||
|
|
||||||
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
|
(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
|
||||||
zpool_close(pool->uap_zhp);
|
zpool_close(pool->uap_zhp);
|
||||||
pool->uap_zhp = NULL;
|
free(pool);
|
||||||
|
|
||||||
/* Note: zfs_slm_fini() will cleanup this pool entry on exit */
|
|
||||||
return (NULL);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
@ -559,15 +553,13 @@ zfs_iter_pool(zpool_handle_t *zhp, void *data)
|
|||||||
for (pool = list_head(&g_pool_list); pool != NULL;
|
for (pool = list_head(&g_pool_list); pool != NULL;
|
||||||
pool = list_next(&g_pool_list, pool)) {
|
pool = list_next(&g_pool_list, pool)) {
|
||||||
|
|
||||||
if (pool->uap_enable_tid != 0)
|
|
||||||
continue; /* entry already processed */
|
|
||||||
if (strcmp(zpool_get_name(zhp),
|
if (strcmp(zpool_get_name(zhp),
|
||||||
zpool_get_name(pool->uap_zhp)))
|
zpool_get_name(pool->uap_zhp)))
|
||||||
continue;
|
continue;
|
||||||
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
|
if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
|
||||||
/* send to a background thread; keep on list */
|
list_remove(&g_pool_list, pool);
|
||||||
(void) pthread_create(&pool->uap_enable_tid,
|
(void) tpool_dispatch(g_tpool, zfs_enable_ds,
|
||||||
NULL, zfs_enable_ds, pool);
|
pool);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -857,7 +849,7 @@ zfs_enum_pools(void *arg)
|
|||||||
int
|
int
|
||||||
zfs_slm_init()
|
zfs_slm_init()
|
||||||
{
|
{
|
||||||
if ((g_zfshdl = __libzfs_init()) == NULL)
|
if ((g_zfshdl = libzfs_init()) == NULL)
|
||||||
return (-1);
|
return (-1);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -869,7 +861,7 @@ zfs_slm_init()
|
|||||||
|
|
||||||
if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
|
if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
|
||||||
list_destroy(&g_pool_list);
|
list_destroy(&g_pool_list);
|
||||||
__libzfs_fini(g_zfshdl);
|
libzfs_fini(g_zfshdl);
|
||||||
return (-1);
|
return (-1);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -887,19 +879,15 @@ zfs_slm_fini()
|
|||||||
|
|
||||||
/* wait for zfs_enum_pools thread to complete */
|
/* wait for zfs_enum_pools thread to complete */
|
||||||
(void) pthread_join(g_zfs_tid, NULL);
|
(void) pthread_join(g_zfs_tid, NULL);
|
||||||
|
/* destroy the thread pool */
|
||||||
|
if (g_tpool != NULL) {
|
||||||
|
tpool_wait(g_tpool);
|
||||||
|
tpool_destroy(g_tpool);
|
||||||
|
}
|
||||||
|
|
||||||
while ((pool = (list_head(&g_pool_list))) != NULL) {
|
while ((pool = (list_head(&g_pool_list))) != NULL) {
|
||||||
/*
|
|
||||||
* each pool entry has two possibilities
|
|
||||||
* 1. was made available (so wait for zfs_enable_ds thread)
|
|
||||||
* 2. still unavailable (just close the pool)
|
|
||||||
*/
|
|
||||||
if (pool->uap_enable_tid)
|
|
||||||
(void) pthread_join(pool->uap_enable_tid, NULL);
|
|
||||||
else if (pool->uap_zhp != NULL)
|
|
||||||
zpool_close(pool->uap_zhp);
|
|
||||||
|
|
||||||
list_remove(&g_pool_list, pool);
|
list_remove(&g_pool_list, pool);
|
||||||
|
zpool_close(pool->uap_zhp);
|
||||||
free(pool);
|
free(pool);
|
||||||
}
|
}
|
||||||
list_destroy(&g_pool_list);
|
list_destroy(&g_pool_list);
|
||||||
@ -910,7 +898,7 @@ zfs_slm_fini()
|
|||||||
}
|
}
|
||||||
list_destroy(&g_device_list);
|
list_destroy(&g_device_list);
|
||||||
|
|
||||||
__libzfs_fini(g_zfshdl);
|
libzfs_fini(g_zfshdl);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
|
@ -176,6 +176,8 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
|
|||||||
nvlist_t **spares;
|
nvlist_t **spares;
|
||||||
uint_t s, nspares;
|
uint_t s, nspares;
|
||||||
char *dev_name;
|
char *dev_name;
|
||||||
|
zprop_source_t source;
|
||||||
|
int ashift;
|
||||||
|
|
||||||
config = zpool_get_config(zhp, NULL);
|
config = zpool_get_config(zhp, NULL);
|
||||||
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
|
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
|
||||||
@ -189,6 +191,11 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
|
|||||||
&spares, &nspares) != 0)
|
&spares, &nspares) != 0)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* lookup "ashift" pool property, we may need it for the replacement
|
||||||
|
*/
|
||||||
|
ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
|
||||||
|
|
||||||
replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
|
replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
|
||||||
|
|
||||||
(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
|
(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
|
||||||
@ -207,6 +214,11 @@ replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
|
|||||||
&spare_name) != 0)
|
&spare_name) != 0)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
|
/* if set, add the "ashift" pool property to the spare nvlist */
|
||||||
|
if (source != ZPROP_SRC_DEFAULT)
|
||||||
|
(void) nvlist_add_uint64(spares[s],
|
||||||
|
ZPOOL_CONFIG_ASHIFT, ashift);
|
||||||
|
|
||||||
(void) nvlist_add_nvlist_array(replacement,
|
(void) nvlist_add_nvlist_array(replacement,
|
||||||
ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
|
ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
|
||||||
|
|
||||||
@ -483,7 +495,7 @@ _zfs_retire_init(fmd_hdl_t *hdl)
|
|||||||
zfs_retire_data_t *zdp;
|
zfs_retire_data_t *zdp;
|
||||||
libzfs_handle_t *zhdl;
|
libzfs_handle_t *zhdl;
|
||||||
|
|
||||||
if ((zhdl = __libzfs_init()) == NULL)
|
if ((zhdl = libzfs_init()) == NULL)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
|
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
|
||||||
@ -504,7 +516,7 @@ _zfs_retire_fini(fmd_hdl_t *hdl)
|
|||||||
|
|
||||||
if (zdp != NULL) {
|
if (zdp != NULL) {
|
||||||
zfs_retire_clear_data(hdl, zdp);
|
zfs_retire_clear_data(hdl, zdp);
|
||||||
__libzfs_fini(zdp->zrd_hdl);
|
libzfs_fini(zdp->zrd_hdl);
|
||||||
fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
|
fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -464,7 +464,7 @@ tags = ['functional', 'exec']
|
|||||||
|
|
||||||
[tests/functional/fault]
|
[tests/functional/fault]
|
||||||
tests = ['auto_online_001_pos', 'auto_replace_001_pos', 'auto_spare_001_pos',
|
tests = ['auto_online_001_pos', 'auto_replace_001_pos', 'auto_spare_001_pos',
|
||||||
'auto_spare_002_pos.ksh']
|
'auto_spare_002_pos', 'auto_spare_ashift', 'auto_spare_multiple']
|
||||||
tags = ['functional', 'fault']
|
tags = ['functional', 'fault']
|
||||||
|
|
||||||
[tests/functional/features/async_destroy]
|
[tests/functional/features/async_destroy]
|
||||||
|
@ -353,16 +353,35 @@ function insert_disk #disk scsi_host
|
|||||||
|
|
||||||
#
|
#
|
||||||
# Load scsi_debug module with specified parameters
|
# Load scsi_debug module with specified parameters
|
||||||
|
# $blksz can be either one of: < 512b | 512e | 4Kn >
|
||||||
#
|
#
|
||||||
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
|
function load_scsi_debug # dev_size_mb add_host num_tgts max_luns blksz
|
||||||
{
|
{
|
||||||
typeset devsize=$1
|
typeset devsize=$1
|
||||||
typeset hosts=$2
|
typeset hosts=$2
|
||||||
typeset tgts=$3
|
typeset tgts=$3
|
||||||
typeset luns=$4
|
typeset luns=$4
|
||||||
|
typeset blksz=$5
|
||||||
|
|
||||||
[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
|
[[ -z $devsize ]] || [[ -z $hosts ]] || [[ -z $tgts ]] || \
|
||||||
[[ -z $luns ]] && log_fail "Arguments invalid or missing"
|
[[ -z $luns ]] || [[ -z $blksz ]] && \
|
||||||
|
log_fail "Arguments invalid or missing"
|
||||||
|
|
||||||
|
case "$5" in
|
||||||
|
'512b')
|
||||||
|
typeset sector=512
|
||||||
|
typeset blkexp=0
|
||||||
|
;;
|
||||||
|
'512e')
|
||||||
|
typeset sector=512
|
||||||
|
typeset blkexp=3
|
||||||
|
;;
|
||||||
|
'4Kn')
|
||||||
|
typeset sector=4096
|
||||||
|
typeset blkexp=0
|
||||||
|
;;
|
||||||
|
*) log_fail "Unsupported blksz value: $5" ;;
|
||||||
|
esac
|
||||||
|
|
||||||
if is_linux; then
|
if is_linux; then
|
||||||
modprobe -n scsi_debug
|
modprobe -n scsi_debug
|
||||||
@ -375,7 +394,8 @@ function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
|
|||||||
log_fail "scsi_debug module already installed"
|
log_fail "scsi_debug module already installed"
|
||||||
else
|
else
|
||||||
log_must modprobe scsi_debug dev_size_mb=$devsize \
|
log_must modprobe scsi_debug dev_size_mb=$devsize \
|
||||||
add_host=$hosts num_tgts=$tgts max_luns=$luns
|
add_host=$hosts num_tgts=$tgts max_luns=$luns \
|
||||||
|
sector_size=$sector physblk_exp=$blkexp
|
||||||
block_device_wait
|
block_device_wait
|
||||||
lsscsi | egrep scsi_debug > /dev/null
|
lsscsi | egrep scsi_debug > /dev/null
|
||||||
if (($? == 1)); then
|
if (($? == 1)); then
|
||||||
@ -385,6 +405,16 @@ function load_scsi_debug # dev_size_mb add_host num_tgts max_luns
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#
|
||||||
|
# Unload scsi_debug module, if needed.
|
||||||
|
#
|
||||||
|
function unload_scsi_debug
|
||||||
|
{
|
||||||
|
if lsmod | grep scsi_debug >/dev/null; then
|
||||||
|
log_must modprobe -r scsi_debug
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
# Get scsi_debug device name.
|
# Get scsi_debug device name.
|
||||||
# Returns basename of scsi_debug device (for example "sdb").
|
# Returns basename of scsi_debug device (for example "sdb").
|
||||||
|
@ -3158,13 +3158,25 @@ function zed_stop
|
|||||||
if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
|
if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
|
||||||
zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
|
zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
|
||||||
kill $zedpid
|
kill $zedpid
|
||||||
wait $zedpid
|
while ps -p $zedpid > /dev/null; do
|
||||||
|
sleep 1
|
||||||
|
done
|
||||||
rm -f ${ZEDLET_DIR}/zed.pid
|
rm -f ${ZEDLET_DIR}/zed.pid
|
||||||
fi
|
fi
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#
|
||||||
|
# Drain all zevents
|
||||||
|
#
|
||||||
|
function zed_events_drain
|
||||||
|
{
|
||||||
|
while [ $(zpool events -H | wc -l) -ne 0 ]; do
|
||||||
|
sleep 1
|
||||||
|
zpool events -c >/dev/null
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
# Check is provided device is being active used as a swap device.
|
# Check is provided device is being active used as a swap device.
|
||||||
#
|
#
|
||||||
|
@ -27,7 +27,7 @@ if is_linux; then
|
|||||||
for SDDEVICE in $(get_debug_device); do
|
for SDDEVICE in $(get_debug_device); do
|
||||||
unplug $SDDEVICE
|
unplug $SDDEVICE
|
||||||
done
|
done
|
||||||
modprobe -r scsi_debug
|
unload_scsi_debug
|
||||||
fi
|
fi
|
||||||
|
|
||||||
log_pass
|
log_pass
|
||||||
|
@ -22,7 +22,7 @@ verify_runnable "global"
|
|||||||
|
|
||||||
# Create scsi_debug devices for the reopen tests
|
# Create scsi_debug devices for the reopen tests
|
||||||
if is_linux; then
|
if is_linux; then
|
||||||
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS
|
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS '512b'
|
||||||
else
|
else
|
||||||
log_unsupported "scsi debug module unsupported"
|
log_unsupported "scsi debug module unsupported"
|
||||||
fi
|
fi
|
||||||
|
@ -6,4 +6,6 @@ dist_pkgdata_SCRIPTS = \
|
|||||||
auto_online_001_pos.ksh \
|
auto_online_001_pos.ksh \
|
||||||
auto_replace_001_pos.ksh \
|
auto_replace_001_pos.ksh \
|
||||||
auto_spare_001_pos.ksh \
|
auto_spare_001_pos.ksh \
|
||||||
auto_spare_002_pos.ksh
|
auto_spare_002_pos.ksh \
|
||||||
|
auto_spare_ashift.ksh \
|
||||||
|
auto_spare_multiple.ksh
|
||||||
|
@ -54,9 +54,8 @@ fi
|
|||||||
|
|
||||||
function cleanup
|
function cleanup
|
||||||
{
|
{
|
||||||
#online last disk before fail
|
destroy_pool $TESTPOOL
|
||||||
insert_disk $offline_disk $host
|
unload_scsi_debug
|
||||||
poolexists $TESTPOOL && destroy_pool $TESTPOOL
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log_assert "Testing automated auto-online FMA test"
|
log_assert "Testing automated auto-online FMA test"
|
||||||
@ -65,8 +64,8 @@ log_onexit cleanup
|
|||||||
|
|
||||||
# If using the default loop devices, need a scsi_debug device for auto-online
|
# If using the default loop devices, need a scsi_debug device for auto-online
|
||||||
if is_loop_device $DISK1; then
|
if is_loop_device $DISK1; then
|
||||||
SD=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}')
|
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS '512b'
|
||||||
SDDEVICE=$(echo $SD | nawk -F / '{print $3}')
|
SDDEVICE=$(get_debug_device)
|
||||||
SDDEVICE_ID=$(get_persistent_disk_name $SDDEVICE)
|
SDDEVICE_ID=$(get_persistent_disk_name $SDDEVICE)
|
||||||
autoonline_disks="$SDDEVICE"
|
autoonline_disks="$SDDEVICE"
|
||||||
else
|
else
|
||||||
|
@ -57,27 +57,23 @@ fi
|
|||||||
|
|
||||||
function setup
|
function setup
|
||||||
{
|
{
|
||||||
lsmod | egrep scsi_debug > /dev/null
|
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS '512b'
|
||||||
if (($? == 1)); then
|
SD=$(get_debug_device)
|
||||||
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS
|
SDDEVICE_ID=$(get_persistent_disk_name $SD)
|
||||||
fi
|
|
||||||
# Register vdev_id alias rule for scsi_debug device to create a
|
# Register vdev_id alias rule for scsi_debug device to create a
|
||||||
# persistent path
|
# persistent path
|
||||||
SD=$(lsscsi | nawk '/scsi_debug/ {print $6; exit}' \
|
|
||||||
| nawk -F / '{print $3}')
|
|
||||||
SDDEVICE_ID=$(get_persistent_disk_name $SD)
|
|
||||||
log_must eval "echo "alias scsidebug /dev/disk/by-id/$SDDEVICE_ID" \
|
log_must eval "echo "alias scsidebug /dev/disk/by-id/$SDDEVICE_ID" \
|
||||||
>> $VDEVID_CONF"
|
>> $VDEVID_CONF"
|
||||||
block_device_wait
|
block_device_wait
|
||||||
|
SDDEVICE=$(udevadm info -q all -n $DEV_DSKDIR/$SD \
|
||||||
SDDEVICE=$(udevadm info -q all -n $DEV_DSKDIR/$SD | egrep ID_VDEV \
|
| awk -F'=' '/ID_VDEV=/{print $2; exit}')
|
||||||
| nawk '{print $2; exit}' | nawk -F = '{print $2; exit}')
|
|
||||||
[[ -z $SDDEVICE ]] && log_fail "vdev rule was not registered properly"
|
[[ -z $SDDEVICE ]] && log_fail "vdev rule was not registered properly"
|
||||||
}
|
}
|
||||||
|
|
||||||
function cleanup
|
function cleanup
|
||||||
{
|
{
|
||||||
poolexists $TESTPOOL && destroy_pool $TESTPOOL
|
destroy_pool $TESTPOOL
|
||||||
|
unload_scsi_debug
|
||||||
}
|
}
|
||||||
|
|
||||||
log_assert "Testing automated auto-replace FMA test"
|
log_assert "Testing automated auto-replace FMA test"
|
||||||
@ -112,7 +108,7 @@ log_must zpool export -F $TESTPOOL
|
|||||||
# Offline disk
|
# Offline disk
|
||||||
remove_disk $SD
|
remove_disk $SD
|
||||||
block_device_wait
|
block_device_wait
|
||||||
log_must modprobe -r scsi_debug
|
unload_scsi_debug
|
||||||
|
|
||||||
# Reimport pool with drive missing
|
# Reimport pool with drive missing
|
||||||
log_must zpool import $TESTPOOL
|
log_must zpool import $TESTPOOL
|
||||||
|
@ -42,7 +42,7 @@ verify_runnable "both"
|
|||||||
function cleanup
|
function cleanup
|
||||||
{
|
{
|
||||||
log_must zinject -c all
|
log_must zinject -c all
|
||||||
poolexists $TESTPOOL && destroy_pool $TESTPOOL
|
destroy_pool $TESTPOOL
|
||||||
rm -f $VDEV_FILES $SPARE_FILE
|
rm -f $VDEV_FILES $SPARE_FILE
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,6 +50,9 @@ log_assert "Testing automated auto-spare FMA test"
|
|||||||
|
|
||||||
log_onexit cleanup
|
log_onexit cleanup
|
||||||
|
|
||||||
|
# Clear events from previous runs
|
||||||
|
zed_events_drain
|
||||||
|
|
||||||
TESTFILE="/$TESTPOOL/$TESTFS/testfile"
|
TESTFILE="/$TESTPOOL/$TESTFS/testfile"
|
||||||
|
|
||||||
for type in "mirror" "raidz" "raidz2"; do
|
for type in "mirror" "raidz" "raidz2"; do
|
||||||
|
@ -42,7 +42,7 @@ verify_runnable "both"
|
|||||||
function cleanup
|
function cleanup
|
||||||
{
|
{
|
||||||
log_must zinject -c all
|
log_must zinject -c all
|
||||||
poolexists $TESTPOOL && destroy_pool $TESTPOOL
|
destroy_pool $TESTPOOL
|
||||||
rm -f $VDEV_FILES $SPARE_FILE
|
rm -f $VDEV_FILES $SPARE_FILE
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -50,6 +50,9 @@ log_assert "Testing automated auto-spare FMA test"
|
|||||||
|
|
||||||
log_onexit cleanup
|
log_onexit cleanup
|
||||||
|
|
||||||
|
# Clear events from previous runs
|
||||||
|
zed_events_drain
|
||||||
|
|
||||||
TESTFILE="/$TESTPOOL/$TESTFS/testfile"
|
TESTFILE="/$TESTPOOL/$TESTFS/testfile"
|
||||||
|
|
||||||
for type in "mirror" "raidz" "raidz2"; do
|
for type in "mirror" "raidz" "raidz2"; do
|
||||||
@ -65,8 +68,14 @@ for type in "mirror" "raidz" "raidz2"; do
|
|||||||
log_must dd if=/dev/urandom of=$TESTFILE bs=1M count=16
|
log_must dd if=/dev/urandom of=$TESTFILE bs=1M count=16
|
||||||
|
|
||||||
# 4. Inject CHECKSUM ERRORS on read with a zinject error handler
|
# 4. Inject CHECKSUM ERRORS on read with a zinject error handler
|
||||||
|
# NOTE: checksum events are ratelimited to max 5 per second, ZED needs
|
||||||
|
# 10 to kick in a spare
|
||||||
log_must zinject -d $FAULT_FILE -e corrupt -f 50 -T read $TESTPOOL
|
log_must zinject -d $FAULT_FILE -e corrupt -f 50 -T read $TESTPOOL
|
||||||
log_must cp $TESTFILE /dev/null
|
log_must cp $TESTFILE /dev/null
|
||||||
|
log_must sleep 1
|
||||||
|
log_must cp $TESTFILE /dev/null
|
||||||
|
log_must sleep 1
|
||||||
|
log_must cp $TESTFILE /dev/null
|
||||||
|
|
||||||
# 5. Verify the ZED kicks in a hot spare and expected pool/device status
|
# 5. Verify the ZED kicks in a hot spare and expected pool/device status
|
||||||
log_note "Wait for ZED to auto-spare"
|
log_note "Wait for ZED to auto-spare"
|
||||||
|
101
tests/zfs-tests/tests/functional/fault/auto_spare_ashift.ksh
Executable file
101
tests/zfs-tests/tests/functional/fault/auto_spare_ashift.ksh
Executable file
@ -0,0 +1,101 @@
|
|||||||
|
#!/bin/ksh -p
|
||||||
|
|
||||||
|
#
|
||||||
|
# CDDL HEADER START
|
||||||
|
#
|
||||||
|
# This file and its contents are supplied under the terms of the
|
||||||
|
# Common Development and Distribution License ("CDDL"), version 1.0.
|
||||||
|
# You may only use this file in accordance with the terms of version
|
||||||
|
# 1.0 of the CDDL.
|
||||||
|
#
|
||||||
|
# A full copy of the text of the CDDL should have accompanied this
|
||||||
|
# source. A copy of the CDDL is also available via the Internet at
|
||||||
|
# http://www.illumos.org/license/CDDL.
|
||||||
|
#
|
||||||
|
# CDDL HEADER END
|
||||||
|
#
|
||||||
|
|
||||||
|
#
|
||||||
|
# Copyright (c) 2017 by Intel Corporation. All rights reserved.
|
||||||
|
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
|
||||||
|
#
|
||||||
|
|
||||||
|
. $STF_SUITE/include/libtest.shlib
|
||||||
|
. $STF_SUITE/include/math.shlib
|
||||||
|
. $STF_SUITE/tests/functional/fault/fault.cfg
|
||||||
|
|
||||||
|
#
|
||||||
|
# DESCRIPTION:
|
||||||
|
# Testing Fault Management Agent ZED Logic - Automated Auto-Spare Test when
|
||||||
|
# drive is faulted and a custom ashift value needs to be provided to replace it.
|
||||||
|
#
|
||||||
|
# STRATEGY:
|
||||||
|
# 1. Create a pool from 512b devices and set "ashift" pool property accordingly
|
||||||
|
# 2. Add one 512e spare device (4Kn would generate IO errors on replace)
|
||||||
|
# 3. Inject IO errors with a zinject error handler
|
||||||
|
# 4. Start a scrub
|
||||||
|
# 5. Verify the ZED kicks in the hot spare and expected pool/device status
|
||||||
|
# 6. Clear the fault
|
||||||
|
# 7. Verify the hot spare is available and expected pool/device status
|
||||||
|
#
|
||||||
|
|
||||||
|
verify_runnable "both"
|
||||||
|
|
||||||
|
function cleanup
|
||||||
|
{
|
||||||
|
log_must zinject -c all
|
||||||
|
destroy_pool $TESTPOOL
|
||||||
|
unload_scsi_debug
|
||||||
|
rm -f $SAFE_DEVICE $FAIL_DEVICE
|
||||||
|
}
|
||||||
|
|
||||||
|
log_assert "ZED should replace a device using the configured ashift property"
|
||||||
|
log_onexit cleanup
|
||||||
|
|
||||||
|
# Clear events from previous runs
|
||||||
|
zed_events_drain
|
||||||
|
|
||||||
|
SAFE_DEVICE="$TEST_BASE_DIR/safe-dev"
|
||||||
|
FAIL_DEVICE="$TEST_BASE_DIR/fail-dev"
|
||||||
|
|
||||||
|
# 1. Create a pool from 512b devices and set "ashift" pool property accordingly
|
||||||
|
for vdev in $SAFE_DEVICE $FAIL_DEVICE; do
|
||||||
|
truncate -s $SPA_MINDEVSIZE $vdev
|
||||||
|
done
|
||||||
|
log_must zpool create -f $TESTPOOL mirror $SAFE_DEVICE $FAIL_DEVICE
|
||||||
|
# NOTE: file VDEVs should be added as 512b devices, verify this "just in case"
|
||||||
|
for vdev in $SAFE_DEVICE $FAIL_DEVICE; do
|
||||||
|
verify_eq "9" "$(zdb -e -l $vdev | awk '/ashift: /{print $2}')" "ashift"
|
||||||
|
done
|
||||||
|
log_must zpool set ashift=9 $TESTPOOL
|
||||||
|
|
||||||
|
# 2. Add one 512e spare device (4Kn would generate IO errors on replace)
|
||||||
|
# NOTE: must be larger than the existing 512b devices, add 32m of fudge
|
||||||
|
load_scsi_debug $(($SPA_MINDEVSIZE/1024/1024+32)) $SDHOSTS $SDTGTS $SDLUNS '512e'
|
||||||
|
SPARE_DEVICE=$(get_debug_device)
|
||||||
|
log_must_busy zpool add $TESTPOOL spare $SPARE_DEVICE
|
||||||
|
|
||||||
|
# 3. Inject IO errors with a zinject error handler
|
||||||
|
log_must zinject -d $FAIL_DEVICE -e io -T all -f 100 $TESTPOOL
|
||||||
|
|
||||||
|
# 4. Start a scrub
|
||||||
|
log_must zpool scrub $TESTPOOL
|
||||||
|
|
||||||
|
# 5. Verify the ZED kicks in a hot spare and expected pool/device status
|
||||||
|
log_note "Wait for ZED to auto-spare"
|
||||||
|
log_must wait_vdev_state $TESTPOOL $FAIL_DEVICE "FAULTED" 60
|
||||||
|
log_must wait_vdev_state $TESTPOOL $SPARE_DEVICE "ONLINE" 60
|
||||||
|
log_must wait_hotspare_state $TESTPOOL $SPARE_DEVICE "INUSE"
|
||||||
|
log_must check_state $TESTPOOL "" "DEGRADED"
|
||||||
|
|
||||||
|
# 6. Clear the fault
|
||||||
|
log_must zinject -c all
|
||||||
|
log_must zpool clear $TESTPOOL $FAIL_DEVICE
|
||||||
|
|
||||||
|
# 7. Verify the hot spare is available and expected pool/device status
|
||||||
|
log_must wait_vdev_state $TESTPOOL $FAIL_DEVICE "ONLINE" 60
|
||||||
|
log_must wait_hotspare_state $TESTPOOL $SPARE_DEVICE "AVAIL"
|
||||||
|
log_must is_pool_resilvered $TESTPOOL
|
||||||
|
log_must check_state $TESTPOOL "" "ONLINE"
|
||||||
|
|
||||||
|
log_pass "ZED successfully replaces a device using the configured ashift property"
|
152
tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh
Executable file
152
tests/zfs-tests/tests/functional/fault/auto_spare_multiple.ksh
Executable file
@ -0,0 +1,152 @@
|
|||||||
|
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#

#
# Copyright (c) 2017 by Intel Corporation. All rights reserved.
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/fault/fault.cfg

#
# DESCRIPTION:
# Testing Fault Management Agent ZED Logic - Automated Auto-Spare Test when
# multiple drives are faulted.
#
# STRATEGY:
# 1. Create a pool with two hot spares
# 2. Inject IO ERRORS with a zinject error handler on the first device
# 3. Start a scrub
# 4. Verify the ZED kicks in a hot spare and expected pool/device status
# 5. Inject IO ERRORS on a second device
# 6. Start a scrub
# 7. Verify the ZED kicks in a second hot spare
# 8. Clear the fault on both devices
# 9. Verify the hot spares are available and expected pool/device status
# 10. Rinse and repeat, this time faulting both devices at the same time
#

verify_runnable "both"

# Remove error injection handlers, destroy the test pool and delete the
# file-backed vdevs used by this test.
function cleanup
{
	log_must zinject -c all
	destroy_pool $TESTPOOL
	rm -f $DATA_DEVS $SPARE_DEVS
}

log_assert "ZED should be able to handle multiple faulted devices"
log_onexit cleanup

# Clear events from previous runs
zed_events_drain

FAULT_DEV1="$TEST_BASE_DIR/fault-dev1"
FAULT_DEV2="$TEST_BASE_DIR/fault-dev2"
SAFE_DEV1="$TEST_BASE_DIR/safe-dev1"
SAFE_DEV2="$TEST_BASE_DIR/safe-dev2"
DATA_DEVS="$FAULT_DEV1 $FAULT_DEV2 $SAFE_DEV1 $SAFE_DEV2"
SPARE_DEV1="$TEST_BASE_DIR/spare-dev1"
SPARE_DEV2="$TEST_BASE_DIR/spare-dev2"
SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"

for type in "mirror" "raidz" "raidz2" "raidz3"; do
	# 1. Create a pool with two hot spares
	truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
	log_must zpool create -f $TESTPOOL $type $DATA_DEVS spare $SPARE_DEVS

	# 2. Inject IO ERRORS with a zinject error handler on the first device
	log_must zinject -d $FAULT_DEV1 -e io -T all -f 100 $TESTPOOL

	# 3. Start a scrub
	log_must zpool scrub $TESTPOOL

	# 4. Verify the ZED kicks in a hot spare and expected pool/device status
	log_note "Wait for ZED to auto-spare"
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV1 "FAULTED" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
	log_must check_state $TESTPOOL "" "DEGRADED"

	# 5. Inject IO ERRORS on a second device
	log_must zinject -d $FAULT_DEV2 -e io -T all -f 100 $TESTPOOL

	# 6. Start a scrub
	# NOTE: a scrub/resilver triggered by the first fault may still be
	# running; wait for it to finish before requesting a new scrub.
	while is_pool_scrubbing $TESTPOOL || is_pool_resilvering $TESTPOOL; do
		sleep 1
	done
	log_must zpool scrub $TESTPOOL

	# 7. Verify the ZED kicks in a second hot spare
	log_note "Wait for ZED to auto-spare"
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV2 "FAULTED" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV2 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "INUSE"
	log_must check_state $TESTPOOL "" "DEGRADED"

	# 8. Clear the fault on both devices
	log_must zinject -c all
	log_must zpool clear $TESTPOOL $FAULT_DEV1
	log_must zpool clear $TESTPOOL $FAULT_DEV2

	# 9. Verify the hot spares are available and expected pool/device status
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV1 "ONLINE" 60
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV2 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "AVAIL"
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
	log_must check_state $TESTPOOL "" "ONLINE"

	# Cleanup
	cleanup
done

# Rinse and repeat, this time faulting both devices at the same time
# NOTE: "raidz" is excluded since it cannot survive 2 faulted devices
# NOTE: "mirror" is a 4-way mirror here and should survive this test
for type in "mirror" "raidz2" "raidz3"; do
	# 1. Create a pool with two hot spares
	truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
	log_must zpool create -f $TESTPOOL $type $DATA_DEVS spare $SPARE_DEVS

	# 2. Inject IO ERRORS with a zinject error handler on two devices
	log_must eval "zinject -d $FAULT_DEV1 -e io -T all -f 100 $TESTPOOL &"
	log_must eval "zinject -d $FAULT_DEV2 -e io -T all -f 100 $TESTPOOL &"

	# 3. Start a scrub
	log_must zpool scrub $TESTPOOL

	# 4. Verify the ZED kicks in two hot spares and expected pool/device status
	log_note "Wait for ZED to auto-spare"
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV1 "FAULTED" 60
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV2 "FAULTED" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV2 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "INUSE"
	log_must check_state $TESTPOOL "" "DEGRADED"

	# 5. Clear the fault on both devices
	log_must zinject -c all
	log_must zpool clear $TESTPOOL $FAULT_DEV1
	log_must zpool clear $TESTPOOL $FAULT_DEV2

	# Cleanup
	cleanup
done

log_pass "ZED successfully handles multiple faulted devices"
|
@ -33,14 +33,4 @@ cleanup_devices $DISKS
|
|||||||
zed_stop
|
zed_stop
|
||||||
zed_cleanup
|
zed_cleanup
|
||||||
|
|
||||||
SDDEVICE=$(get_debug_device)
|
|
||||||
|
|
||||||
# Offline disk and remove scsi_debug module
|
|
||||||
if is_linux; then
|
|
||||||
if [ -n "$SDDEVICE" ]; then
|
|
||||||
remove_disk $SDDEVICE
|
|
||||||
fi
|
|
||||||
modprobe -r scsi_debug
|
|
||||||
fi
|
|
||||||
|
|
||||||
log_pass
|
log_pass
|
||||||
|
@ -31,8 +31,4 @@ verify_runnable "global"
|
|||||||
zed_setup
|
zed_setup
|
||||||
zed_start
|
zed_start
|
||||||
|
|
||||||
# Create a scsi_debug device to be used with auto-online (if using loop devices)
|
|
||||||
# and auto-replace regardless of other devices
|
|
||||||
load_scsi_debug $SDSIZE $SDHOSTS $SDTGTS $SDLUNS
|
|
||||||
|
|
||||||
log_pass
|
log_pass
|
||||||
|
Loading…
Reference in New Issue
Block a user