zed: unclean disk attachment faults the vdev

If the attached disk already contains a vdev GUID, the disk is not
clean. In that case the physical path alone would still match, and
zed would fault the disk when trying to online it. So we only want
to proceed if the GUID on the newly attached disk matches the vdev
at that path, or the disk is in a clean state.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Ameer Hamza <ahamza@ixsystems.com>
Closes #14181
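
A minimal standalone sketch of the decision described in the commit message,
with hypothetical names and values; the actual logic is the dd_new_vdev_guid
comparison added to zed's zfs_iter_vdev() in the diff below:

/*
 * Hypothetical helper modeling the new check: decide whether a
 * physical-path match may be acted on for a newly attached disk.
 *
 * new_vdev_guid: GUID read from the attached disk's label
 *                (0 when the disk is clean, i.e. unlabeled).
 * vdev_guid:     GUID of the pool vdev that owns this physical path.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
may_online_by_physpath(uint64_t new_vdev_guid, uint64_t vdev_guid)
{
	/* A clean (unlabeled) disk is always acceptable. */
	if (new_vdev_guid == 0)
		return (true);
	/*
	 * A labeled disk is only acceptable if it is the same vdev that
	 * last lived at this path; anything else would be faulted as
	 * soon as zed tried to online it.
	 */
	return (new_vdev_guid == vdev_guid);
}

int
main(void)
{
	printf("%d\n", may_online_by_physpath(0, 0x1234));      /* clean disk: 1 */
	printf("%d\n", may_online_by_physpath(0x1234, 0x1234)); /* same vdev: 1 */
	printf("%d\n", may_online_by_physpath(0xbeef, 0x1234)); /* foreign label: 0 */
	return (0);
}
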
Ameer Hamza 2022-11-29 22:24:10 +05:00 committed by GitHub
parent 303678350a
commit e996c502e4
2 changed files with 21 additions and 9 deletions

cmd/zed/agents/zfs_mod.c

@@ -535,6 +535,7 @@ typedef struct dev_data {
 	boolean_t dd_islabeled;
 	uint64_t dd_pool_guid;
 	uint64_t dd_vdev_guid;
+	uint64_t dd_new_vdev_guid;
 	const char *dd_new_devid;
 } dev_data_t;
@@ -545,6 +546,7 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
 	char *path = NULL;
 	uint_t c, children;
 	nvlist_t **child;
+	uint64_t guid = 0;
 
 	/*
 	 * First iterate over any children.
@@ -572,17 +574,14 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
 	/* once a vdev was matched and processed there is nothing left to do */
 	if (dp->dd_found)
 		return;
+	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
 
 	/*
 	 * Match by GUID if available otherwise fallback to devid or physical
 	 */
 	if (dp->dd_vdev_guid != 0) {
-		uint64_t guid;
-
-		if (nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID,
-		    &guid) != 0 || guid != dp->dd_vdev_guid) {
+		if (guid != dp->dd_vdev_guid)
 			return;
-		}
 
 		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
 		dp->dd_found = B_TRUE;
@@ -592,6 +591,12 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
 	 * illumos, substring matching is not required to accommodate
 	 * the partition suffix. An exact match will be present in
 	 * the dp->dd_compare value.
+	 * If the attached disk already contains a vdev GUID, it means
+	 * the disk is not clean. In such a scenario, the physical path
+	 * would be a match that makes the disk faulted when trying to
+	 * online it. So, we would only want to proceed if either GUID
+	 * matches with the last attached disk or the disk is in clean
+	 * state.
 	 */
 	if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
 	    strcmp(dp->dd_compare, path) != 0) {
@@ -599,6 +604,12 @@ zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
 		    __func__, dp->dd_compare, path);
 		return;
 	}
+	if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
+		zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
+		    " != vdev GUID:%llu)", __func__,
+		    dp->dd_new_vdev_guid, guid);
+		return;
+	}
 
 	zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
 	    dp->dd_prop, path);
@@ -680,7 +691,7 @@ zfs_iter_pool(zpool_handle_t *zhp, void *data)
  */
 static boolean_t
 devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
-    boolean_t is_slice)
+    boolean_t is_slice, uint64_t new_vdev_guid)
 {
 	dev_data_t data = { 0 };
@@ -690,6 +701,7 @@ devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
 	data.dd_found = B_FALSE;
 	data.dd_islabeled = is_slice;
 	data.dd_new_devid = devid;	/* used by auto replace code */
+	data.dd_new_vdev_guid = new_vdev_guid;
 
 	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
@@ -858,7 +870,7 @@ zfs_deliver_add(nvlist_t *nvl)
 	if (devid_iter(devid, zfs_process_add, is_slice))
 		return (0);
 	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
-	    is_slice))
+	    is_slice, vdev_guid))
 		return (0);
 	if (vdev_guid != 0)
 		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,

module/zfs/vdev.c

@@ -4269,9 +4269,9 @@ vdev_clear(spa_t *spa, vdev_t *vd)
 		vdev_clear(spa, vd->vdev_child[c]);
 
 	/*
-	 * It makes no sense to "clear" an indirect vdev.
+	 * It makes no sense to "clear" an indirect or removed vdev.
 	 */
-	if (!vdev_is_concrete(vd))
+	if (!vdev_is_concrete(vd) || vd->vdev_removed)
 		return;
 
 	/*