/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 * Copyright (c) 2016, Intel Corporation.
 */

#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/vdev_impl.h>
#include <libzfs.h>
#include "libzfs_impl.h"
#include <libzutil.h>
#include <sys/arc_impl.h>

/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	uint64_t theguid = fnvlist_lookup_uint64(zhp->zpool_config,
	    ZPOOL_CONFIG_POOL_GUID);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

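/*
 * Ask the kernel to regenerate an updated version of the given pool
 * configuration via ZFS_IOC_POOL_TRYIMPORT.  The size of the returned
 * nvlist is not known up front, so the destination buffer is grown and the
 * ioctl retried for as long as it fails with ENOMEM.
 */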
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = {"\0"};
	int err, dstbuf_size;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	dstbuf_size = MAX(CONFIG_BUF_MINSIZE, zc.zc_nvlist_conf_size * 32);

	if (zcmd_alloc_dst_nvlist(hdl, &zc, dstbuf_size) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	while ((err = zfs_ioctl(hdl, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

static nvlist_t *
refresh_config_libzfs(void *handle, nvlist_t *tryconfig)
{
	return (refresh_config((libzfs_handle_t *)handle, tryconfig));
}

static int
pool_active_libzfs(void *handle, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	return (pool_active((libzfs_handle_t *)handle, name, guid, isactive));
}

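/*
 * Callback table handed to the pool-import scanning code in libzutil
 * (e.g. zpool_search_import()), which uses it to refresh candidate configs
 * and to check whether a pool is already active on this system.
 */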
const pool_config_ops_t libzfs_config_ops = {
	.pco_refresh_config = refresh_config_libzfs,
	.pco_pool_active = pool_active_libzfs,
};

/*
 * Return the offset of the given label.
 */
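/*
 * Of the VDEV_LABELS (four) label copies, labels 0 and 1 sit at the front
 * of the device and labels 2 and 3 at the end, so the offsets of the
 * trailing labels are computed back from the label-aligned device size.
 */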
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}

/*
 * Given a file descriptor, clear (zero) the label information. This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	l2arc_dev_hdr_phys_t *l2dhdr;
	uint64_t size;
	int labels_cleared = 0, header_cleared = 0;
	boolean_t clear_l2arc_header = B_FALSE;

	if (fstat64_blk(fd, &statbuf) == -1)
		return (0);

	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
		return (-1);

	if ((l2dhdr = calloc(1, sizeof (l2arc_dev_hdr_phys_t))) == NULL) {
		free(label);
		return (-1);
	}

	for (l = 0; l < VDEV_LABELS; l++) {
		uint64_t state, guid, l2cache;
		nvlist_t *config;

		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			continue;
		}

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), &config, 0) != 0) {
			continue;
		}

		/* Skip labels which do not have a valid guid. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
		    &guid) != 0 || guid == 0) {
			nvlist_free(config);
			continue;
		}

		/* Skip labels which are not in a known valid state. */
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(config);
			continue;
		}

		/* If the device is a cache device, clear the L2ARC header. */
		if (!clear_l2arc_header) {
			if (nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
			    l2cache == POOL_STATE_L2CACHE) {
				clear_l2arc_header = B_TRUE;
			}
		}

		nvlist_free(config);

		/*
		 * A valid label was found, so overwrite this label's nvlist
		 * and uberblocks with zeros on disk. This is done to prevent
		 * system utilities, like blkid, from incorrectly detecting a
		 * partial label. The leading pad space is left untouched.
		 */
		memset(label, 0, sizeof (vdev_label_t));
		size_t label_size = sizeof (vdev_label_t) - (2 * VDEV_PAD_SIZE);

		if (pwrite64(fd, label, label_size, label_offset(size, l) +
		    (2 * VDEV_PAD_SIZE)) == label_size) {
			labels_cleared++;
		}
	}

	/* Clear the L2ARC header. */
	if (clear_l2arc_header) {
		memset(l2dhdr, 0, sizeof (l2arc_dev_hdr_phys_t));
		if (pwrite64(fd, l2dhdr, sizeof (l2arc_dev_hdr_phys_t),
		    VDEV_LABEL_START_SIZE) == sizeof (l2arc_dev_hdr_phys_t)) {
			header_cleared++;
		}
	}

	free(label);
	free(l2dhdr);

	if (labels_cleared == 0)
		return (-1);

	return (0);
}

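/*
 * Sketch of a typical zpool_clear_label() caller (the device path and the
 * error handling are illustrative only, not part of this library): open the
 * whole device read-write, clear the labels, and flush before reuse.
 *
 *	int fd = open("/dev/sdX", O_RDWR);
 *	if (fd >= 0) {
 *		if (zpool_clear_label(fd) == 0)
 *			(void) fsync(fd);
 *		(void) close(fd);
 *	}
 */
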
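/*
 * Walk the vdev tree rooted at nv and return B_TRUE if any vdev in it has
 * the given guid.
 */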
static boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	nvlist_t **child;
	uint_t c, children;

	if (fnvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID) == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}

typedef struct aux_cbdata {
	const char *cb_type;
	uint64_t cb_guid;
	zpool_handle_t *cb_zhp;
} aux_cbdata_t;

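/*
 * zpool_iter() callback: if the pool's aux vdev list named by cb_type
 * (spares or l2cache) contains a vdev with cb_guid, stash the open handle
 * in cb_zhp and stop iterating; otherwise close the handle and keep going.
 */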
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t count;

	nvlist_t *nvroot = fnvlist_lookup_nvlist(zhp->zpool_config,
	    ZPOOL_CONFIG_VDEV_TREE);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (uint_t i = 0; i < count; i++) {
			uint64_t guid = fnvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. Name string is allocated and
 * must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name = NULL;
	boolean_t ret;
	uint64_t guid = 0, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config, NULL) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	stateval = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
	vdev_guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
		guid = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only. If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will
		 * exist and the guid will be the same), or whether it's part
		 * of an active pool that was disconnected without being
		 * explicitly exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config. Otherwise, pretend like
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot = fnvlist_lookup_nvlist(
				    pool_config, ZPOOL_CONFIG_VDEV_TREE);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool. Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active. To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use. This means the user
		 * can freely create pools on the hot spares of exported pools,
		 * but to do otherwise makes the resulting code complicated,
		 * and we end up having to deal with this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:

		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
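
/*
 * Sketch of a zpool_in_use() caller (the variable names and message are
 * illustrative only): probe an open device before repurposing it, and free
 * the returned name string when done.
 *
 *	pool_state_t state;
 *	char *name = NULL;
 *	boolean_t inuse = B_FALSE;
 *
 *	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
 *		(void) fprintf(stderr, "device belongs to pool '%s'\n", name);
 *		free(name);
 *	}
 */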