/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * This file contains the functions which analyze the status of a pool.  This
 * includes both the status of an active pool, as well as the status of
 * exported pools.  Returns one of the ZPOOL_STATUS_* defines describing the
 * status of the pool.  This status is independent (to a certain degree) of the
 * state of the pool.  A pool's state describes only whether or not it is
 * capable of providing the necessary fault tolerance for data.  The status
 * describes the overall status of devices.  A pool that is online can still
 * have a device that is experiencing errors.
 *
 * Only a subset of the possible faults can be detected using 'zpool status',
 * and not all possible errors correspond to a FMA message ID.  The explanation
 * is left up to the caller, depending on whether it is a live pool or an
 * import.  (See the usage sketch following the message ID table below.)
 */

#include <libzfs.h>
#include <string.h>
#include <unistd.h>
#include "libzfs_impl.h"
#include "zfeature_common.h"

/*
 * Message ID table.  This must be kept in sync with the ZPOOL_STATUS_* defines
 * in libzfs.h.  Note that there are some status results which go past the end
 * of this table, and hence have no associated message ID.
 */
static char *zfs_msgid_table[] = {
	"ZFS-8000-14",
	"ZFS-8000-2Q",
	"ZFS-8000-3C",
	"ZFS-8000-4J",
	"ZFS-8000-5E",
	"ZFS-8000-6X",
	"ZFS-8000-72",
	"ZFS-8000-8A",
	"ZFS-8000-9P",
	"ZFS-8000-A5",
	"ZFS-8000-EY",
	"ZFS-8000-EY",
	"ZFS-8000-EY",
	"ZFS-8000-HC",
	"ZFS-8000-JQ",
	"ZFS-8000-K4",
	"ZFS-8000-ER",
};

#define	NMSGID	(sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
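
/*
 * Usage sketch (editorial example, not part of the original source): a caller
 * holding an open zpool_handle_t can map the pool's status to its FMA message
 * ID through this table via zpool_get_status(), defined below.  The handle
 * name 'zhp' is assumed for illustration:
 *
 *	char *msgid;
 *	zpool_errata_t errata;
 *	zpool_status_t status = zpool_get_status(zhp, &msgid, &errata);
 *
 *	if (msgid != NULL)
 *		(void) printf("pool status %d maps to %s\n",
 *		    (int)status, msgid);
 *	else
 *		(void) printf("pool status %d has no message ID\n",
 *		    (int)status);
 */
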

/* ARGSUSED */
static int
vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN &&
	    aux == VDEV_AUX_OPEN_FAILED);
}

/* ARGSUSED */
static int
vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_FAULTED);
}

/* ARGSUSED */
static int
vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_DEGRADED || errs != 0);
}

/* ARGSUSED */
static int
vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_CANT_OPEN);
}

/* ARGSUSED */
static int
vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_OFFLINE);
}

/* ARGSUSED */
static int
vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
{
	return (state == VDEV_STATE_REMOVED);
}

/*
 * Detect if any leaf devices have seen errors or could not be opened.
 */
static boolean_t
find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
{
	nvlist_t **child;
	vdev_stat_t *vs;
	uint_t c, children;
	char *type;

	/*
	 * Ignore problems within a 'replacing' vdev, since we're presumably in
	 * the process of repairing any such errors, and don't want to call them
	 * out again.  We'll pick up the fact that a resilver is happening
	 * later.
	 */
	verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
		return (B_FALSE);

	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	} else {
		verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0);

		if (func(vs->vs_state, vs->vs_aux,
		    vs->vs_read_errors +
		    vs->vs_write_errors +
		    vs->vs_checksum_errors))
			return (B_TRUE);
	}

	/*
	 * Check any L2 cache devs
	 */
	if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_vdev_problem(child[c], func))
				return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Active pool health status.
 *
 * To determine the status for a pool, we make several passes over the config,
 * picking the most egregious error we find.  In order of importance, we do the
 * following:
 *
 *	- Check for a complete and valid configuration
 *	- Look for any faulted or missing devices in a non-replicated config
 *	- Check for any data errors
 *	- Check for any faulted or missing devices in a replicated config
 *	- Look for any devices showing errors
 *	- Check for any resilvering devices
 *
 * There can obviously be multiple errors within a single pool, so this routine
 * only picks the most damaging of all the current errors to report.
 */
static zpool_status_t
check_status(nvlist_t *config, boolean_t isimport, zpool_errata_t *erratap)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	pool_scan_stat_t *ps = NULL;
	uint_t vsc, psc;
	uint64_t nerr;
	uint64_t version;
	uint64_t stateval;
	uint64_t suspended;
	uint64_t hostid = 0;
	uint64_t errata = 0;
	unsigned long system_hostid = get_system_hostid();

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
	    &version) == 0);
	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);

	/*
	 * Currently resilvering a vdev
	 */
	(void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &psc);
	if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
	    ps->pss_state == DSS_SCANNING)
		return (ZPOOL_STATUS_RESILVERING);

	/*
	 * The multihost property is set and the pool may be active.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_ACTIVE) {
		mmp_state_t mmp_state;
		nvlist_t *nvinfo;

		nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
		mmp_state = fnvlist_lookup_uint64(nvinfo,
		    ZPOOL_CONFIG_MMP_STATE);

		if (mmp_state == MMP_STATE_ACTIVE)
			return (ZPOOL_STATUS_HOSTID_ACTIVE);
		else if (mmp_state == MMP_STATE_NO_HOSTID)
			return (ZPOOL_STATUS_HOSTID_REQUIRED);
		else
			return (ZPOOL_STATUS_HOSTID_MISMATCH);
	}

	/*
	 * Pool last accessed by another system.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
	if (hostid != 0 && (unsigned long)hostid != system_hostid &&
	    stateval == POOL_STATE_ACTIVE)
		return (ZPOOL_STATUS_HOSTID_MISMATCH);

	/*
	 * Newer on-disk version.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_VERSION_NEWER)
		return (ZPOOL_STATUS_VERSION_NEWER);

	/*
	 * Unsupported feature(s).
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
		nvlist_t *nvinfo;

		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
		    &nvinfo) == 0);
		if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
			return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
		return (ZPOOL_STATUS_UNSUP_FEAT_READ);
	}

	/*
	 * Check that the config is complete.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
		return (ZPOOL_STATUS_BAD_GUID_SUM);

	/*
	 * Check whether the pool has suspended due to failed I/O.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
	    &suspended) == 0) {
		if (suspended == ZIO_FAILURE_MODE_CONTINUE)
			return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
		return (ZPOOL_STATUS_IO_FAILURE_WAIT);
	}

	/*
	 * Could not read a log.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_BAD_LOG) {
		return (ZPOOL_STATUS_BAD_LOG);
	}

	/*
	 * Bad devices in non-replicated config.
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_NR);

	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_NR);

	/*
	 * Corrupted pool metadata
	 */
	if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
	    vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
		return (ZPOOL_STATUS_CORRUPT_POOL);

	/*
	 * Persistent data errors.
	 */
	if (!isimport) {
		if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
		    &nerr) == 0 && nerr != 0)
			return (ZPOOL_STATUS_CORRUPT_DATA);
	}

	/*
	 * Missing devices in a replicated config.
	 */
	if (find_vdev_problem(nvroot, vdev_faulted))
		return (ZPOOL_STATUS_FAULTED_DEV_R);
	if (find_vdev_problem(nvroot, vdev_missing))
		return (ZPOOL_STATUS_MISSING_DEV_R);
	if (find_vdev_problem(nvroot, vdev_broken))
		return (ZPOOL_STATUS_CORRUPT_LABEL_R);

	/*
	 * Devices with errors
	 */
	if (!isimport && find_vdev_problem(nvroot, vdev_errors))
		return (ZPOOL_STATUS_FAILING_DEV);

	/*
	 * Offlined devices
	 */
	if (find_vdev_problem(nvroot, vdev_offlined))
		return (ZPOOL_STATUS_OFFLINE_DEV);

	/*
	 * Removed device
	 */
	if (find_vdev_problem(nvroot, vdev_removed))
		return (ZPOOL_STATUS_REMOVED_DEV);

	/*
	 * Outdated, but usable, version
	 */
	if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
		return (ZPOOL_STATUS_VERSION_OLDER);

	/*
	 * Usable pool with disabled features
	 */
	if (version >= SPA_VERSION_FEATURES) {
		int i;
		nvlist_t *feat;

		if (isimport) {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_LOAD_INFO);
			if (nvlist_exists(feat, ZPOOL_CONFIG_ENABLED_FEAT))
				feat = fnvlist_lookup_nvlist(feat,
				    ZPOOL_CONFIG_ENABLED_FEAT);
		} else {
			feat = fnvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_FEATURE_STATS);
		}

		for (i = 0; i < SPA_FEATURES; i++) {
			zfeature_info_t *fi = &spa_feature_table[i];
			if (!nvlist_exists(feat, fi->fi_guid))
				return (ZPOOL_STATUS_FEAT_DISABLED);
		}
	}

	/*
	 * Informational errata available.
	 */
	(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRATA, &errata);
	if (errata) {
		*erratap = errata;
		return (ZPOOL_STATUS_ERRATA);
	}

	return (ZPOOL_STATUS_OK);
}

zpool_status_t
zpool_get_status(zpool_handle_t *zhp, char **msgid, zpool_errata_t *errata)
{
	zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE, errata);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}

zpool_status_t
zpool_import_status(nvlist_t *config, char **msgid, zpool_errata_t *errata)
{
	zpool_status_t ret = check_status(config, B_TRUE, errata);

	if (ret >= NMSGID)
		*msgid = NULL;
	else
		*msgid = zfs_msgid_table[ret];

	return (ret);
}
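
/*
 * Usage sketch (editorial example, not part of the original source): import
 * callers such as 'zpool import' pass the candidate config nvlist obtained
 * from an import scan rather than an open handle.  Assuming such a config:
 *
 *	char *msgid;
 *	zpool_errata_t errata;
 *	zpool_status_t reason = zpool_import_status(config, &msgid, &errata);
 *
 *	if (msgid == NULL)
 *		(void) printf("status %d has no FMA message ID\n",
 *		    (int)reason);
 */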

static void
dump_ddt_stat(const ddt_stat_t *dds, int h)
{
	char refcnt[6];
	char blocks[6], lsize[6], psize[6], dsize[6];
	char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];

	if (dds == NULL || dds->dds_blocks == 0)
		return;

	if (h == -1)
		(void) strcpy(refcnt, "Total");
	else
		zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));

	zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
	zfs_nicebytes(dds->dds_lsize, lsize, sizeof (lsize));
	zfs_nicebytes(dds->dds_psize, psize, sizeof (psize));
	zfs_nicebytes(dds->dds_dsize, dsize, sizeof (dsize));
	zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
	zfs_nicebytes(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
	zfs_nicebytes(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
	zfs_nicebytes(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    refcnt,
	    blocks, lsize, psize, dsize,
	    ref_blocks, ref_lsize, ref_psize, ref_dsize);
}

/*
 * Print the DDT histogram and the column totals.
 */
void
zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
{
	int h;

	(void) printf("\n");

	(void) printf("bucket "
	    " allocated "
	    " referenced \n");
	(void) printf("______ "
	    "______________________________ "
	    "______________________________\n");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "refcnt",
	    "blocks", "LSIZE", "PSIZE", "DSIZE",
	    "blocks", "LSIZE", "PSIZE", "DSIZE");

	(void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
	    "------",
	    "------", "-----", "-----", "-----",
	    "------", "-----", "-----", "-----");

	for (h = 0; h < 64; h++)
		dump_ddt_stat(&ddh->ddh_stat[h], h);

	dump_ddt_stat(dds_total, -1);

	(void) printf("\n");
}
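
/*
 * Usage sketch (editorial example, not part of the original source): a caller
 * such as 'zpool status -D' can obtain the dedup table totals and histogram
 * from the pool config and print them with zpool_dump_ddt().  The nvlist keys
 * below are assumed to be present only when the pool has dedup statistics:
 *
 *	nvlist_t *config = zpool_get_config(zhp, NULL);
 *	ddt_stat_t *dds;
 *	ddt_histogram_t *ddh;
 *	uint_t c;
 *
 *	if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
 *	    (uint64_t **)&dds, &c) == 0 &&
 *	    nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
 *	    (uint64_t **)&ddh, &c) == 0)
 *		zpool_dump_ddt(dds, ddh);
 */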