/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2016 Gary Mills
 */

#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif

typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
    const zbookmark_phys_t *);

static scan_cb_t dsl_scan_scrub_cb;
static void dsl_scan_cancel_sync(void *, dmu_tx_t *);
static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *);
static boolean_t dsl_scan_restarting(dsl_scan_t *, dmu_tx_t *);

int zfs_top_maxinflight = 32;		/* maximum I/Os per top-level */
int zfs_resilver_delay = 2;		/* number of ticks to delay resilver */
int zfs_scrub_delay = 4;		/* number of ticks to delay scrub */
int zfs_scan_idle = 50;			/* idle window in clock ticks */

int zfs_scan_min_time_ms = 1000;	/* min millisecs to scrub per txg */
int zfs_free_min_time_ms = 1000;	/* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000;	/* min millisecs to resilver per txg */
int zfs_no_scrub_io = B_FALSE;		/* set to disable scrub i/o */
int zfs_no_scrub_prefetch = B_FALSE;	/* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
int dsl_scan_delay_completion = B_FALSE; /* set to delay scan completion */
/* max number of blocks to free in a single TXG */
ulong zfs_free_max_blocks = 100000;

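/*
 * On Linux builds these tunables are typically exported as module
 * parameters (via module_param() declarations elsewhere in this file, not
 * shown in this excerpt) and can then be adjusted at runtime, e.g.:
 *
 *	echo 3000 > /sys/module/zfs/parameters/zfs_scan_min_time_ms
 *
 * This is an illustrative sketch; consult the module parameter
 * declarations for the authoritative list of exported tunables.
 */
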
#define	DSL_SCAN_IS_SCRUB_RESILVER(scn) \
	((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
	(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)

/*
 * Enable/disable the processing of the free_bpobj object.
 */
int zfs_free_bpobj_enabled = 1;

/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
	NULL,
	dsl_scan_scrub_cb,	/* POOL_SCAN_SCRUB */
	dsl_scan_scrub_cb,	/* POOL_SCAN_RESILVER */
};

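/*
 * The on-disk scan func value doubles as the index into this table, so
 * callers dispatch with a single indirect call; see dsl_scan_zil_block()
 * and dsl_scan_zil_record() below, which do exactly:
 *
 *	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
 */
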
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
	int err;
	dsl_scan_t *scn;
	spa_t *spa = dp->dp_spa;
	uint64_t f;

	scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
	scn->scn_dp = dp;

	/*
	 * It's possible that we're resuming a scan after a reboot so
	 * make sure that the scan_async_destroying flag is initialized
	 * appropriately.
	 */
	ASSERT(!scn->scn_async_destroying);
	scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
	    SPA_FEATURE_ASYNC_DESTROY);

	err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    "scrub_func", sizeof (uint64_t), 1, &f);
	if (err == 0) {
		/*
		 * There was an old-style scrub in progress.  Restart a
		 * new-style scrub from the beginning.
		 */
		scn->scn_restart_txg = txg;
		zfs_dbgmsg("old-style scrub was in progress; "
		    "restarting new-style scrub in txg %llu",
		    scn->scn_restart_txg);

		/*
		 * Load the queue obj from the old location so that it
		 * can be freed by dsl_scan_done().
		 */
		(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    "scrub_queue", sizeof (uint64_t), 1,
		    &scn->scn_phys.scn_queue_obj);
	} else {
		err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
		    &scn->scn_phys);
		/*
		 * Detect if the pool contains the signature of #2094.  If it
		 * does, properly update the scn->scn_phys structure and notify
		 * the administrator by setting an errata for the pool.
		 */
		if (err == EOVERFLOW) {
			uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
			VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
			VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
			    (23 * sizeof (uint64_t)));

			err = zap_lookup(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
			    sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
			if (err == 0) {
				uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];

				if (overflow & ~DSL_SCAN_FLAGS_MASK ||
				    scn->scn_async_destroying) {
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
					return (EOVERFLOW);
				}

				bcopy(zaptmp, &scn->scn_phys,
				    SCAN_PHYS_NUMINTS * sizeof (uint64_t));
				scn->scn_phys.scn_flags = overflow;

				/* Required scrub already in progress. */
				if (scn->scn_phys.scn_state == DSS_FINISHED ||
				    scn->scn_phys.scn_state == DSS_CANCELED)
					spa->spa_errata =
					    ZPOOL_ERRATA_ZOL_2094_SCRUB;
			}
		}
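
		/*
		 * For reference (paraphrased from the ZoL issue #2094
		 * erratum notes): a damaged on-disk dsl_scan_phys_t can
		 * also be detected from userland before an upgrade with
		 *
		 *   zdb -dddd <pool> 1 | grep -P '^\t\tscan = ' |
		 *       sed -e 's;scan = ;;' | wc -w
		 *
		 * where 25 indicates a damaged dsl_scan_phys_t, 24 a
		 * normal one, and 0 that the pool has never been scrubbed.
		 */
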
		if (err == ENOENT)
			return (0);
		else if (err)
			return (err);

		if (scn->scn_phys.scn_state == DSS_SCANNING &&
		    spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
			/*
			 * A new-type scrub was in progress on an old
			 * pool, and the pool was accessed by old
			 * software.  Restart from the beginning, since
			 * the old software may have changed the pool in
			 * the meantime.
			 */
			scn->scn_restart_txg = txg;
			zfs_dbgmsg("new-style scrub was modified "
			    "by old software; restarting in txg %llu",
			    scn->scn_restart_txg);
		}
	}

	spa_scan_stat_init(spa);
	return (0);
}

void
dsl_scan_fini(dsl_pool_t *dp)
{
	if (dp->dp_scan) {
		kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
		dp->dp_scan = NULL;
	}
}

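/*
 * dsl_scan_setup_check() and dsl_scan_setup_sync() below are the check/sync
 * halves of a DSL sync task.  As a sketch of how they are dispatched (the
 * caller, dsl_scan(), is outside this excerpt; compare dsl_scan_cancel()
 * further down, which uses the same pattern):
 *
 *	pool_scan_func_t func = POOL_SCAN_SCRUB;
 *	(void) dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
 *	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE);
 *
 * The exact argument values shown here are illustrative.
 */
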
/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	return (0);
}

static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
	pool_scan_func_t *funcp = arg;
	dmu_object_type_t ot = 0;
	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;

	ASSERT(scn->scn_phys.scn_state != DSS_SCANNING);
	ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
	bzero(&scn->scn_phys, sizeof (scn->scn_phys));
	scn->scn_phys.scn_func = *funcp;
	scn->scn_phys.scn_state = DSS_SCANNING;
	scn->scn_phys.scn_min_txg = 0;
	scn->scn_phys.scn_max_txg = tx->tx_txg;
	scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
	scn->scn_phys.scn_start_time = gethrestime_sec();
	scn->scn_phys.scn_errors = 0;
	scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
	scn->scn_restart_txg = 0;
	scn->scn_done_txg = 0;
	spa_scan_stat_init(spa);

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;

		/* rewrite all disk labels */
		vdev_config_dirty(spa->spa_root_vdev);

		if (vdev_resilver_needed(spa->spa_root_vdev,
		    &scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
			spa_event_notify(spa, NULL, ESC_ZFS_RESILVER_START);
		} else {
			spa_event_notify(spa, NULL, ESC_ZFS_SCRUB_START);
		}

		spa->spa_scrub_started = B_TRUE;
		/*
		 * If this is an incremental scrub, limit the DDT scrub phase
		 * to just the auto-ditto class (for correctness); the rest
		 * of the scrub should go faster using top-down pruning.
		 */
		if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
			scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
	}

	/* back to the generic stuff */

	if (dp->dp_blkstats == NULL) {
		dp->dp_blkstats =
		    vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
	}
	bzero(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));

	if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
		ot = DMU_OT_ZAP_OTHER;

	scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
	    ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);

	dsl_scan_sync_state(scn, tx);

	spa_history_log_internal(spa, "scan setup", tx,
	    "func=%u mintxg=%llu maxtxg=%llu",
	    *funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}

/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
	static const char *old_names[] = {
		"scrub_bookmark",
		"scrub_ddt_bookmark",
		"scrub_ddt_class_max",
		"scrub_queue",
		"scrub_min_txg",
		"scrub_max_txg",
		"scrub_func",
		"scrub_errors",
		NULL
	};

	dsl_pool_t *dp = scn->scn_dp;
	spa_t *spa = dp->dp_spa;
	int i;

	/* Remove any remnants of an old-style scrub. */
	for (i = 0; old_names[i]; i++) {
		(void) zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
	}

	if (scn->scn_phys.scn_queue_obj != 0) {
		VERIFY(0 == dmu_object_free(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, tx));
		scn->scn_phys.scn_queue_obj = 0;
	}

	/*
	 * If we were "restarted" from a stopped state, don't bother
	 * with anything else.
	 */
	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (complete)
		scn->scn_phys.scn_state = DSS_FINISHED;
	else
		scn->scn_phys.scn_state = DSS_CANCELED;

	if (dsl_scan_restarting(scn, tx))
		spa_history_log_internal(spa, "scan aborted, restarting", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else if (!complete)
		spa_history_log_internal(spa, "scan cancelled", tx,
		    "errors=%llu", spa_get_errlog_size(spa));
	else
		spa_history_log_internal(spa, "scan done", tx,
		    "errors=%llu", spa_get_errlog_size(spa));

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
		spa->spa_scrub_started = B_FALSE;
		spa->spa_scrub_active = B_FALSE;

		/*
		 * If the scrub/resilver completed, update all DTLs to
		 * reflect this.  Whether it succeeded or not, vacate
		 * all temporary scrub DTLs.
		 */
		vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
		    complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
		if (complete) {
			spa_event_notify(spa, NULL, scn->scn_phys.scn_min_txg ?
			    ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
		}
		spa_errlog_rotate(spa);

		/*
		 * We may have finished replacing a device.
		 * Let the async thread assess this and handle the detach.
		 */
		spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
	}

	scn->scn_phys.scn_end_time = gethrestime_sec();

	if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
		spa->spa_errata = 0;
}

/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return (SET_ERROR(ENOENT));
	return (0);
}

/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;

	dsl_scan_done(scn, B_FALSE, tx);
	dsl_scan_sync_state(scn, tx);
}

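/*
 * Entry point for stopping an in-progress scan; on typical systems this is
 * what "zpool scrub -s <pool>" ultimately reaches (illustrative note: the
 * ioctl plumbing between zpool(8) and this function is outside this file).
 */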
int
dsl_scan_cancel(dsl_pool_t *dp)
{
	return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
	    dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}

static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
    dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);

void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
	zio_free(dp->dp_spa, txg, bp);
}

void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
	ASSERT(dsl_pool_sync_context(dp));
	zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}

static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
	uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
	if (ds->ds_is_snapshot)
		return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
	return (smt);
}

static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx)
{
	VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
	    &scn->scn_phys, tx));
}

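/*
 * Because dsl_scan_sync_state() persists scn_phys under DMU_POOL_SCAN in
 * the MOS directory object, an in-progress scan survives pool export/import
 * and reboot: dsl_scan_init() above reads the same entry back with the
 * matching zap_lookup().
 */
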
extern int zfs_vdev_async_write_active_min_dirty_percent;

static boolean_t
dsl_scan_check_pause(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
	uint64_t elapsed_nanosecs;
	int mintime;
	int dirty_pct;

	/* we never skip user/group accounting objects */
	if (zb && (int64_t)zb->zb_object < 0)
		return (B_FALSE);

	if (scn->scn_pausing)
		return (B_TRUE); /* we're already pausing */

	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
		return (B_FALSE); /* we're resuming */

	/* We only know how to resume from level-0 blocks. */
	if (zb && zb->zb_level != 0)
		return (B_FALSE);

	/*
	 * We pause if:
	 *  - we have scanned for the maximum time: an entire txg
	 *    timeout (default 5 sec)
	 *  or
	 *  - we have scanned for at least the minimum time (default 1 sec
	 *    for scrub, 3 sec for resilver), and either we have sufficient
	 *    dirty data that we are starting to write more quickly
	 *    (default 30%), or someone is explicitly waiting for this txg
	 *    to complete.
	 *  or
	 *  - the spa is shutting down because this pool is being exported
	 *    or the machine is rebooting.
	 */
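	/*
	 * Concrete illustration with the default tunables mentioned above:
	 * a scrub (mintime 1000 ms) that has run for 1.5 seconds in this
	 * txg pauses early only if dirty data has reached 30% of
	 * zfs_dirty_data_max or a caller is blocked waiting on this txg;
	 * otherwise it keeps scanning until the 5-second zfs_txg_timeout
	 * limit is hit.
	 */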
	mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
	    zfs_resilver_min_time_ms : zfs_scan_min_time_ms;
	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
	if (elapsed_nanosecs / NANOSEC >= zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > mintime &&
	    (txg_sync_waiting(scn->scn_dp) ||
	    dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa)) {
		if (zb) {
			dprintf("pausing at bookmark %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			scn->scn_phys.scn_bookmark = *zb;
		}
		dprintf("pausing at DDT bookmark %llx/%llx/%llx/%llx\n",
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		scn->scn_pausing = B_TRUE;
		return (B_TRUE);
	}
	return (B_FALSE);
}

typedef struct zil_scan_arg {
	dsl_pool_t	*zsa_dp;
	zil_header_t	*zsa_zh;
} zil_scan_arg_t;

/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	zil_scan_arg_t *zsa = arg;
	dsl_pool_t *dp = zsa->zsa_dp;
	dsl_scan_t *scn = dp->dp_scan;
	zil_header_t *zh = zsa->zsa_zh;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		return (0);

	/*
	 * One block ("stubby") can be allocated a long time ago; we
	 * want to visit that one because it has been allocated
	 * (on-disk) even if it hasn't been claimed (even though for
	 * scrub there's nothing to do to it).
	 */
	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
		return (0);

	SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
	    ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	return (0);
}

/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	if (lrc->lrc_txtype == TX_WRITE) {
		zil_scan_arg_t *zsa = arg;
		dsl_pool_t *dp = zsa->zsa_dp;
		dsl_scan_t *scn = dp->dp_scan;
		zil_header_t *zh = zsa->zsa_zh;
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp) ||
		    bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
			return (0);

		/*
		 * birth can be < claim_txg if this record's txg is
		 * already txg sync'ed (but this log block contains
		 * other records that are not synced)
		 */
		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
		    lr->lr_foid, ZB_ZIL_LEVEL,
		    lr->lr_offset / BP_GET_LSIZE(bp));

		VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
	}
	return (0);
}

static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zil_scan_arg_t zsa = { dp, zh };
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed (or, in read-only mode, blocks that *would* be claimed).
	 */
	if (claim_txg == 0 && spa_writeable(dp->dp_spa))
		return;

	zilog = zil_alloc(dp->dp_meta_objset, zh);

	(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
	    claim_txg);

	zil_free(zilog);
}

/* ARGSUSED */
static void
dsl_scan_prefetch(dsl_scan_t *scn, arc_buf_t *buf, blkptr_t *bp,
    uint64_t objset, uint64_t object, uint64_t blkid)
{
	zbookmark_phys_t czb;
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (zfs_no_scrub_prefetch)
		return;

	if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_min_txg ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE))
		return;

	SET_BOOKMARK(&czb, objset, object, BP_GET_LEVEL(bp), blkid);

	(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa, bp,
	    NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD, &flags, &czb);
}

static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	/*
	 * We never skip over user/group accounting objects (obj<0)
	 */
	if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
	    (int64_t)zb->zb_object >= 0) {
		/*
		 * If we already visited this bp & everything below (in
		 * a prior txg sync), don't bother doing it again.
		 */
		if (zbookmark_subtree_completed(dnp, zb,
		    &scn->scn_phys.scn_bookmark))
			return (B_TRUE);

		/*
		 * If we found the block we're trying to resume from, or
		 * we went past it to a different object, zero it out to
		 * indicate that it's OK to start checking for pausing
		 * again.
		 */
		if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
		    zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
			dprintf("resuming at %llx/%llx/%llx/%llx\n",
			    (longlong_t)zb->zb_objset,
			    (longlong_t)zb->zb_object,
			    (longlong_t)zb->zb_level,
			    (longlong_t)zb->zb_blkid);
			bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
		}
	}
	return (B_FALSE);
}

/*
 * Return nonzero on i/o error.
 */
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
    dnode_phys_t *dnp, const blkptr_t *bp,
    const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
	int err;

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			dsl_scan_prefetch(scn, buf, cbp, zb->zb_objset,
			    zb->zb_object, zb->zb_blkid * epb + i);
		}
		for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
			zbookmark_phys_t czb;

			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			dsl_scan_visitbp(cbp, &czb, dnp,
			    ds, scn, ostype, tx);
		}
		arc_buf_destroy(buf, &buf);
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		dnode_phys_t *cdnp;
		int i, j;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		arc_buf_t *buf;

		err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
		if (err) {
			scn->scn_phys.scn_errors++;
			return (err);
		}
		for (i = 0, cdnp = buf->b_data; i < epb;
		    i += cdnp->dn_extra_slots + 1,
		    cdnp += cdnp->dn_extra_slots + 1) {
			for (j = 0; j < cdnp->dn_nblkptr; j++) {
				blkptr_t *cbp = &cdnp->dn_blkptr[j];
				dsl_scan_prefetch(scn, buf, cbp,
				    zb->zb_objset, zb->zb_blkid * epb + i, j);
			}
		}
for (i = 0, cdnp = buf->b_data; i < epb;
|
|
|
|
i += cdnp->dn_extra_slots + 1,
|
|
|
|
cdnp += cdnp->dn_extra_slots + 1) {
|
2010-05-29 00:45:14 +04:00
|
|
|
dsl_scan_visitdnode(scn, ds, ostype,
|
2014-09-17 11:07:28 +04:00
|
|
|
cdnp, zb->zb_blkid * epb + i, tx);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
2016-06-02 07:04:53 +03:00
|
|
|
arc_buf_destroy(buf, &buf);
|
2010-05-29 00:45:14 +04:00
|
|
|
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
|
2014-12-06 20:24:32 +03:00
|
|
|
arc_flags_t flags = ARC_FLAG_WAIT;
|
2010-05-29 00:45:14 +04:00
|
|
|
objset_phys_t *osp;
|
2014-09-17 11:07:28 +04:00
|
|
|
arc_buf_t *buf;
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2014-09-17 11:07:28 +04:00
|
|
|
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
|
2010-08-27 01:24:34 +04:00
|
|
|
ZIO_PRIORITY_ASYNC_READ, zio_flags, &flags, zb);
|
2010-05-29 00:45:14 +04:00
|
|
|
if (err) {
|
|
|
|
scn->scn_phys.scn_errors++;
|
|
|
|
return (err);
|
|
|
|
}
|
|
|
|
|
2014-09-17 11:07:28 +04:00
|
|
|
osp = buf->b_data;
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
dsl_scan_visitdnode(scn, ds, osp->os_type,
|
2014-09-17 11:07:28 +04:00
|
|
|
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
|
2010-05-29 00:45:14 +04:00
|
|
|
|
2014-09-17 11:07:28 +04:00
|
|
|
if (OBJSET_BUF_HAS_USERUSED(buf)) {
|
2010-05-29 00:45:14 +04:00
|
|
|
/*
|
|
|
|
* We also always visit user/group accounting
|
|
|
|
* objects, and never skip them, even if we are
|
|
|
|
* pausing. This is necessary so that the space
|
|
|
|
* deltas from this txg get integrated.
|
|
|
|
*/
|
|
|
|
dsl_scan_visitdnode(scn, ds, osp->os_type,
|
2014-09-17 11:07:28 +04:00
|
|
|
&osp->os_groupused_dnode,
|
2010-05-29 00:45:14 +04:00
|
|
|
DMU_GROUPUSED_OBJECT, tx);
|
|
|
|
dsl_scan_visitdnode(scn, ds, osp->os_type,
|
2014-09-17 11:07:28 +04:00
|
|
|
&osp->os_userused_dnode,
|
2010-05-29 00:45:14 +04:00
|
|
|
DMU_USERUSED_OBJECT, tx);
|
|
|
|
}
|
2016-06-02 07:04:53 +03:00
|
|
|
arc_buf_destroy(buf, &buf);
|
2010-05-29 00:45:14 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
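/*
 * Visit every block pointer in an on-disk dnode, plus its spill block
 * (if any), recursing into the blocks they reference.
 */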
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
    dmu_objset_type_t ostype, dnode_phys_t *dnp,
    uint64_t object, dmu_tx_t *tx)
{
	int j;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		zbookmark_phys_t czb;

		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    dnp->dn_nlevels - 1, j);
		dsl_scan_visitbp(&dnp->dn_blkptr[j],
		    &czb, dnp, ds, scn, ostype, tx);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		zbookmark_phys_t czb;
		SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
		    0, DMU_SPILL_BLKID);
		dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
		    &czb, dnp, ds, scn, ostype, tx);
	}
}

/*
 * The arguments are in this order because mdb can only print the
 * first 5; we want them to be useful.
 */
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
    dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
    dmu_objset_type_t ostype, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	blkptr_t *bp_toread;

	bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
	*bp_toread = *bp;

	/* ASSERT(pbuf == NULL || arc_released(pbuf)); */

	if (dsl_scan_check_pause(scn, zb))
		goto out;

	if (dsl_scan_check_resume(scn, dnp, zb))
		goto out;

	if (BP_IS_HOLE(bp))
		goto out;

	scn->scn_visited_this_txg++;

	/*
	 * This debugging is commented out to conserve stack space. This
	 * function is called recursively and the debugging adds several
	 * bytes to the stack for each call. It can be commented back in
	 * if required to debug an issue in dsl_scan_visitbp().
	 *
	 * dprintf_bp(bp,
	 *    "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
	 *    ds, ds ? ds->ds_object : 0,
	 *    zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
	 *    bp);
	 */

	if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
		goto out;

	if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
		goto out;

	/*
	 * If dsl_scan_ddt() has already visited this block, it will have
	 * already done any translations or scrubbing, so don't call the
	 * callback again.
	 */
	if (ddt_class_contains(dp->dp_spa,
	    scn->scn_phys.scn_ddt_class_max, bp)) {
		goto out;
	}

	/*
	 * If this block is from the future (after cur_max_txg), then we
	 * are doing this on behalf of a deleted snapshot, and we will
	 * revisit the future block on the next pass of this dataset.
	 * Don't scan it now unless we need to because something
	 * under it was modified.
	 */
	if (BP_PHYSICAL_BIRTH(bp) <= scn->scn_phys.scn_cur_max_txg) {
		scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
	}
out:
	kmem_free(bp_toread, sizeof (blkptr_t));
}

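/*
 * Begin traversal of a dataset (or of the MOS when ds is NULL)
 * starting from its root block pointer.
 */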
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
    dmu_tx_t *tx)
{
	zbookmark_phys_t zb;

	SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	dsl_scan_visitbp(bp, &zb, NULL,
	    ds, scn, DMU_OST_NONE, tx);

	dprintf_ds(ds, "finished scan%s", "");
}

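/*
 * Called when a dataset is destroyed during a scan: fix up the scan
 * bookmark or work queue so the traversal can continue with the
 * dataset's next snapshot, or abandon it cleanly.
 */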
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		if (ds->ds_is_snapshot) {
			/*
			 * Note:
			 *  - scn_cur_{min,max}_txg stays the same.
			 *  - Setting the flag is not really necessary if
			 *    scn_cur_max_txg == scn_max_txg, because there
			 *    is nothing after this snapshot that we care
			 *    about. However, we set it anyway and then
			 *    ignore it when we retraverse it in
			 *    dsl_scan_visitds().
			 */
			scn->scn_phys.scn_bookmark.zb_objset =
			    dsl_dataset_phys(ds)->ds_next_snap_obj;
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset zb_objset to %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
			scn->scn_phys.scn_flags |= DSF_VISIT_DS_AGAIN;
		} else {
			SET_BOOKMARK(&scn->scn_phys.scn_bookmark,
			    ZB_DESTROYED_OBJSET, 0, 0, 0);
			zfs_dbgmsg("destroying ds %llu; currently traversing; "
			    "reset bookmark to -1,0,0,0",
			    (u_longlong_t)ds->ds_object);
		}
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		if (ds->ds_is_snapshot) {
			/*
			 * We keep the same mintxg; it could be >
			 * ds_creation_txg if the previous snapshot was
			 * deleted too.
			 */
			VERIFY(zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_next_snap_obj,
			    mintxg, tx) == 0);
			zfs_dbgmsg("destroying ds %llu; in queue; "
			    "replacing with %llu",
			    (u_longlong_t)ds->ds_object,
			    (u_longlong_t)dsl_dataset_phys(ds)->
			    ds_next_snap_obj);
		} else {
			zfs_dbgmsg("destroying ds %llu; in queue; removing",
			    (u_longlong_t)ds->ds_object);
		}
	}

	/*
	 * dsl_scan_sync() should be called after this, and should sync
	 * out our changed state, but just to be safe, do it here.
	 */
	dsl_scan_sync_state(scn, tx);
}

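/*
 * Called when a dataset being scanned is snapshotted: hand the scan
 * bookmark or queue entry over to the new snapshot, which now owns
 * the blocks being traversed.
 */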
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);

	if (scn->scn_phys.scn_bookmark.zb_objset == ds->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset =
		    dsl_dataset_phys(ds)->ds_prev_snap_obj;
		zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object, &mintxg) == 0) {
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
		zfs_dbgmsg("snapshotting ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds->ds_object,
		    (u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
	}
	dsl_scan_sync_state(scn, tx);
}

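/*
 * Called when a clone swap occurs during a scan: swap the scan
 * bookmark and work-queue entries between the two datasets so the
 * traversal follows the blocks to their new owner.
 */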
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
	dsl_pool_t *dp = ds1->ds_dir->dd_pool;
	dsl_scan_t *scn = dp->dp_scan;
	uint64_t mintxg;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_phys.scn_bookmark.zb_objset == ds1->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds2->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (scn->scn_phys.scn_bookmark.zb_objset == ds2->ds_object) {
		scn->scn_phys.scn_bookmark.zb_objset = ds1->ds_object;
		zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
		    "reset zb_objset to %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds1->ds_object, &mintxg) == 0) {
		int err;

		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
		err = zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
		VERIFY(err == 0 || err == EEXIST);
		if (err == EEXIST) {
			/* Both were there to begin with */
			VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
			    scn->scn_phys.scn_queue_obj,
			    ds1->ds_object, mintxg, tx));
		}
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds1->ds_object,
		    (u_longlong_t)ds2->ds_object);
	} else if (zap_lookup_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds2->ds_object, &mintxg) == 0) {
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
		ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
		VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
		zfs_dbgmsg("clone_swap ds %llu; in queue; "
		    "replacing with %llu",
		    (u_longlong_t)ds2->ds_object,
		    (u_longlong_t)ds1->ds_object);
	}

	dsl_scan_sync_state(scn, tx);
}

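/*
 * Callback state and callback for queueing the clones of an origin
 * snapshot: for each dataset whose dsl_dir originates at
 * eca->originobj, walk back to the snapshot just after the origin and
 * add it to the scan work queue.
 */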
struct enqueue_clones_arg {
	dmu_tx_t *tx;
	uint64_t originobj;
};

/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	struct enqueue_clones_arg *eca = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != eca->originobj)
		return (0);

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != eca->originobj) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);

		dsl_dataset_rele(ds, FTAG);
		if (err)
			return (err);
		ds = prev;
	}
	VERIFY(zap_add_int_key(dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj, ds->ds_object,
	    dsl_dataset_phys(ds)->ds_prev_snap_txg, eca->tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

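/*
 * Scan a single dataset: traverse from its root block pointer, then
 * add its next snapshot and any clones to the scan work queue.
 */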
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	dsl_dataset_t *ds;
	objset_t *os;
	char *dsname;

	VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));

	if (scn->scn_phys.scn_cur_min_txg >=
	    scn->scn_phys.scn_max_txg) {
		/*
		 * This can happen if this snapshot was created after the
		 * scan started, and we already completed a previous snapshot
		 * that was created after the scan started. This snapshot
		 * only references blocks with:
		 *
		 *   birth < our ds_creation_txg
		 *     cur_min_txg is no less than ds_creation_txg.
		 *     We have already visited these blocks.
		 * or
		 *   birth > scn_max_txg
		 *     The scan requested not to visit these blocks.
		 *
		 * Subsequent snapshots (and clones) can reference our
		 * blocks, or blocks with even higher birth times.
		 * Therefore we do not need to visit them either,
		 * so we do not add them to the work queue.
		 *
		 * Note that checking for cur_min_txg >= cur_max_txg
		 * is not sufficient, because in that case we may need to
		 * visit subsequent snapshots. This happens when min_txg > 0,
		 * which raises cur_min_txg. In this case we will visit
		 * this dataset but skip all of its blocks, because the
		 * rootbp's birth time is < cur_min_txg. Then we will
		 * add the next snapshots/clones to the work queue.
		 */
		char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
		dsl_dataset_name(ds, dsname);
		zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
		    "cur_min_txg (%llu) >= max_txg (%llu)",
		    dsobj, dsname,
		    scn->scn_phys.scn_cur_min_txg,
		    scn->scn_phys.scn_max_txg);
		kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

		goto out;
	}

	if (dmu_objset_from_ds(ds, &os))
		goto out;

	/*
	 * Only the ZIL in the head (non-snapshot) is valid. Even though
	 * snapshots can have ZIL block pointers (which may be the same
	 * BP as in the head), they must be ignored. So we traverse the
	 * ZIL here, rather than in scan_recurse(), because the regular
	 * snapshot block-sharing rules don't apply to it.
	 */
	if (DSL_SCAN_IS_SCRUB_RESILVER(scn) && !ds->ds_is_snapshot)
		dsl_scan_zil(dp, &os->os_zil_header);

	/*
	 * Iterate over the bps in this ds.
	 */
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);

	dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
	dsl_dataset_name(ds, dsname);
	zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
	    "pausing=%u",
	    (longlong_t)dsobj, dsname,
	    (longlong_t)scn->scn_phys.scn_cur_min_txg,
	    (longlong_t)scn->scn_phys.scn_cur_max_txg,
	    (int)scn->scn_pausing);
	kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);

	if (scn->scn_pausing)
		goto out;

	/*
	 * We've finished this pass over this dataset.
	 */

	/*
	 * If we did not completely visit this dataset, do another pass.
	 */
	if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
		zfs_dbgmsg("incomplete pass; visiting again");
		scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, ds->ds_object,
		    scn->scn_phys.scn_cur_max_txg, tx) == 0);
		goto out;
	}

	/*
	 * Add descendant datasets to work queue.
	 */
	if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
		VERIFY(zap_add_int_key(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj,
		    dsl_dataset_phys(ds)->ds_next_snap_obj,
		    dsl_dataset_phys(ds)->ds_creation_txg, tx) == 0);
	}
	if (dsl_dataset_phys(ds)->ds_num_children > 1) {
		boolean_t usenext = B_FALSE;
		if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
			uint64_t count;
			/*
			 * A bug in a previous version of the code could
			 * cause upgrade_clones_cb() to not set
			 * ds_next_snap_obj when it should, leading to a
			 * missing entry. Therefore we can only use the
			 * next_clones_obj when its count is correct.
			 */
			int err = zap_count(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
			if (err == 0 &&
			    count == dsl_dataset_phys(ds)->ds_num_children - 1)
				usenext = B_TRUE;
		}

		if (usenext) {
			VERIFY0(zap_join_key(dp->dp_meta_objset,
			    dsl_dataset_phys(ds)->ds_next_clones_obj,
			    scn->scn_phys.scn_queue_obj,
			    dsl_dataset_phys(ds)->ds_creation_txg, tx));
		} else {
			struct enqueue_clones_arg eca;
			eca.tx = tx;
			eca.originobj = ds->ds_object;

			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_clones_cb, &eca, DS_FIND_CHILDREN));
		}
	}

out:
	dsl_dataset_rele(ds, FTAG);
}

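/*
 * Called for each head dataset in the pool: walk back to the oldest
 * snapshot in its lineage (stopping at a clone origin) and add that
 * dataset to the scan work queue.
 */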
/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
	dmu_tx_t *tx = arg;
	dsl_dataset_t *ds;
	int err;
	dsl_scan_t *scn = dp->dp_scan;

	err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
	if (err)
		return (err);

	while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
		dsl_dataset_t *prev;
		err = dsl_dataset_hold_obj(dp,
		    dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
		if (err) {
			dsl_dataset_rele(ds, FTAG);
			return (err);
		}

		/*
		 * If this is a clone, we don't need to worry about it for now.
		 */
		if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
			dsl_dataset_rele(ds, FTAG);
			dsl_dataset_rele(prev, FTAG);
			return (0);
		}
		dsl_dataset_rele(ds, FTAG);
		ds = prev;
	}

	VERIFY(zap_add_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
	    ds->ds_object, dsl_dataset_phys(ds)->ds_prev_snap_txg, tx) == 0);
	dsl_dataset_rele(ds, FTAG);
	return (0);
}

/*
 * Scrub/dedup interaction.
 *
 * If there are N references to a deduped block, we don't want to scrub it
 * N times -- ideally, we should scrub it exactly once.
 *
 * We leverage the fact that the dde's replication class (enum ddt_class)
 * is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
 * (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
 *
 * To prevent excess scrubbing, the scrub begins by walking the DDT
 * to find all blocks with refcnt > 1, and scrubs each of these once.
 * Since there are two replication classes which contain blocks with
 * refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
 * Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
 *
 * There would be nothing more to say if a block's refcnt couldn't change
 * during a scrub, but of course it can so we must account for changes
 * in a block's replication class.
 *
 * Here's an example of what can occur:
 *
 * If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
 * when visited during the top-down scrub phase, it will be scrubbed twice.
 * This negates our scrub optimization, but is otherwise harmless.
 *
 * If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
 * on each visit during the top-down scrub phase, it will never be scrubbed.
 * To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
 * reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
 * DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
 * while a scrub is in progress, it scrubs the block right then.
 */
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
	ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
	ddt_entry_t dde;
	int error;
	uint64_t n = 0;

	bzero(&dde, sizeof (ddt_entry_t));

	while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
		ddt_t *ddt;

		if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
			break;
		dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
		    (longlong_t)ddb->ddb_class,
		    (longlong_t)ddb->ddb_type,
		    (longlong_t)ddb->ddb_checksum,
		    (longlong_t)ddb->ddb_cursor);

		/* There should be no pending changes to the dedup table */
		ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
		ASSERT(avl_first(&ddt->ddt_tree) == NULL);

		dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
		n++;

		if (dsl_scan_check_pause(scn, NULL))
			break;
	}

	zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; pausing=%u",
	    (longlong_t)n, (int)scn->scn_phys.scn_ddt_class_max,
	    (int)scn->scn_pausing);

	ASSERT(error == 0 || error == ENOENT);
	ASSERT(error != ENOENT ||
	    ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}

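/*
 * Scrub/resilver each physical copy (ddt_phys_t) of a dedup-table
 * entry whose birth falls within the scanned txg range.
 */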
/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
    ddt_entry_t *dde, dmu_tx_t *tx)
{
	const ddt_key_t *ddk = &dde->dde_key;
	ddt_phys_t *ddp = dde->dde_phys;
	blkptr_t bp;
	zbookmark_phys_t zb = { 0 };
	int p;

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
		if (ddp->ddp_phys_birth == 0 ||
		    ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
			continue;
		ddt_bp_create(checksum, ddk, ddp, &bp);

		scn->scn_visited_this_txg++;
		scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
	}
}

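/*
 * Top-level traversal for one txg: finish the DDT phase first, then
 * visit the MOS and origin, then work through the queued datasets.
 */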
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
	dsl_pool_t *dp = scn->scn_dp;
	zap_cursor_t *zc;
	zap_attribute_t *za;

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_ddt(scn, tx);
		if (scn->scn_pausing)
			return;
	}

	if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
		/* First do the MOS & ORIGIN */

		scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
		scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
		dsl_scan_visit_rootbp(scn, NULL,
		    &dp->dp_meta_rootbp, tx);
		spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
		if (scn->scn_pausing)
			return;

		if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
			VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
			    enqueue_cb, tx, DS_FIND_CHILDREN));
		} else {
			dsl_scan_visitds(scn,
			    dp->dp_origin_snap->ds_object, tx);
		}
		ASSERT(!scn->scn_pausing);
	} else if (scn->scn_phys.scn_bookmark.zb_objset !=
	    ZB_DESTROYED_OBJSET) {
		/*
		 * If we were paused, continue from here. Note if the
		 * ds we were paused on was deleted, the zb_objset may
		 * be -1, so we will skip this and find a new objset
		 * below.
		 */
		dsl_scan_visitds(scn, scn->scn_phys.scn_bookmark.zb_objset, tx);
		if (scn->scn_pausing)
			return;
	}

	/*
	 * In case we were paused right at the end of the ds, zero the
	 * bookmark so we don't think that we're still trying to resume.
	 */
	bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
	zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);

	/* keep pulling things out of the zap-object-as-queue */
	while (zap_cursor_init(zc, dp->dp_meta_objset,
	    scn->scn_phys.scn_queue_obj),
	    zap_cursor_retrieve(zc, za) == 0) {
		dsl_dataset_t *ds;
		uint64_t dsobj;

		dsobj = strtonum(za->za_name, NULL);
		VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
		    scn->scn_phys.scn_queue_obj, dsobj, tx));

		/* Set up min/max txg */
		VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
		if (za->za_first_integer != 0) {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    za->za_first_integer);
		} else {
			scn->scn_phys.scn_cur_min_txg =
			    MAX(scn->scn_phys.scn_min_txg,
			    dsl_dataset_phys(ds)->ds_prev_snap_txg);
		}
		scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
		dsl_dataset_rele(ds, FTAG);

		dsl_scan_visitds(scn, dsobj, tx);
		zap_cursor_fini(zc);
		if (scn->scn_pausing)
			goto out;
	}
	zap_cursor_fini(zc);
out:
	kmem_free(za, sizeof (zap_attribute_t));
	kmem_free(zc, sizeof (zap_cursor_t));
}

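/*
 * Decide whether this txg's async-destroy (free) work has gone on
 * long enough and should yield until the next txg sync.
 */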
static boolean_t
dsl_scan_free_should_pause(dsl_scan_t *scn)
{
	uint64_t elapsed_nanosecs;

	if (zfs_recover)
		return (B_FALSE);

	if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
		return (B_TRUE);

	elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
	return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
	    (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
	    txg_sync_waiting(scn->scn_dp)) ||
	    spa_shutting_down(scn->scn_dp->dp_spa));
}

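/*
 * bpobj/bptree iteration callback: frees one block pointer and credits
 * the reclaimed space back against the pool's free-dir accounting.
 * Returns ERESTART when the free work should yield for this txg.
 */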
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = arg;

	if (!scn->scn_is_bptree ||
	    (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
		if (dsl_scan_free_should_pause(scn))
			return (SET_ERROR(ERESTART));
	}

	zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
	    dmu_tx_get_txg(tx), bp, 0));
	dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
	    -bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
	    -BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
	scn->scn_visited_this_txg++;
	return (0);
}

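/*
 * Report whether there is scan or async-destroy work outstanding for
 * this pool.
 */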
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
	spa_t *spa = scn->scn_dp->dp_spa;
	uint64_t used = 0, comp, uncomp;

	if (spa->spa_load_state != SPA_LOAD_NONE)
		return (B_FALSE);
	if (spa_shutting_down(spa))
		return (B_FALSE);
	if (scn->scn_phys.scn_state == DSS_SCANNING ||
	    (scn->scn_async_destroying && !scn->scn_async_stalled))
		return (B_TRUE);

	if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
		    &used, &comp, &uncomp);
	}
	return (used != 0);
}

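/*
 * Called once per txg from the sync thread: processes pending async
 * destroys first, then advances any in-progress scrub or resilver
 * until it either pauses for the txg or completes.
 */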
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
	dsl_scan_t *scn = dp->dp_scan;
	spa_t *spa = dp->dp_spa;
	int err = 0;

	/*
	 * Check for scn_restart_txg before checking spa_load_state, so
	 * that we can restart an old-style scan while the pool is being
	 * imported (see dsl_scan_init).
	 */
	if (dsl_scan_restarting(scn, tx)) {
		pool_scan_func_t func = POOL_SCAN_SCRUB;
		dsl_scan_done(scn, B_FALSE, tx);
		if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
			func = POOL_SCAN_RESILVER;
		zfs_dbgmsg("restarting scan func=%u txg=%llu",
		    func, tx->tx_txg);
		dsl_scan_setup_sync(&func, tx);
	}

	/*
	 * Only process scans in sync pass 1.
	 */
	if (spa_sync_pass(dp->dp_spa) > 1)
		return;

	/*
	 * If the spa is shutting down, then stop scanning. This will
	 * ensure that the scan does not dirty any new data during the
	 * shutdown phase.
	 */
	if (spa_shutting_down(spa))
		return;

	/*
	 * If the scan is inactive due to a stalled async destroy, try again.
	 */
	if (!scn->scn_async_stalled && !dsl_scan_active(scn))
		return;

	scn->scn_visited_this_txg = 0;
	scn->scn_pausing = B_FALSE;
	scn->scn_sync_start_time = gethrtime();
	spa->spa_scrub_active = B_TRUE;

	/*
	 * First process the async destroys. If we pause, don't do
	 * any scrubbing or resilvering. This ensures that there are no
	 * async destroys while we are scanning, so the scan code doesn't
	 * have to worry about traversing it. It is also faster to free the
	 * blocks than to scrub them.
	 */
	if (zfs_free_bpobj_enabled &&
	    spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
		scn->scn_is_bptree = B_FALSE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bpobj_iterate(&dp->dp_free_bpobj,
		    dsl_scan_free_block_cb, scn, tx);
		VERIFY3U(0, ==, zio_wait(scn->scn_zio_root));

		if (err != 0 && err != ERESTART)
			zfs_panic_recover("error %u from bpobj_iterate()", err);
	}

	if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
		ASSERT(scn->scn_async_destroying);
		scn->scn_is_bptree = B_TRUE;
		scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
		    NULL, ZIO_FLAG_MUSTSUCCEED);
		err = bptree_iterate(dp->dp_meta_objset,
		    dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
		VERIFY0(zio_wait(scn->scn_zio_root));

		if (err == EIO || err == ECKSUM) {
			err = 0;
		} else if (err != 0 && err != ERESTART) {
			zfs_panic_recover("error %u from "
			    "traverse_dataset_destroyed()", err);
		}

		if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
			/* finished; deactivate async destroy feature */
			spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
			ASSERT(!spa_feature_is_active(spa,
			    SPA_FEATURE_ASYNC_DESTROY));
			VERIFY0(zap_remove(dp->dp_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_BPTREE_OBJ, tx));
			VERIFY0(bptree_free(dp->dp_meta_objset,
			    dp->dp_bptree_obj, tx));
			dp->dp_bptree_obj = 0;
			scn->scn_async_destroying = B_FALSE;
			scn->scn_async_stalled = B_FALSE;
		} else {
			/*
			 * If we didn't make progress, mark the async
			 * destroy as stalled, so that we will not initiate
			 * a spa_sync() on its behalf. Note that we only
			 * check this if we are not finished, because if the
			 * bptree had no blocks for us to visit, we can
			 * finish without "making progress".
			 */
			scn->scn_async_stalled =
			    (scn->scn_visited_this_txg == 0);
		}
	}
	if (scn->scn_visited_this_txg) {
		zfs_dbgmsg("freed %llu blocks in %llums from "
		    "free_bpobj/bptree txg %llu; err=%u",
		    (longlong_t)scn->scn_visited_this_txg,
		    (longlong_t)
		    NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
		    (longlong_t)tx->tx_txg, err);
		scn->scn_visited_this_txg = 0;

		/*
		 * Write out changes to the DDT that may be required as a
		 * result of the blocks freed. This ensures that the DDT
		 * is clean when a scrub/resilver runs.
		 */
		ddt_sync(spa, tx->tx_txg);
	}
	if (err != 0)
		return;
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
	    zfs_free_leak_on_eio &&
	    (dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
	    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
		/*
		 * We have finished background destroying, but there is still
		 * some space left in the dp_free_dir. Transfer this leaked
		 * space to the dp_leak_dir.
		 */
		if (dp->dp_leak_dir == NULL) {
			rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
			(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
			    LEAK_DIR_NAME, tx);
			VERIFY0(dsl_pool_open_special_dir(dp,
			    LEAK_DIR_NAME, &dp->dp_leak_dir));
			rrw_exit(&dp->dp_config_rwlock, FTAG);
		}
		dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
		    dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
		dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
		    -dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
	}
	if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
		/* finished; verify that space accounting went to zero */
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
		ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
	}

	if (scn->scn_phys.scn_state != DSS_SCANNING)
		return;

	if (scn->scn_done_txg == tx->tx_txg) {
		ASSERT(!scn->scn_pausing);
		/* finished with scan. */
		zfs_dbgmsg("txg %llu scan complete", tx->tx_txg);
		dsl_scan_done(scn, B_TRUE, tx);
		ASSERT3U(spa->spa_scrub_inflight, ==, 0);
		dsl_scan_sync_state(scn, tx);
		return;
	}

	if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
	    scn->scn_phys.scn_ddt_class_max) {
		zfs_dbgmsg("doing scan sync txg %llu; "
		    "ddt bm=%llu/%llu/%llu/%llx",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_class,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_type,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_checksum,
		    (longlong_t)scn->scn_phys.scn_ddt_bookmark.ddb_cursor);
		ASSERT(scn->scn_phys.scn_bookmark.zb_objset == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_object == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_level == 0);
		ASSERT(scn->scn_phys.scn_bookmark.zb_blkid == 0);
	} else {
		zfs_dbgmsg("doing scan sync txg %llu; bm=%llu/%llu/%llu/%llu",
		    (longlong_t)tx->tx_txg,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_objset,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_object,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_level,
		    (longlong_t)scn->scn_phys.scn_bookmark.zb_blkid);
	}

	scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
	    NULL, ZIO_FLAG_CANFAIL);
	dsl_pool_config_enter(dp, FTAG);
	dsl_scan_visit(scn, tx);
	dsl_pool_config_exit(dp, FTAG);
	(void) zio_wait(scn->scn_zio_root);
	scn->scn_zio_root = NULL;

	zfs_dbgmsg("visited %llu blocks in %llums",
	    (longlong_t)scn->scn_visited_this_txg,
	    (longlong_t)NSEC2MSEC(gethrtime() - scn->scn_sync_start_time));

	if (!scn->scn_pausing) {
		scn->scn_done_txg = tx->tx_txg + 1;
		zfs_dbgmsg("txg %llu traversal complete, waiting till txg %llu",
		    tx->tx_txg, scn->scn_done_txg);
	}

	if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight > 0) {
			cv_wait(&spa->spa_scrub_io_cv,
			    &spa->spa_scrub_lock);
		}
		mutex_exit(&spa->spa_scrub_lock);
	}

	dsl_scan_sync_state(scn, tx);
}

/*
 * This will start a new scan, or restart an existing one.
 */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
	if (txg == 0) {
		dmu_tx_t *tx;
		tx = dmu_tx_create_dd(dp->dp_mos_dir);
		VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));

		txg = dmu_tx_get_txg(tx);
		dp->dp_scan->scn_restart_txg = txg;
		dmu_tx_commit(tx);
	} else {
		dp->dp_scan->scn_restart_txg = txg;
	}
	zfs_dbgmsg("restarting resilver txg=%llu", txg);
}

boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
	return (dp->dp_scan->scn_phys.scn_state == DSS_SCANNING &&
	    dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}

/*
 * scrub consumers
 */

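/*
 * Update per-level, per-type block statistics for the scanned block
 * pointer, including ditto-copy placement counts.
 */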
static void
count_block(zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
	int i;

	/*
	 * If we resume after a reboot, zab will be NULL; don't record
	 * incomplete stats in that case.
	 */
	if (zab == NULL)
		return;

	for (i = 0; i < 4; i++) {
		int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
		int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
		int equal;
		zfs_blkstat_t *zb;

		if (t & DMU_OT_NEWTYPE)
			t = DMU_OT_OTHER;

		zb = &zab->zab_type[l][t];
		zb->zb_count++;
		zb->zb_asize += BP_GET_ASIZE(bp);
		zb->zb_lsize += BP_GET_LSIZE(bp);
		zb->zb_psize += BP_GET_PSIZE(bp);
		zb->zb_gangs += BP_COUNT_GANG(bp);

		switch (BP_GET_NDVAS(bp)) {
		case 2:
			if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1]))
				zb->zb_ditto_2_of_2_samevdev++;
			break;
		case 3:
			equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[1])) +
			    (DVA_GET_VDEV(&bp->blk_dva[0]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2])) +
			    (DVA_GET_VDEV(&bp->blk_dva[1]) ==
			    DVA_GET_VDEV(&bp->blk_dva[2]));
			if (equal == 1)
				zb->zb_ditto_2_of_3_samevdev++;
			else if (equal == 3)
				zb->zb_ditto_3_of_3_samevdev++;
			break;
		}
	}
}

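/*
 * zio completion callback for scrub/resilver reads: releases the data
 * buffer, updates the in-flight count, and records any I/O errors.
 */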
|
|
|
|
static void
|
|
|
|
dsl_scan_scrub_done(zio_t *zio)
|
|
|
|
{
|
|
|
|
spa_t *spa = zio->io_spa;
|
|
|
|
|
|
|
|
zio_data_buf_free(zio->io_data, zio->io_size);
|
|
|
|
|
|
|
|
mutex_enter(&spa->spa_scrub_lock);
|
|
|
|
spa->spa_scrub_inflight--;
|
|
|
|
cv_broadcast(&spa->spa_scrub_io_cv);
	if (zio->io_error && (zio->io_error != ECKSUM ||
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
		spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors++;
	}
	mutex_exit(&spa->spa_scrub_lock);
}

static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	dsl_scan_t *scn = dp->dp_scan;
	size_t size = BP_GET_PSIZE(bp);
	spa_t *spa = dp->dp_spa;
	uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
	boolean_t needs_io = B_FALSE;
	int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
	int scan_delay = 0;
	int d;

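	/*
	 * Only blocks physically born inside the scan's txg window are
	 * of interest; anything outside it needs no scrub or resilver
	 * I/O.
	 */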
	if (phys_birth <= scn->scn_phys.scn_min_txg ||
	    phys_birth >= scn->scn_phys.scn_max_txg)
		return (0);

	count_block(dp->dp_blkstats, bp);

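	/*
	 * Embedded block pointers carry their payload inside the bp
	 * itself, so there is no separate on-disk copy to verify or
	 * repair.
	 */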
	if (BP_IS_EMBEDDED(bp))
		return (0);

	ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
	if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
		zio_flags |= ZIO_FLAG_SCRUB;
		needs_io = B_TRUE;
		scan_delay = zfs_scrub_delay;
	} else {
		ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
		zio_flags |= ZIO_FLAG_RESILVER;
		needs_io = B_FALSE;
		scan_delay = zfs_resilver_delay;
	}

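	/*
	 * A scrub reads every block unconditionally; a resilver starts
	 * with needs_io = B_FALSE and only issues a read if the per-DVA
	 * checks below find the block in a vdev's dirty time log.
	 */
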
	/* If it's an intent log block, failure is expected. */
	if (zb->zb_level == ZB_ZIL_LEVEL)
		zio_flags |= ZIO_FLAG_SPECULATIVE;

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		vdev_t *vd = vdev_lookup_top(spa,
		    DVA_GET_VDEV(&bp->blk_dva[d]));

		/*
		 * Keep track of how much data we've examined so that
		 * zpool(1M) status can make useful progress reports.
		 */
		scn->scn_phys.scn_examined += DVA_GET_ASIZE(&bp->blk_dva[d]);
		spa->spa_scan_pass_exam += DVA_GET_ASIZE(&bp->blk_dva[d]);

		/* if it's a resilver, this may not be in the target range */
		if (!needs_io) {
			if (DVA_GET_GANG(&bp->blk_dva[d])) {
				/*
				 * Gang members may be spread across multiple
				 * vdevs, so the best estimate we have is the
				 * scrub range, which has already been checked.
				 * XXX -- it would be better to change our
				 * allocation policy to ensure that all
				 * gang members reside on the same vdev.
				 */
				needs_io = B_TRUE;
			} else {
				needs_io = vdev_dtl_contains(vd, DTL_PARTIAL,
				    phys_birth, 1);
			}
		}
	}

	if (needs_io && !zfs_no_scrub_io) {
		vdev_t *rvd = spa->spa_root_vdev;
		uint64_t maxinflight = rvd->vdev_children * zfs_top_maxinflight;
		void *data = zio_data_buf_alloc(size);

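		/*
		 * Cap concurrent scrub reads at zfs_top_maxinflight per
		 * top-level vdev, pool-wide, blocking here until a slot
		 * frees up.
		 */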
		mutex_enter(&spa->spa_scrub_lock);
		while (spa->spa_scrub_inflight >= maxinflight)
			cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
		spa->spa_scrub_inflight++;
		mutex_exit(&spa->spa_scrub_lock);

		/*
		 * If we're seeing recent (zfs_scan_idle) "important" I/Os
		 * then throttle our workload to limit the impact of a scan.
		 */
		if (ddi_get_lbolt64() - spa->spa_last_io <= zfs_scan_idle)
			delay(scan_delay);

		zio_nowait(zio_read(NULL, spa, bp, data, size,
		    dsl_scan_scrub_done, NULL, ZIO_PRIORITY_SCRUB,
		    zio_flags, zb));
	}

	/* do not relocate this block */
	return (0);
}

int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
	spa_t *spa = dp->dp_spa;

	/*
	 * Purge all vdev caches and probe all devices.  We do this here
	 * rather than in sync context because this requires a writer lock
	 * on the spa_config lock, which we can't do from sync context.  The
	 * spa_scrub_reopen flag indicates that vdev_open() should not
	 * attempt to start another scrub.
	 */
	spa_vdev_state_enter(spa, SCL_NONE);
	spa->spa_scrub_reopen = B_TRUE;
	vdev_reopen(spa->spa_root_vdev);
	spa->spa_scrub_reopen = B_FALSE;
	(void) spa_vdev_state_exit(spa, NULL, 0);

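	/*
	 * The scan setup itself runs as a sync task, so that scn_phys
	 * is initialized and persisted from syncing context.
	 */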
	return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
	    dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}
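
/*
 * Example (sketch, not a verbatim excerpt): a manual "zpool scrub"
 * reaches this entry point through spa_scan(), roughly:
 *
 *	spa_scan(spa, POOL_SCAN_SCRUB) ->
 *	    dsl_scan(spa->spa_dsl_pool, POOL_SCAN_SCRUB)
 */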

static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
	return (scn->scn_restart_txg != 0 &&
	    scn->scn_restart_txg <= tx->tx_txg);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_top_maxinflight, int, 0644);
MODULE_PARM_DESC(zfs_top_maxinflight, "Max I/Os per top-level");

module_param(zfs_resilver_delay, int, 0644);
MODULE_PARM_DESC(zfs_resilver_delay, "Number of ticks to delay resilver");

module_param(zfs_scrub_delay, int, 0644);
MODULE_PARM_DESC(zfs_scrub_delay, "Number of ticks to delay scrub");

module_param(zfs_scan_idle, int, 0644);
MODULE_PARM_DESC(zfs_scan_idle, "Idle window in clock ticks");

module_param(zfs_scan_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_scan_min_time_ms, "Min millisecs to scrub per txg");

module_param(zfs_free_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_free_min_time_ms, "Min millisecs to free per txg");

module_param(zfs_resilver_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_resilver_min_time_ms, "Min millisecs to resilver per txg");

module_param(zfs_no_scrub_io, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");

module_param(zfs_no_scrub_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");

module_param(zfs_free_max_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_free_max_blocks, "Max number of blocks freed in one txg");

module_param(zfs_free_bpobj_enabled, int, 0644);
MODULE_PARM_DESC(zfs_free_bpobj_enabled, "Enable processing of the free_bpobj");
#endif