2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* CDDL HEADER START
|
|
|
|
*
|
|
|
|
* The contents of this file are subject to the terms of the
|
|
|
|
* Common Development and Distribution License (the "License").
|
|
|
|
* You may not use this file except in compliance with the License.
|
|
|
|
*
|
|
|
|
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
|
|
|
* or http://www.opensolaris.org/os/licensing.
|
|
|
|
* See the License for the specific language governing permissions
|
|
|
|
* and limitations under the License.
|
|
|
|
*
|
|
|
|
* When distributing Covered Code, include this CDDL HEADER in each
|
|
|
|
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
|
|
|
* If applicable, add the following below this CDDL HEADER, with the
|
|
|
|
* fields enclosed by brackets "[]" replaced with your own identifying
|
|
|
|
* information: Portions Copyright [yyyy] [name of copyright owner]
|
|
|
|
*
|
|
|
|
* CDDL HEADER END
|
|
|
|
*/
|
|
|
|
/*
|
2010-05-29 00:45:14 +04:00
|
|
|
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
|
2008-11-20 23:01:55 +03:00
|
|
|
* Use is subject to license terms.
|
|
|
|
*/
|
|
|
|
|
2012-01-24 06:43:32 +04:00
|
|
|
/*
|
OpenZFS 9102 - zfs should be able to initialize storage devices
PROBLEM
========
The first access to a block incurs a performance penalty on some platforms
(e.g. AWS's EBS, VMware VMDKs). Therefore we recommend that volumes are
"thick provisioned", where supported by the platform (VMware). This can
create a large delay in getting a new virtual machines up and running (or
adding storage to an existing Engine). If the thick provision step is
omitted, write performance will be suboptimal until all blocks on the LUN
have been written.
SOLUTION
=========
This feature introduces a way to 'initialize' the disks at install or in the
background to make sure we don't incur this first read penalty.
When an entire LUN is added to ZFS, we make all space available immediately,
and allow ZFS to find unallocated space and zero it out. This works with
concurrent writes to arbitrary offsets, ensuring that we don't zero out
something that has been (or is in the middle of being) written. This scheme
can also be applied to existing pools (affecting only free regions on the
vdev). Detailed design:
- new subcommand:zpool initialize [-cs] <pool> [<vdev> ...]
- start, suspend, or cancel initialization
- Creates new open-context thread for each vdev
- Thread iterates through all metaslabs in this vdev
- Each metaslab:
- select a metaslab
- load the metaslab
- mark the metaslab as being zeroed
- walk all free ranges within that metaslab and translate
them to ranges on the leaf vdev
- issue a "zeroing" I/O on the leaf vdev that corresponds to
a free range on the metaslab we're working on
- continue until all free ranges for this metaslab have been
"zeroed"
- reset/unmark the metaslab being zeroed
- if more metaslabs exist, then repeat above tasks.
- if no more metaslabs, then we're done.
- progress for the initialization is stored on-disk in the vdev’s
leaf zap object. The following information is stored:
- the last offset that has been initialized
- the state of the initialization process (i.e. active,
suspended, or canceled)
- the start time for the initialization
- progress is reported via the zpool status command and shows
information for each of the vdevs that are initializing
Porting notes:
- Added zfs_initialize_value module parameter to set the pattern
written by "zpool initialize".
- Added zfs_vdev_{initializing,removal}_{min,max}_active module options.
Authored by: George Wilson <george.wilson@delphix.com>
Reviewed by: John Wren Kennedy <john.kennedy@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: loli10K <ezomori.nozomu@gmail.com>
Reviewed by: Brian Behlendorf <behlendorf1@llnl.gov>
Approved by: Richard Lowe <richlowe@richlowe.net>
Signed-off-by: Tim Chase <tim@chase2k.com>
Ported-by: Tim Chase <tim@chase2k.com>
OpenZFS-issue: https://www.illumos.org/issues/9102
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/c3963210eb
Closes #8230
2018-12-19 17:54:59 +03:00
|
|
|
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
|
2012-01-24 06:43:32 +04:00
|
|
|
*/
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* The 'missing' vdev is a special vdev type used only during import. It
|
|
|
|
* signifies a placeholder in the root vdev for some vdev that we know is
|
|
|
|
* missing. We pass it down to the kernel to allow the rest of the
|
|
|
|
 * configuration to be parsed and an attempt made to open all available devices.
|
|
|
|
* Because its GUID is always 0, we know that the guid sum will mismatch and we
|
|
|
|
* won't be able to open the pool anyway.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/zfs_context.h>
|
|
|
|
#include <sys/spa.h>
|
|
|
|
#include <sys/vdev_impl.h>
|
|
|
|
#include <sys/fs/zfs.h>
|
|
|
|
#include <sys/zio.h>
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
static int
|
2012-01-24 06:43:32 +04:00
|
|
|
vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
|
|
|
|
uint64_t *ashift)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Really this should just fail. But then the root vdev will be in the
|
|
|
|
* faulted state with VDEV_AUX_NO_REPLICAS, when what we really want is
|
|
|
|
* VDEV_AUX_BAD_GUID_SUM. So we pretend to succeed, knowing that we
|
|
|
|
* will fail the GUID sum check before ever trying to open the pool.
|
|
|
|
*/
|
2010-05-29 00:45:14 +04:00
|
|
|
*psize = 0;
|
2012-01-24 06:43:32 +04:00
|
|
|
*max_psize = 0;
|
2010-05-29 00:45:14 +04:00
|
|
|
*ashift = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
|
|
|
static void
|
|
|
|
vdev_missing_close(vdev_t *vd)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
2014-10-21 02:07:45 +04:00
|
|
|
static void
|
2008-11-20 23:01:55 +03:00
|
|
|
vdev_missing_io_start(zio_t *zio)
|
|
|
|
{
|
2013-03-08 22:41:28 +04:00
|
|
|
zio->io_error = SET_ERROR(ENOTSUP);
|
2014-10-21 02:07:45 +04:00
|
|
|
zio_execute(zio);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
2008-12-03 23:09:06 +03:00
|
|
|
static void
|
2008-11-20 23:01:55 +03:00
|
|
|
vdev_missing_io_done(zio_t *zio)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Operations vector for the 'missing' vdev type.  Open pretends to succeed
 * (see vdev_missing_open) and every I/O fails with ENOTSUP; the remaining
 * callbacks are either no-ops or unused (NULL).
 */
vdev_ops_t vdev_missing_ops = {
	.vdev_op_open = vdev_missing_open,
	.vdev_op_close = vdev_missing_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_missing_io_start,
	.vdev_op_io_done = vdev_missing_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = NULL,
	.vdev_op_type = VDEV_TYPE_MISSING,	/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};
|
2010-05-29 00:45:14 +04:00
|
|
|
|
|
|
|
/*
 * Operations vector for the 'hole' vdev type.  Holes reuse the missing-vdev
 * callbacks: open pretends to succeed and all I/O fails with ENOTSUP.  Only
 * the type name distinguishes this table from vdev_missing_ops.
 */
vdev_ops_t vdev_hole_ops = {
	.vdev_op_open = vdev_missing_open,
	.vdev_op_close = vdev_missing_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_missing_io_start,
	.vdev_op_io_done = vdev_missing_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = NULL,
	.vdev_op_type = VDEV_TYPE_HOLE,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};
|