.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or https://opensource.org/licenses/CDDL-1.0.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright (c) 2012, 2018 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2018 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 16, 2022
.Dt ZPOOL 8
.Os
.
.Sh NAME
.Nm zpool
.Nd configure ZFS storage pools
.Sh SYNOPSIS
.Nm
.Fl ?V
.Nm
.Cm version
.Nm
.Cm subcommand
.Op Ar arguments
.
.Sh DESCRIPTION
The
.Nm
command configures ZFS storage pools.
A storage pool is a collection of devices that provides physical storage and
data replication for ZFS datasets.
All datasets within a storage pool share the same space.
See
.Xr zfs 8
for information on managing datasets.
.Pp
For an overview of creating and managing ZFS storage pools, see the
.Xr zpoolconcepts 7
manual page.
.
.Sh SUBCOMMANDS
All subcommands that modify state are logged persistently to the pool in their
original form.
.Pp
The
.Nm
command provides subcommands to create and destroy storage pools, add capacity
to storage pools, and provide information about the storage pools.
The following subcommands are supported:
.Bl -tag -width Ds
.It Xo
.Nm
.Fl ?\&
.Xc
Displays a help message.
.It Xo
.Nm
.Fl V , -version
.Xc
.It Xo
.Nm
.Cm version
.Xc
Displays the software version of the
.Nm
userland utility and the ZFS kernel module.
.El
.
.Ss Creation
.Bl -tag -width Ds
.It Xr zpool-create 8
Creates a new storage pool containing the virtual devices specified on the
command line.
.It Xr zpool-initialize 8
Begins initializing by writing to all unallocated regions on the specified
devices, or all eligible devices in the pool if no individual devices are
specified.
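.Pp
For example, initialization of all eligible devices in a pool named
.Ar tank
could be started with:
.Dl # Nm zpool Cm initialize Ar tank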
.El
.
.Ss Destruction
.Bl -tag -width Ds
.It Xr zpool-destroy 8
Destroys the given pool, freeing up any devices for other use.
.It Xr zpool-labelclear 8
Removes ZFS label information from the specified
.Ar device .
.El
.
.Ss Virtual Devices
.Bl -tag -width Ds
.It Xo
.Xr zpool-attach 8 Ns / Ns Xr zpool-detach 8
.Xc
Converts a non-redundant disk into a mirror, or increases
the redundancy level of an existing mirror
.Cm ( attach Ns ), or performs the inverse operation (
.Cm detach Ns ).
.It Xo
.Xr zpool-add 8 Ns / Ns Xr zpool-remove 8
.Xc
Adds the specified virtual devices to the given pool,
or removes the specified device from the pool.
.It Xr zpool-replace 8
Replaces an existing device (which may be faulted) with a new one.
.It Xr zpool-split 8
Creates a new pool by splitting all mirrors in an existing pool (which decreases
its redundancy).
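.Pp
For example, assuming
.Ar tank
is built from mirrors, a new pool under the example name
.Ar newtank
could be split off with:
.Dl # Nm zpool Cm split Ar tank newtank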
.El
.
.Ss Properties
Available pool properties are listed in the
.Xr zpoolprops 7
manual page.
.Bl -tag -width Ds
.It Xr zpool-list 8
Lists the given pools along with a health status and space usage.
.It Xo
.Xr zpool-get 8 Ns / Ns Xr zpool-set 8
.Xc
Retrieves the given list of properties
.Po
or all properties if
.Sy all
is used
.Pc
for the specified storage pool(s).
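.Pp
For example, all properties of a pool named
.Ar tank
could be retrieved with:
.Dl # Nm zpool Cm get Sy all Ar tank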
.El
.
.Ss Monitoring
.Bl -tag -width Ds
.It Xr zpool-status 8
Displays the detailed health status for the given pools.
.It Xr zpool-iostat 8
Displays logical I/O statistics for the given pools/vdevs.
Physical I/O operations may be observed via
.Xr iostat 1 .
.It Xr zpool-events 8
Lists all recent events generated by the ZFS kernel modules.
These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
That manual page also describes the subclasses and event payloads
that can be generated.
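.Pp
For example, recent events and their payloads could be dumped verbosely with:
.Dl # Nm zpool Cm events Fl v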
.It Xr zpool-history 8
Displays the command history of the specified pool(s) or all pools if no pool is
specified.
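.Pp
For example, the commands previously run against a pool named
.Ar tank
could be listed with:
.Dl # Nm zpool Cm history Ar tank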
.El
.
.Ss Maintenance
.Bl -tag -width Ds
.It Xr zpool-scrub 8
Begins a scrub or resumes a paused scrub.
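.Pp
For example, a scrub of a pool named
.Ar tank
could be started with:
.Dl # Nm zpool Cm scrub Ar tank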
.It Xr zpool-checkpoint 8
Checkpoints the current state of
.Ar pool ,
which can be later restored by
.Nm zpool Cm import Fl -rewind-to-checkpoint .
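.Pp
For example, a checkpoint of a pool named
.Ar tank
could be taken with:
.Dl # Nm zpool Cm checkpoint Ar tank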
.It Xr zpool-trim 8
Initiates an immediate on-demand TRIM operation for all of the free space in a
pool.
This operation informs the underlying storage devices of all blocks
in the pool which are no longer allocated and allows thinly provisioned
devices to reclaim the space.
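.Pp
For example, a TRIM of all free space in a pool named
.Ar tank
could be started with:
.Dl # Nm zpool Cm trim Ar tank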
.It Xr zpool-sync 8
This command forces all in-core dirty data to be written to the primary
pool storage and not the ZIL.
It will also update administrative information including quota reporting.
Without arguments,
.Nm zpool Cm sync
will sync all pools on the system.
Otherwise, it will sync only the specified pool(s).
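.Pp
For example, only a pool named
.Ar tank
could be synced with:
.Dl # Nm zpool Cm sync Ar tank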
.It Xr zpool-upgrade 8
Manages the on-disk format version of storage pools.
.It Xr zpool-wait 8
Waits until all background activity of the given types has ceased in the given
pool.
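.Pp
For example, an in-progress scrub of a pool named
.Ar tank
could be waited on with:
.Dl # Nm zpool Cm wait Fl t Sy scrub Ar tank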
.El
.
.Ss Fault Resolution
.Bl -tag -width Ds
.It Xo
.Xr zpool-offline 8 Ns / Ns Xr zpool-online 8
.Xc
Takes the specified physical device offline or brings it online.
.It Xr zpool-resilver 8
Starts a resilver.
If an existing resilver is already running it will be restarted from the
beginning.
.It Xr zpool-reopen 8
Reopens all the vdevs associated with the pool.
.It Xr zpool-clear 8
Clears device errors in a pool.
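.Pp
For example, the error counts of every device in a pool named
.Ar tank
could be cleared with:
.Dl # Nm zpool Cm clear Ar tank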
.El
.
.Ss Import & Export
.Bl -tag -width Ds
.It Xr zpool-import 8
Makes disks containing ZFS storage pools available for use on the system.
.It Xr zpool-export 8
Exports the given pools from the system.
.It Xr zpool-reguid 8
Generates a new unique identifier for the pool.
.El
.
.Sh EXIT STATUS
The following exit values are returned:
.Bl -tag -compact -offset 4n -width "a"
.It Sy 0
Successful completion.
.It Sy 1
An error occurred.
.It Sy 2
Invalid command line options were specified.
.El
.
.Sh EXAMPLES
.\" Examples 1, 2, 3, 4, 12, 13 are shared with zpool-create.8.
.\" Examples 6, 14 are shared with zpool-add.8.
.\" Examples 7, 16 are shared with zpool-list.8.
.\" Examples 8 are shared with zpool-destroy.8.
.\" Examples 9 are shared with zpool-export.8.
.\" Examples 10 are shared with zpool-import.8.
.\" Examples 11 are shared with zpool-upgrade.8.
.\" Examples 15 are shared with zpool-remove.8.
.\" Examples 17 are shared with zpool-status.8.
.\" Examples 14, 17 are also shared with zpool-iostat.8.
.\" Make sure to update them omnidirectionally
.Ss Example 1 : No Creating a RAID-Z Storage Pool
The following command creates a pool with a single raidz root vdev that
consists of six disks:
.Dl # Nm zpool Cm create Ar tank Sy raidz Pa sda sdb sdc sdd sde sdf
.
.Ss Example 2 : No Creating a Mirrored Storage Pool
The following command creates a pool with two mirrors, where each mirror
contains two disks:
.Dl # Nm zpool Cm create Ar tank Sy mirror Pa sda sdb Sy mirror Pa sdc sdd
.
.Ss Example 3 : No Creating a ZFS Storage Pool by Using Partitions
The following command creates a non-redundant pool using two disk partitions:
.Dl # Nm zpool Cm create Ar tank Pa sda1 sdb2
.
.Ss Example 4 : No Creating a ZFS Storage Pool by Using Files
The following command creates a non-redundant pool using files.
While not recommended, a pool based on files can be useful for experimental
purposes.
.Dl # Nm zpool Cm create Ar tank Pa /path/to/file/a /path/to/file/b
.
.Ss Example 5 : No Making a non-mirrored ZFS Storage Pool mirrored
The following command converts an existing single device
.Ar sda
into a mirror by attaching a second device to it,
.Ar sdb .
.Dl # Nm zpool Cm attach Ar tank Pa sda sdb
.
.Ss Example 6 : No Adding a Mirror to a ZFS Storage Pool
The following command adds two mirrored disks to the pool
.Ar tank ,
assuming the pool is already made up of two-way mirrors.
The additional space is immediately available to any datasets within the pool.
.Dl # Nm zpool Cm add Ar tank Sy mirror Pa sda sdb
.
.Ss Example 7 : No Listing Available ZFS Storage Pools
The following command lists all available pools on the system.
In this case, the pool
.Ar zion
is faulted due to a missing device.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list
NAME    SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
rpool  19.9G  8.43G  11.4G         -    33%    42%  1.00x  ONLINE  -
tank   61.5G  20.0G  41.5G         -    48%    32%  1.00x  ONLINE  -
zion       -      -      -         -      -      -      -  FAULTED -
.Ed
.
.Ss Example 8 : No Destroying a ZFS Storage Pool
The following command destroys the pool
.Ar tank
and any datasets contained within:
.Dl # Nm zpool Cm destroy Fl f Ar tank
.
.Ss Example 9 : No Exporting a ZFS Storage Pool
The following command exports the devices in pool
.Ar tank
so that they can be relocated or later imported:
.Dl # Nm zpool Cm export Ar tank
.
.Ss Example 10 : No Importing a ZFS Storage Pool
The following command displays available pools, and then imports the pool
.Ar tank
for use on the system.
The results from this command are similar to the following:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm import
  pool: tank
    id: 15451357997522795478
 state: ONLINE
action: The pool can be imported using its name or numeric identifier.
config:

        tank        ONLINE
          mirror    ONLINE
            sda     ONLINE
            sdb     ONLINE

.No # Nm zpool Cm import Ar tank
.Ed
.
.Ss Example 11 : No Upgrading All ZFS Storage Pools to the Current Version
The following command upgrades all ZFS storage pools to the current version of
the software:
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm upgrade Fl a
This system is currently running ZFS version 2.
.Ed
.
.Ss Example 12 : No Managing Hot Spares
The following command creates a new pool with an available hot spare:
.Dl # Nm zpool Cm create Ar tank Sy mirror Pa sda sdb Sy spare Pa sdc
.Pp
If one of the disks were to fail, the pool would be reduced to the degraded
state.
The failed device can be replaced using the following command:
.Dl # Nm zpool Cm replace Ar tank Pa sda sdd
.Pp
Once the data has been resilvered, the spare is automatically removed and is
made available for use should another device fail.
The hot spare can be permanently removed from the pool using the following
command:
.Dl # Nm zpool Cm remove Ar tank Pa sdc
.
.Ss Example 13 : No Creating a ZFS Pool with Mirrored Separate Intent Logs
The following command creates a ZFS storage pool consisting of two, two-way
mirrors and mirrored log devices:
.Dl # Nm zpool Cm create Ar pool Sy mirror Pa sda sdb Sy mirror Pa sdc sdd Sy log mirror Pa sde sdf
.
.Ss Example 14 : No Adding Cache Devices to a ZFS Pool
The following command adds two disks for use as cache devices to a ZFS storage
pool:
.Dl # Nm zpool Cm add Ar pool Sy cache Pa sdc sdd
.Pp
Once added, the cache devices gradually fill with content from main memory.
Depending on the size of your cache devices, it could take over an hour for
them to fill.
Capacity and reads can be monitored using the
.Cm iostat
subcommand as follows:
.Dl # Nm zpool Cm iostat Fl v Ar pool 5
.
.Ss Example 15 : No Removing a Mirrored top-level (Log or Data) Device
The following commands remove the mirrored log device
.Sy mirror-2
and mirrored top-level data device
.Sy mirror-1 .
.Pp
Given this configuration:
.Bd -literal -compact -offset Ds
  pool: tank
 state: ONLINE
 scrub: none requested
config:

         NAME        STATE     READ WRITE CKSUM
         tank        ONLINE       0     0     0
           mirror-0  ONLINE       0     0     0
             sda     ONLINE       0     0     0
             sdb     ONLINE       0     0     0
           mirror-1  ONLINE       0     0     0
             sdc     ONLINE       0     0     0
             sdd     ONLINE       0     0     0
         logs
           mirror-2  ONLINE       0     0     0
             sde     ONLINE       0     0     0
             sdf     ONLINE       0     0     0
.Ed
.Pp
The command to remove the mirrored log
.Ar mirror-2 No is :
.Dl # Nm zpool Cm remove Ar tank mirror-2
.Pp
The command to remove the mirrored data
.Ar mirror-1 No is :
.Dl # Nm zpool Cm remove Ar tank mirror-1
.
.Ss Example 16 : No Displaying expanded space on a device
The following command displays the detailed information for the pool
.Ar data .
This pool consists of a single raidz vdev where one of its devices
increased its capacity by 10 GiB.
In this example, the pool will not be able to utilize this extra capacity until
all the devices under the raidz vdev have been expanded.
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm list Fl v Ar data
NAME         SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
data        23.9G  14.6G  9.30G         -    48%    61%  1.00x  ONLINE  -
  raidz1    23.9G  14.6G  9.30G         -    48%
    sda         -      -      -         -      -
    sdb         -      -      -       10G      -
    sdc         -      -      -         -      -
.Ed
.
.Ss Example 17 : No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status No and Nm zpool Cm iostat No output with Fl c .
.Bd -literal -compact -offset Ds
.No # Nm zpool Cm status Fl c Pa vendor , Ns Pa model , Ns Pa size
NAME        STATE     READ WRITE CKSUM vendor  model        size
tank        ONLINE       0     0     0
  mirror-0  ONLINE       0     0     0
    U1      ONLINE       0     0     0 SEAGATE ST8000NM0075 7.3T
    U10     ONLINE       0     0     0 SEAGATE ST8000NM0075 7.3T
    U11     ONLINE       0     0     0 SEAGATE ST8000NM0075 7.3T
    U12     ONLINE       0     0     0 SEAGATE ST8000NM0075 7.3T
    U13     ONLINE       0     0     0 SEAGATE ST8000NM0075 7.3T
    U14     ONLINE       0     0     0 SEAGATE ST8000NM0075 7.3T

.No # Nm zpool Cm iostat Fl vc Pa size
              capacity     operations     bandwidth
pool        alloc   free   read  write   read  write  size
----------  -----  -----  -----  -----  -----  -----  ----
rpool       14.6G  54.9G      4     55   250K  2.69M
  sda1      14.6G  54.9G      4     55   250K  2.69M   70G
----------  -----  -----  -----  -----  -----  -----  ----
.Ed
.
.Sh ENVIRONMENT VARIABLES
.Bl -tag -compact -width "ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE"
.It Sy ZFS_ABORT
Cause
.Nm
to dump core on exit for the purposes of running
.Sy ::findleaks .
.It Sy ZFS_COLOR
Use ANSI color in
.Nm zpool Cm status
and
.Nm zpool Cm iostat
output.
.It Sy ZPOOL_AUTO_POWER_ON_SLOT
Automatically attempt to turn on the enclosure slot power for a drive when
running the
.Nm zpool Cm online
or
.Nm zpool Cm clear
commands.
This has the same effect as passing the
.Fl -power
option to those commands.
.It Sy ZPOOL_POWER_ON_SLOT_TIMEOUT_MS
The maximum time in milliseconds to wait for a slot power sysfs value
to return the correct value after writing it.
For example, after writing "on" to the sysfs enclosure slot power_control file,
it can take some time for the enclosure to power up the slot and return
"on" if you read back the power_control value.
Defaults to 30 seconds (30000ms) if not set.
.It Sy ZPOOL_IMPORT_PATH
The search path for devices or files to use with the pool.
This is a colon-separated list of directories in which
.Nm
looks for device nodes and files.
Similar to the
.Fl d
option in
.Nm zpool import .
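.Pp
For example, on a typical Linux system the search could be limited to the
persistent names under
.Pa /dev/disk/by-id
(an illustrative directory) with:
.Dl # ZPOOL_IMPORT_PATH=/dev/disk/by-id zpool import tank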
.It Sy ZPOOL_IMPORT_UDEV_TIMEOUT_MS
The maximum time in milliseconds that
.Nm zpool import
will wait for an expected device to be available.
.It Sy ZPOOL_STATUS_NON_NATIVE_ASHIFT_IGNORE
If set, suppress the warning about non-native vdev ashift in
.Nm zpool Cm status .
The value is not used, only the presence or absence of the variable matters.
.It Sy ZPOOL_VDEV_NAME_GUID
Cause
.Nm
subcommands to output vdev guids by default.
This behavior is identical to the
.Nm zpool Cm status Fl g
command line option.
.It Sy ZPOOL_VDEV_NAME_FOLLOW_LINKS
Cause
.Nm
subcommands to follow links for vdev names by default.
This behavior is identical to the
.Nm zpool Cm status Fl L
command line option.
.It Sy ZPOOL_VDEV_NAME_PATH
Cause
.Nm
subcommands to output full vdev path names by default.
This behavior is identical to the
.Nm zpool Cm status Fl P
command line option.
.It Sy ZFS_VDEV_DEVID_OPT_OUT
Older OpenZFS implementations had issues when attempting to display pool
config vdev names if a
.Sy devid
NVP value is present in the pool's config.
.Pp
For example, a pool that originated on the illumos platform would have a
.Sy devid
value in the config and
.Nm zpool Cm status
would fail when listing the config.
This would also be true for future Linux-based pools.
.Pp
A pool can be stripped of any
.Sy devid
values on import or prevented from adding
them on
.Nm zpool Cm create
or
.Nm zpool Cm add
by setting
.Sy ZFS_VDEV_DEVID_OPT_OUT .
.Pp
.It Sy ZPOOL_SCRIPTS_AS_ROOT
Allow a privileged user to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
Normally, only unprivileged users are allowed to run
.Fl c .
.It Sy ZPOOL_SCRIPTS_PATH
The search path for scripts when running
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
This is a colon-separated list of directories and overrides the default
.Pa ~/.zpool.d
and
.Pa /etc/zfs/zpool.d
search paths.
.It Sy ZPOOL_SCRIPTS_ENABLED
Allow a user to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
If
.Sy ZPOOL_SCRIPTS_ENABLED
is not set, it is assumed that the user is allowed to run
.Nm zpool Cm status Ns / Ns Cm iostat Fl c .
.\" Shared with zfs.8
.It Sy ZFS_MODULE_TIMEOUT
Time, in seconds, to wait for
.Pa /dev/zfs
to appear.
Defaults to
.Sy 10 ,
max
.Sy 600 Pq 10 minutes .
If
.Pf < Sy 0 ,
wait forever; if
.Sy 0 ,
don't wait.
.El
.
.Sh INTERFACE STABILITY
.Sy Evolving
.
.Sh SEE ALSO
.Xr zfs 4 ,
.Xr zpool-features 7 ,
.Xr zpoolconcepts 7 ,
.Xr zpoolprops 7 ,
.Xr zed 8 ,
.Xr zfs 8 ,
.Xr zpool-add 8 ,
.Xr zpool-attach 8 ,
.Xr zpool-checkpoint 8 ,
.Xr zpool-clear 8 ,
.Xr zpool-create 8 ,
.Xr zpool-destroy 8 ,
.Xr zpool-detach 8 ,
.Xr zpool-events 8 ,
.Xr zpool-export 8 ,
.Xr zpool-get 8 ,
.Xr zpool-history 8 ,
.Xr zpool-import 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-labelclear 8 ,
.Xr zpool-list 8 ,
.Xr zpool-offline 8 ,
.Xr zpool-online 8 ,
.Xr zpool-reguid 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-replace 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-set 8 ,
.Xr zpool-split 8 ,
.Xr zpool-status 8 ,
.Xr zpool-sync 8 ,
.Xr zpool-trim 8 ,
.Xr zpool-upgrade 8 ,
.Xr zpool-wait 8