/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2020 Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2020 The FreeBSD Foundation
 *
 * Portions of this software were developed by Allan Jude
 * under sponsorship from the FreeBSD Foundation.
 */

/*
 * Internal utility routines for the ZFS library.
 */

#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <math.h>
#if LIBFETCH_DYNAMIC
#include <dlfcn.h>
#endif
#include <sys/stat.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
#include <sys/wait.h>

#include <libzfs.h>
#include <libzfs_core.h>

#include "libzfs_impl.h"
#include "zfs_prop.h"
#include "zfeature_common.h"
#include <zfs_fletcher.h>
#include <libzutil.h>

/*
 * We only care about the scheme in order to match the scheme
 * with the handler. Each handler should validate the full URI
 * as necessary.
 */
#define	URI_REGEX	"^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
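/*
 * Illustrative example (hypothetical URI): given a location such as
 * "https://keys.example.org/pool.key", the expression above matches only
 * the leading "https:"; the handler selected for that scheme is expected
 * to validate the rest of the URI itself.
 */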
int
libzfs_errno(libzfs_handle_t *hdl)
{
	return (hdl->libzfs_error);
}

const char *
libzfs_error_action(libzfs_handle_t *hdl)
{
	return (hdl->libzfs_action);
}

const char *
libzfs_error_description(libzfs_handle_t *hdl)
{
	if (hdl->libzfs_desc[0] != '\0')
		return (hdl->libzfs_desc);

	switch (hdl->libzfs_error) {
	case EZFS_NOMEM:
		return (dgettext(TEXT_DOMAIN, "out of memory"));
	case EZFS_BADPROP:
		return (dgettext(TEXT_DOMAIN, "invalid property value"));
	case EZFS_PROPREADONLY:
		return (dgettext(TEXT_DOMAIN, "read-only property"));
	case EZFS_PROPTYPE:
		return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
		    "datasets of this type"));
	case EZFS_PROPNONINHERIT:
		return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
	case EZFS_PROPSPACE:
		return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
	case EZFS_BADTYPE:
		return (dgettext(TEXT_DOMAIN, "operation not applicable to "
		    "datasets of this type"));
	case EZFS_BUSY:
		return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
	case EZFS_EXISTS:
		return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
	case EZFS_NOENT:
		return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
	case EZFS_BADSTREAM:
		return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
	case EZFS_DSREADONLY:
		return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
	case EZFS_VOLTOOBIG:
		return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
		    "this system"));
	case EZFS_INVALIDNAME:
		return (dgettext(TEXT_DOMAIN, "invalid name"));
	case EZFS_BADRESTORE:
		return (dgettext(TEXT_DOMAIN, "unable to restore to "
		    "destination"));
	case EZFS_BADBACKUP:
		return (dgettext(TEXT_DOMAIN, "backup failed"));
	case EZFS_BADTARGET:
		return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
	case EZFS_NODEVICE:
		return (dgettext(TEXT_DOMAIN, "no such device in pool"));
	case EZFS_BADDEV:
		return (dgettext(TEXT_DOMAIN, "invalid device"));
	case EZFS_NOREPLICAS:
		return (dgettext(TEXT_DOMAIN, "no valid replicas"));
	case EZFS_RESILVERING:
		return (dgettext(TEXT_DOMAIN, "currently resilvering"));
	case EZFS_BADVERSION:
		return (dgettext(TEXT_DOMAIN, "unsupported version or "
		    "feature"));
	case EZFS_POOLUNAVAIL:
		return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
	case EZFS_DEVOVERFLOW:
		return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
	case EZFS_BADPATH:
		return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
	case EZFS_CROSSTARGET:
		return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
		    "pools"));
	case EZFS_ZONED:
		return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
	case EZFS_MOUNTFAILED:
		return (dgettext(TEXT_DOMAIN, "mount failed"));
	case EZFS_UMOUNTFAILED:
		return (dgettext(TEXT_DOMAIN, "unmount failed"));
	case EZFS_UNSHARENFSFAILED:
		return (dgettext(TEXT_DOMAIN, "NFS share removal failed"));
	case EZFS_SHARENFSFAILED:
		return (dgettext(TEXT_DOMAIN, "NFS share creation failed"));
	case EZFS_UNSHARESMBFAILED:
		return (dgettext(TEXT_DOMAIN, "SMB share removal failed"));
	case EZFS_SHARESMBFAILED:
		return (dgettext(TEXT_DOMAIN, "SMB share creation failed"));
	case EZFS_PERM:
		return (dgettext(TEXT_DOMAIN, "permission denied"));
	case EZFS_NOSPC:
		return (dgettext(TEXT_DOMAIN, "out of space"));
	case EZFS_FAULT:
		return (dgettext(TEXT_DOMAIN, "bad address"));
	case EZFS_IO:
		return (dgettext(TEXT_DOMAIN, "I/O error"));
	case EZFS_INTR:
		return (dgettext(TEXT_DOMAIN, "signal received"));
	case EZFS_CKSUM:
		return (dgettext(TEXT_DOMAIN, "insufficient replicas"));
	case EZFS_ISSPARE:
		return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
		    "spare"));
	case EZFS_INVALCONFIG:
		return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
	case EZFS_RECURSIVE:
		return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
	case EZFS_NOHISTORY:
		return (dgettext(TEXT_DOMAIN, "no history available"));
	case EZFS_POOLPROPS:
		return (dgettext(TEXT_DOMAIN, "failed to retrieve "
		    "pool properties"));
	case EZFS_POOL_NOTSUP:
		return (dgettext(TEXT_DOMAIN, "operation not supported "
		    "on this type of pool"));
	case EZFS_POOL_INVALARG:
		return (dgettext(TEXT_DOMAIN, "invalid argument for "
		    "this pool operation"));
	case EZFS_NAMETOOLONG:
		return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
	case EZFS_OPENFAILED:
		return (dgettext(TEXT_DOMAIN, "open failed"));
	case EZFS_NOCAP:
		return (dgettext(TEXT_DOMAIN,
		    "disk capacity information could not be retrieved"));
	case EZFS_LABELFAILED:
		return (dgettext(TEXT_DOMAIN, "write of label failed"));
	case EZFS_BADWHO:
		return (dgettext(TEXT_DOMAIN, "invalid user/group"));
	case EZFS_BADPERM:
		return (dgettext(TEXT_DOMAIN, "invalid permission"));
	case EZFS_BADPERMSET:
		return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
	case EZFS_NODELEGATION:
		return (dgettext(TEXT_DOMAIN, "delegated administration is "
		    "disabled on pool"));
	case EZFS_BADCACHE:
		return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
	case EZFS_ISL2CACHE:
		return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
	case EZFS_VDEVNOTSUP:
		return (dgettext(TEXT_DOMAIN, "vdev specification is not "
		    "supported"));
	case EZFS_NOTSUP:
		return (dgettext(TEXT_DOMAIN, "operation not supported "
		    "on this dataset"));
	case EZFS_IOC_NOTSUPPORTED:
		return (dgettext(TEXT_DOMAIN, "operation not supported by "
		    "zfs kernel module"));
	case EZFS_ACTIVE_SPARE:
		return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
		    "device"));
	case EZFS_UNPLAYED_LOGS:
		return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
		    "logs"));
	case EZFS_REFTAG_RELE:
		return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
	case EZFS_REFTAG_HOLD:
		return (dgettext(TEXT_DOMAIN, "tag already exists on this "
		    "dataset"));
	case EZFS_TAGTOOLONG:
		return (dgettext(TEXT_DOMAIN, "tag too long"));
	case EZFS_PIPEFAILED:
		return (dgettext(TEXT_DOMAIN, "pipe create failed"));
	case EZFS_THREADCREATEFAILED:
		return (dgettext(TEXT_DOMAIN, "thread create failed"));
	case EZFS_POSTSPLIT_ONLINE:
		return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
		    "into a new one"));
	case EZFS_SCRUB_PAUSED:
		return (dgettext(TEXT_DOMAIN, "scrub is paused; "
		    "use 'zpool scrub' to resume scrub"));
	case EZFS_SCRUB_PAUSED_TO_CANCEL:
		return (dgettext(TEXT_DOMAIN, "scrub is paused; "
		    "use 'zpool scrub' to resume or 'zpool scrub -s' to "
		    "cancel scrub"));
	case EZFS_SCRUBBING:
		return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
		    "use 'zpool scrub -s' to cancel scrub"));
	case EZFS_ERRORSCRUBBING:
		return (dgettext(TEXT_DOMAIN, "currently error scrubbing; "
		    "use 'zpool scrub -s' to cancel error scrub"));
	case EZFS_ERRORSCRUB_PAUSED:
		return (dgettext(TEXT_DOMAIN, "error scrub is paused; "
		    "use 'zpool scrub -e' to resume error scrub"));
	case EZFS_NO_SCRUB:
		return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
	case EZFS_DIFF:
		return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
	case EZFS_DIFFDATA:
		return (dgettext(TEXT_DOMAIN, "invalid diff data"));
	case EZFS_POOLREADONLY:
		return (dgettext(TEXT_DOMAIN, "pool is read-only"));
	case EZFS_NO_PENDING:
		return (dgettext(TEXT_DOMAIN, "operation is not "
		    "in progress"));
	case EZFS_CHECKPOINT_EXISTS:
		return (dgettext(TEXT_DOMAIN, "checkpoint exists"));
	case EZFS_DISCARDING_CHECKPOINT:
		return (dgettext(TEXT_DOMAIN, "currently discarding "
		    "checkpoint"));
	case EZFS_NO_CHECKPOINT:
		return (dgettext(TEXT_DOMAIN, "checkpoint does not exist"));
	case EZFS_DEVRM_IN_PROGRESS:
		return (dgettext(TEXT_DOMAIN, "device removal in progress"));
	case EZFS_VDEV_TOO_BIG:
		return (dgettext(TEXT_DOMAIN, "device exceeds supported size"));
	case EZFS_ACTIVE_POOL:
		return (dgettext(TEXT_DOMAIN, "pool is imported on a "
		    "different host"));
	case EZFS_CRYPTOFAILED:
		return (dgettext(TEXT_DOMAIN, "encryption failure"));
	case EZFS_TOOMANY:
		return (dgettext(TEXT_DOMAIN, "argument list too long"));
	case EZFS_INITIALIZING:
		return (dgettext(TEXT_DOMAIN, "currently initializing"));
	case EZFS_NO_INITIALIZE:
		return (dgettext(TEXT_DOMAIN, "there is no active "
		    "initialization"));
	case EZFS_WRONG_PARENT:
		return (dgettext(TEXT_DOMAIN, "invalid parent dataset"));
	case EZFS_TRIMMING:
		return (dgettext(TEXT_DOMAIN, "currently trimming"));
	case EZFS_NO_TRIM:
		return (dgettext(TEXT_DOMAIN, "there is no active trim"));
	case EZFS_TRIM_NOTSUP:
		return (dgettext(TEXT_DOMAIN, "trim operations are not "
		    "supported by this device"));
	case EZFS_NO_RESILVER_DEFER:
		return (dgettext(TEXT_DOMAIN, "this action requires the "
		    "resilver_defer feature"));
	case EZFS_EXPORT_IN_PROGRESS:
		return (dgettext(TEXT_DOMAIN, "pool export in progress"));
	case EZFS_REBUILDING:
		return (dgettext(TEXT_DOMAIN, "currently sequentially "
		    "resilvering"));
	case EZFS_VDEV_NOTSUP:
		return (dgettext(TEXT_DOMAIN, "operation not supported "
		    "on this type of vdev"));
	case EZFS_NOT_USER_NAMESPACE:
		return (dgettext(TEXT_DOMAIN, "the provided file "
		    "was not a user namespace file"));
	case EZFS_RESUME_EXISTS:
		return (dgettext(TEXT_DOMAIN, "Resuming recv on existing "
		    "dataset without force"));
	case EZFS_UNKNOWN:
		return (dgettext(TEXT_DOMAIN, "unknown error"));
	default:
		assert(hdl->libzfs_error == 0);
		return (dgettext(TEXT_DOMAIN, "no error"));
	}
}

void
zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
	    fmt, ap);
	hdl->libzfs_desc_active = 1;

	va_end(ap);
}
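/*
 * Illustrative usage sketch (based on callers elsewhere in libzfs, not a
 * fixed interface): extra detail is recorded first with zfs_error_aux(),
 * then the failure is reported, e.g.
 *
 *	zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset is busy"));
 *	return (zfs_error(hdl, EZFS_BUSY, errbuf));
 *
 * zfs_verror() below keeps a description recorded immediately beforehand
 * for libzfs_error_description() and clears any stale one.
 */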
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
	(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
	    fmt, ap);
	hdl->libzfs_error = error;

	if (hdl->libzfs_desc_active)
		hdl->libzfs_desc_active = 0;
	else
		hdl->libzfs_desc[0] = '\0';

	if (hdl->libzfs_printerr) {
		if (error == EZFS_UNKNOWN) {
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
			    "error: %s: %s\n"), hdl->libzfs_action,
			    libzfs_error_description(hdl));
			abort();
		}

		(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
		    libzfs_error_description(hdl));
		if (error == EZFS_NOMEM)
			exit(1);
	}
}

int
zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
{
	return (zfs_error_fmt(hdl, error, "%s", msg));
}

int
zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	zfs_verror(hdl, error, fmt, ap);

	va_end(ap);

	return (-1);
}

static int
zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
    va_list ap)
{
	switch (error) {
	case EPERM:
	case EACCES:
		zfs_verror(hdl, EZFS_PERM, fmt, ap);
		return (-1);

	case ECANCELED:
		zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
		return (-1);

	case EIO:
		zfs_verror(hdl, EZFS_IO, fmt, ap);
		return (-1);

	case EFAULT:
		zfs_verror(hdl, EZFS_FAULT, fmt, ap);
		return (-1);

	case EINTR:
		zfs_verror(hdl, EZFS_INTR, fmt, ap);
		return (-1);

	case ECKSUM:
		zfs_verror(hdl, EZFS_CKSUM, fmt, ap);
		return (-1);
	}

	return (0);
}
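/*
 * zfs_common_error() covers errnos whose meaning is the same for dataset
 * and pool operations (EPERM/EACCES, ECANCELED, EIO, EFAULT, EINTR and
 * ECKSUM).  It returns -1 once the error has been reported, and 0 when the
 * caller still needs to apply its own, more specific mapping, as
 * zfs_standard_error_fmt() and zpool_standard_error_fmt() do below.
 */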
int
zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
	return (zfs_standard_error_fmt(hdl, error, "%s", msg));
}

int
zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	if (zfs_common_error(hdl, error, fmt, ap) != 0) {
		va_end(ap);
		return (-1);
	}

	switch (error) {
	case ENXIO:
	case ENODEV:
	case EPIPE:
		zfs_verror(hdl, EZFS_IO, fmt, ap);
		break;

	case ENOENT:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset does not exist"));
		zfs_verror(hdl, EZFS_NOENT, fmt, ap);
		break;

	case ENOSPC:
	case EDQUOT:
		zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
		break;

	case EEXIST:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset already exists"));
		zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "dataset is busy"));
		zfs_verror(hdl, EZFS_BUSY, fmt, ap);
		break;
	case EROFS:
		zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
		break;
	case ENAMETOOLONG:
		zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
		break;
	case ENOTSUP:
		zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
		break;
	case EAGAIN:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool I/O is currently suspended"));
		zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
		break;
	case EREMOTEIO:
		zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
		break;
	case ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE:
	case ZFS_ERR_IOC_CMD_UNAVAIL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
		    "module does not support this operation. A reboot may "
		    "be required to enable this operation."));
		zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
		break;
	case ZFS_ERR_IOC_ARG_UNAVAIL:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
		    "module does not support an option for this operation. "
		    "A reboot may be required to enable this option."));
		zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
		break;
	case ZFS_ERR_IOC_ARG_REQUIRED:
	case ZFS_ERR_IOC_ARG_BADTYPE:
		zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
		break;
	case ZFS_ERR_WRONG_PARENT:
		zfs_verror(hdl, EZFS_WRONG_PARENT, fmt, ap);
		break;
	case ZFS_ERR_BADPROP:
		zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
		break;
	case ZFS_ERR_NOT_USER_NAMESPACE:
		zfs_verror(hdl, EZFS_NOT_USER_NAMESPACE, fmt, ap);
		break;
	default:
		zfs_error_aux(hdl, "%s", strerror(error));
		zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
		break;
	}

	va_end(ap);
	return (-1);
}
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
    char *errbuf)
{
	switch (err) {

	case ENOSPC:
		/*
		 * For quotas and reservations, ENOSPC indicates
		 * something different; setting a quota or reservation
		 * doesn't use any disk space.
		 */
		switch (prop) {
		case ZFS_PROP_QUOTA:
		case ZFS_PROP_REFQUOTA:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "size is less than current used or "
			    "reserved space"));
			(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
			break;

		case ZFS_PROP_RESERVATION:
		case ZFS_PROP_REFRESERVATION:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "size is greater than available space"));
			(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
			break;

		default:
			(void) zfs_standard_error(hdl, err, errbuf);
			break;
		}
		break;

	case EBUSY:
		(void) zfs_standard_error(hdl, EBUSY, errbuf);
		break;

	case EROFS:
		(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
		break;

	case E2BIG:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "property value too long"));
		(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
		break;

	case ENOTSUP:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool and or dataset must be upgraded to set this "
		    "property or value"));
		(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
		break;

	case ERANGE:
		if (prop == ZFS_PROP_COMPRESSION ||
		    prop == ZFS_PROP_DNODESIZE ||
		    prop == ZFS_PROP_RECORDSIZE) {
			(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property setting is not allowed on "
			    "bootable datasets"));
			(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
		} else if (prop == ZFS_PROP_CHECKSUM ||
		    prop == ZFS_PROP_DEDUP) {
			(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "property setting is not allowed on "
			    "root pools"));
			(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
		} else {
			(void) zfs_standard_error(hdl, err, errbuf);
		}
		break;

	case EINVAL:
		if (prop == ZPROP_INVAL) {
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
		} else {
			(void) zfs_standard_error(hdl, err, errbuf);
		}
		break;

	case ZFS_ERR_BADPROP:
		(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
		break;

	case EACCES:
		if (prop == ZFS_PROP_KEYLOCATION) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "keylocation may only be set on encryption roots"));
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
		} else {
			(void) zfs_standard_error(hdl, err, errbuf);
		}
		break;

	case EOVERFLOW:
		/*
		 * This platform can't address a volume this big.
		 */
#ifdef _ILP32
		if (prop == ZFS_PROP_VOLSIZE) {
			(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
			break;
		}
		zfs_fallthrough;
#endif
	default:
		(void) zfs_standard_error(hdl, err, errbuf);
	}
}
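/*
 * Illustrative call pattern for zfs_setprop_error() (a sketch; the exact
 * errbuf wording and buffer size vary by caller, and zhp is a hypothetical
 * zfs_handle_t pointer):
 *
 *	char errbuf[1024];
 *	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
 *	    "cannot set property for '%s'"), zfs_get_name(zhp));
 *	...
 *	zfs_setprop_error(hdl, prop, errno, errbuf);
 */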
int
zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
	return (zpool_standard_error_fmt(hdl, error, "%s", msg));
}

int
zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	if (zfs_common_error(hdl, error, fmt, ap) != 0) {
		va_end(ap);
		return (-1);
	}

	switch (error) {
	case ENODEV:
		zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
		break;

	case ENOENT:
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "no such pool or dataset"));
		zfs_verror(hdl, EZFS_NOENT, fmt, ap);
		break;

	case EEXIST:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool already exists"));
		zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
		zfs_verror(hdl, EZFS_BUSY, fmt, ap);
		break;
	/* There is no pending operation to cancel */
	case ENOTACTIVE:
		zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap);
		break;

	case ENXIO:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "one or more devices is currently unavailable"));
		zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
		break;

	case ENAMETOOLONG:
		zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
		break;

	case ENOTSUP:
		zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
		break;

	case EINVAL:
		zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
		break;

	case ENOSPC:
	case EDQUOT:
		zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
		break;

	case EAGAIN:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool I/O is currently suspended"));
		zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
		break;

	case EROFS:
		zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
		break;
	case EDOM:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "block size out of range or does not match"));
		zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
		break;
	case EREMOTEIO:
		zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
		break;
	case ZFS_ERR_CHECKPOINT_EXISTS:
		zfs_verror(hdl, EZFS_CHECKPOINT_EXISTS, fmt, ap);
		break;
	case ZFS_ERR_DISCARDING_CHECKPOINT:
		zfs_verror(hdl, EZFS_DISCARDING_CHECKPOINT, fmt, ap);
		break;
	case ZFS_ERR_NO_CHECKPOINT:
		zfs_verror(hdl, EZFS_NO_CHECKPOINT, fmt, ap);
		break;
	case ZFS_ERR_DEVRM_IN_PROGRESS:
		zfs_verror(hdl, EZFS_DEVRM_IN_PROGRESS, fmt, ap);
		break;
	case ZFS_ERR_VDEV_TOO_BIG:
		zfs_verror(hdl, EZFS_VDEV_TOO_BIG, fmt, ap);
		break;
	case ZFS_ERR_EXPORT_IN_PROGRESS:
		zfs_verror(hdl, EZFS_EXPORT_IN_PROGRESS, fmt, ap);
		break;
	case ZFS_ERR_RESILVER_IN_PROGRESS:
		zfs_verror(hdl, EZFS_RESILVERING, fmt, ap);
		break;
	case ZFS_ERR_REBUILD_IN_PROGRESS:
		zfs_verror(hdl, EZFS_REBUILDING, fmt, ap);
		break;
	case ZFS_ERR_BADPROP:
		zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
		break;
	case ZFS_ERR_VDEV_NOTSUP:
		zfs_verror(hdl, EZFS_VDEV_NOTSUP, fmt, ap);
		break;
Add basic zfs ioc input nvpair validation
We want newer versions of libzfs_core to run against an existing
zfs kernel module (i.e. a deferred reboot or module reload after
an update).
Programmatically document, via a zfs_ioc_key_t, the valid arguments
for the ioc commands that rely on nvpair input arguments (i.e. non-legacy
commands from libzfs_core). Automatically verify the expected pairs
before dispatching a command; a sketch of such a check follows this
message.
This initial phase focuses on the non-legacy ioctls. A follow-on
change can address the legacy ioctl input from the zfs_cmd_t.
The zfs_ioc_key_t for zfs_keys_channel_program looks like:
static const zfs_ioc_key_t zfs_keys_channel_program[] = {
	{"program",	DATA_TYPE_STRING,		0},
	{"arg",		DATA_TYPE_UNKNOWN,		0},
	{"sync",	DATA_TYPE_BOOLEAN_VALUE,	ZK_OPTIONAL},
	{"instrlimit",	DATA_TYPE_UINT64,		ZK_OPTIONAL},
	{"memlimit",	DATA_TYPE_UINT64,		ZK_OPTIONAL},
};
Introduce four input errors to identify specific input failures
(in addition to generic argument value errors like EINVAL, ERANGE,
EBADF, and E2BIG).
ZFS_ERR_IOC_CMD_UNAVAIL   the ioctl number is not supported by kernel
ZFS_ERR_IOC_ARG_UNAVAIL   an input argument is not supported by kernel
ZFS_ERR_IOC_ARG_REQUIRED  a required input argument is missing
ZFS_ERR_IOC_ARG_BADTYPE   an input argument has an invalid type
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Don Brady <don.brady@delphix.com>
Closes #7780
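A minimal sketch of how an input nvlist could be checked against such a
table. The zfs_ioc_key_t field names (zkey_name, zkey_type, zkey_flags)
and the helper name are assumptions for illustration; the in-kernel
validation added by this change is the authoritative implementation.

	static int
	check_input_nvpairs(nvlist_t *innvl, const zfs_ioc_key_t *keys,
	    size_t nkeys)
	{
		for (size_t i = 0; i < nkeys; i++) {
			nvpair_t *pair;

			/* missing keys are only tolerated when optional */
			if (nvlist_lookup_nvpair(innvl, keys[i].zkey_name,
			    &pair) != 0) {
				if (keys[i].zkey_flags & ZK_OPTIONAL)
					continue;
				return (ZFS_ERR_IOC_ARG_REQUIRED);
			}
			/* DATA_TYPE_UNKNOWN acts as a type wildcard */
			if (keys[i].zkey_type != DATA_TYPE_UNKNOWN &&
			    nvpair_type(pair) != keys[i].zkey_type)
				return (ZFS_ERR_IOC_ARG_BADTYPE);
		}
		return (0);
	}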
2018-09-02 22:14:01 +03:00
|
|
|
case ZFS_ERR_IOC_CMD_UNAVAIL:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
|
|
|
|
"module does not support this operation. A reboot may "
|
|
|
|
"be required to enable this operation."));
|
|
|
|
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
|
|
|
|
break;
|
|
|
|
case ZFS_ERR_IOC_ARG_UNAVAIL:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
|
|
|
|
"module does not support an option for this operation. "
|
|
|
|
"A reboot may be required to enable this option."));
|
|
|
|
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
|
|
|
|
break;
|
|
|
|
case ZFS_ERR_IOC_ARG_REQUIRED:
|
|
|
|
case ZFS_ERR_IOC_ARG_BADTYPE:
|
|
|
|
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
|
|
|
|
break;
|
2008-11-20 23:01:55 +03:00
|
|
|
default:
|
2021-05-15 13:23:45 +03:00
|
|
|
zfs_error_aux(hdl, "%s", strerror(error));
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
|
|
|
|
}
|
|
|
|
|
|
|
|
va_end(ap);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Display an out of memory error message and return -1 so the caller can
* propagate the failure.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
no_memory(libzfs_handle_t *hdl)
|
|
|
|
{
|
|
|
|
return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A safe form of malloc() which will die if the allocation fails.
|
|
|
|
*/
|
|
|
|
void *
|
|
|
|
zfs_alloc(libzfs_handle_t *hdl, size_t size)
|
|
|
|
{
|
|
|
|
void *data;
|
|
|
|
|
|
|
|
if ((data = calloc(1, size)) == NULL)
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
|
|
|
|
return (data);
|
|
|
|
}
|
|
|
|
|
2010-08-27 01:24:34 +04:00
|
|
|
/*
|
|
|
|
* A safe form of asprintf() which will die if the allocation fails.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
|
|
|
|
{
|
|
|
|
va_list ap;
|
|
|
|
char *ret;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
|
|
|
|
|
|
|
err = vasprintf(&ret, fmt, ap);
|
|
|
|
|
|
|
|
va_end(ap);
|
|
|
|
|
2021-05-03 13:30:16 +03:00
|
|
|
if (err < 0) {
|
2010-08-27 01:24:34 +04:00
|
|
|
(void) no_memory(hdl);
|
2021-05-03 13:30:16 +03:00
|
|
|
ret = NULL;
|
|
|
|
}
|
2010-08-27 01:24:34 +04:00
|
|
|
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* A safe form of realloc(), which also zeroes newly allocated space.
|
|
|
|
*/
|
|
|
|
void *
|
|
|
|
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
|
|
|
|
{
|
|
|
|
void *ret;
|
|
|
|
|
|
|
|
if ((ret = realloc(ptr, newsize)) == NULL) {
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2022-02-25 16:26:54 +03:00
|
|
|
memset((char *)ret + oldsize, 0, newsize - oldsize);
|
2008-11-20 23:01:55 +03:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A safe form of strdup() which will die if the allocation fails.
|
|
|
|
*/
|
|
|
|
char *
|
|
|
|
zfs_strdup(libzfs_handle_t *hdl, const char *str)
|
|
|
|
{
|
|
|
|
char *ret;
|
|
|
|
|
|
|
|
if ((ret = strdup(str)) == NULL)
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
|
|
|
|
{
|
|
|
|
hdl->libzfs_printerr = printerr;
|
|
|
|
}
|
|
|
|
|
2017-04-21 19:27:04 +03:00
|
|
|
/*
|
|
|
|
* Read lines from an open file descriptor and store them in an array of
|
|
|
|
* strings until EOF. lines[] will be allocated and populated with all the
|
|
|
|
* lines read. All newlines are replaced with NULL terminators for
|
|
|
|
* convenience. lines[] must be freed after use with libzfs_free_str_array().
|
|
|
|
*
|
|
|
|
* Returns the number of lines read.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
libzfs_read_stdout_from_fd(int fd, char **lines[])
|
|
|
|
{
|
|
|
|
|
|
|
|
FILE *fp;
|
|
|
|
int lines_cnt = 0;
|
|
|
|
size_t len = 0;
|
|
|
|
char *line = NULL;
|
|
|
|
char **tmp_lines = NULL, **tmp;
|
|
|
|
|
|
|
|
fp = fdopen(fd, "r");
|
2021-05-19 15:32:15 +03:00
|
|
|
if (fp == NULL) {
|
|
|
|
close(fd);
|
2017-04-21 19:27:04 +03:00
|
|
|
return (0);
|
2021-05-19 15:32:15 +03:00
|
|
|
}
|
2021-05-19 15:04:19 +03:00
|
|
|
while (getline(&line, &len, fp) != -1) {
|
2017-04-21 19:27:04 +03:00
|
|
|
tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1));
|
|
|
|
if (tmp == NULL) {
|
|
|
|
/* Return the lines we were able to process */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
tmp_lines = tmp;
|
|
|
|
|
2021-05-19 15:04:19 +03:00
|
|
|
/* Remove newline if not EOF */
|
|
|
|
if (line[strlen(line) - 1] == '\n')
|
|
|
|
line[strlen(line) - 1] = '\0';
|
|
|
|
|
|
|
|
tmp_lines[lines_cnt] = strdup(line);
|
|
|
|
if (tmp_lines[lines_cnt] == NULL)
|
|
|
|
break;
|
|
|
|
++lines_cnt;
|
2017-04-21 19:27:04 +03:00
|
|
|
}
|
2021-05-19 15:04:19 +03:00
|
|
|
free(line);
|
2017-04-21 19:27:04 +03:00
|
|
|
fclose(fp);
|
|
|
|
*lines = tmp_lines;
|
|
|
|
return (lines_cnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags,
|
|
|
|
char **lines[], int *lines_cnt)
|
2010-10-07 05:00:55 +04:00
|
|
|
{
|
|
|
|
pid_t pid;
|
2015-05-21 00:39:52 +03:00
|
|
|
int error, devnull_fd;
|
2017-04-21 19:27:04 +03:00
|
|
|
int link[2];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Setup a pipe between our child and parent process if we're
|
|
|
|
* reading stdout.
|
|
|
|
*/
|
2021-05-19 14:56:24 +03:00
|
|
|
if (lines != NULL && pipe2(link, O_NONBLOCK | O_CLOEXEC) == -1)
|
2019-10-23 23:48:31 +03:00
|
|
|
return (-EPIPE);
|
2010-10-07 05:00:55 +04:00
|
|
|
|
2021-05-08 14:17:04 +03:00
|
|
|
pid = fork();
|
2010-10-07 05:00:55 +04:00
|
|
|
if (pid == 0) {
|
2017-04-21 19:27:04 +03:00
|
|
|
/* Child process */
|
2021-04-08 23:17:38 +03:00
|
|
|
devnull_fd = open("/dev/null", O_WRONLY | O_CLOEXEC);
|
2011-07-29 12:17:46 +04:00
|
|
|
|
|
|
|
if (devnull_fd < 0)
|
|
|
|
_exit(-1);
|
|
|
|
|
2017-04-21 19:27:04 +03:00
|
|
|
if (!(flags & STDOUT_VERBOSE) && (lines == NULL))
|
2011-07-29 12:17:46 +04:00
|
|
|
(void) dup2(devnull_fd, STDOUT_FILENO);
|
2017-04-21 19:27:04 +03:00
|
|
|
else if (lines != NULL) {
|
|
|
|
/* Save the output to lines[] */
|
|
|
|
dup2(link[1], STDOUT_FILENO);
|
|
|
|
}
|
2011-03-07 21:10:20 +03:00
|
|
|
|
|
|
|
if (!(flags & STDERR_VERBOSE))
|
2011-07-29 12:17:46 +04:00
|
|
|
(void) dup2(devnull_fd, STDERR_FILENO);
|
|
|
|
|
2017-04-21 19:27:04 +03:00
|
|
|
if (flags & NO_DEFAULT_PATH) {
|
|
|
|
if (env == NULL)
|
|
|
|
execv(path, argv);
|
|
|
|
else
|
|
|
|
execve(path, argv, env);
|
|
|
|
} else {
|
|
|
|
if (env == NULL)
|
|
|
|
execvp(path, argv);
|
|
|
|
else
|
|
|
|
execvpe(path, argv, env);
|
|
|
|
}
|
|
|
|
|
2010-10-07 05:00:55 +04:00
|
|
|
_exit(-1);
|
|
|
|
} else if (pid > 0) {
|
2017-04-21 19:27:04 +03:00
|
|
|
/* Parent process */
|
2010-10-07 05:00:55 +04:00
|
|
|
int status;
|
|
|
|
|
2015-05-21 00:39:52 +03:00
|
|
|
while ((error = waitpid(pid, &status, 0)) == -1 &&
|
2021-05-19 14:56:24 +03:00
|
|
|
errno == EINTR)
|
|
|
|
;
|
2015-05-21 00:39:52 +03:00
|
|
|
if (error < 0 || !WIFEXITED(status))
|
2013-11-01 23:26:11 +04:00
|
|
|
return (-1);
|
2010-10-07 05:00:55 +04:00
|
|
|
|
2017-04-21 19:27:04 +03:00
|
|
|
if (lines != NULL) {
|
|
|
|
close(link[1]);
|
|
|
|
*lines_cnt = libzfs_read_stdout_from_fd(link[0], lines);
|
|
|
|
}
|
2013-11-01 23:26:11 +04:00
|
|
|
return (WEXITSTATUS(status));
|
2010-10-07 05:00:55 +04:00
|
|
|
}
|
|
|
|
|
2013-11-01 23:26:11 +04:00
|
|
|
return (-1);
|
2010-10-07 05:00:55 +04:00
|
|
|
}
|
|
|
|
|
2017-04-21 19:27:04 +03:00
|
|
|
int
|
|
|
|
libzfs_run_process(const char *path, char *argv[], int flags)
|
|
|
|
{
|
|
|
|
return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Run a command and store its stdout lines in an array of strings (lines[]).
|
|
|
|
* lines[] is allocated and populated for you, and the number of lines is set in
|
|
|
|
* lines_cnt. lines[] must be freed after use with libzfs_free_str_array().
|
|
|
|
* All newlines (\n) in lines[] are replaced with NULL terminators for
* convenience.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
|
|
|
|
char **lines[], int *lines_cnt)
|
|
|
|
{
|
|
|
|
return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt));
|
|
|
|
}
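/*
 * Example (hypothetical caller, not part of this file): capture a command's
 * stdout and release the array afterwards.  The command name is arbitrary;
 * any program reachable via $PATH works here.
 *
 *	char *argv[] = { "hostid", NULL };
 *	char **lines = NULL;
 *	int lines_cnt = 0;
 *
 *	if (libzfs_run_process_get_stdout("hostid", argv, NULL,
 *	    &lines, &lines_cnt) == 0) {
 *		for (int i = 0; i < lines_cnt; i++)
 *			(void) printf("%s\n", lines[i]);
 *		libzfs_free_str_array(lines, lines_cnt);
 *	}
 */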
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Same as libzfs_run_process_get_stdout(), but run without $PATH set. This
|
|
|
|
* means that *path needs to be the full path to the executable.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
|
|
|
|
char *env[], char **lines[], int *lines_cnt)
|
|
|
|
{
|
|
|
|
return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH,
|
|
|
|
lines, lines_cnt));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free an array of strings. Free both the strings contained in the array and
|
|
|
|
* the array itself.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
libzfs_free_str_array(char **strs, int count)
|
|
|
|
{
|
|
|
|
while (--count >= 0)
|
|
|
|
free(strs[count]);
|
|
|
|
|
|
|
|
free(strs);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns 1 if environment variable is set to "YES", "yes", "ON", "on", or
|
|
|
|
* a non-zero number.
|
|
|
|
*
|
|
|
|
* Returns 0 otherwise.
|
|
|
|
*/
|
2022-04-19 21:38:30 +03:00
|
|
|
boolean_t
|
|
|
|
libzfs_envvar_is_set(const char *envvar)
|
2017-04-21 19:27:04 +03:00
|
|
|
{
|
|
|
|
char *env = getenv(envvar);
|
2022-04-19 21:38:30 +03:00
|
|
|
return (env && (strtoul(env, NULL, 0) > 0 ||
|
2017-04-21 19:27:04 +03:00
|
|
|
(!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) ||
|
2022-04-19 21:38:30 +03:00
|
|
|
(!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2)));
|
2017-04-21 19:27:04 +03:00
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
libzfs_handle_t *
|
|
|
|
libzfs_init(void)
|
|
|
|
{
|
|
|
|
libzfs_handle_t *hdl;
|
2015-05-21 00:36:37 +03:00
|
|
|
int error;
|
2020-08-25 21:04:20 +03:00
|
|
|
char *env;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2020-10-13 19:38:40 +03:00
|
|
|
if ((error = libzfs_load_module()) != 0) {
|
2015-05-21 00:39:52 +03:00
|
|
|
errno = error;
|
2010-10-07 05:00:55 +04:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2010-08-27 01:24:34 +04:00
|
|
|
if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
|
2008-11-20 23:01:55 +03:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2020-04-28 20:55:18 +03:00
|
|
|
if (regcomp(&hdl->libzfs_urire, URI_REGEX, 0) != 0) {
|
|
|
|
free(hdl);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2021-03-10 02:00:43 +03:00
|
|
|
if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR|O_EXCL|O_CLOEXEC)) < 0) {
|
2008-11-20 23:01:55 +03:00
|
|
|
free(hdl);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2013-08-28 15:45:09 +04:00
|
|
|
if (libzfs_core_init() != 0) {
|
|
|
|
(void) close(hdl->libzfs_fd);
|
|
|
|
free(hdl);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
zfs_prop_init();
|
|
|
|
zpool_prop_init();
|
2012-12-14 03:24:15 +04:00
|
|
|
zpool_feature_init();
|
2021-11-30 17:46:25 +03:00
|
|
|
vdev_prop_init();
|
2009-07-03 02:44:48 +04:00
|
|
|
libzfs_mnttab_init(hdl);
|
2016-09-25 11:35:12 +03:00
|
|
|
fletcher_4_init();
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2018-02-08 19:16:23 +03:00
|
|
|
if (getenv("ZFS_PROP_DEBUG") != NULL) {
|
|
|
|
hdl->libzfs_prop_debug = B_TRUE;
|
|
|
|
}
|
2020-08-25 21:04:20 +03:00
|
|
|
if ((env = getenv("ZFS_SENDRECV_MAX_NVLIST")) != NULL) {
|
|
|
|
if ((error = zfs_nicestrtonum(hdl, env,
|
|
|
|
&hdl->libzfs_max_nvlist))) {
|
|
|
|
errno = error;
|
2020-09-18 20:23:29 +03:00
|
|
|
(void) close(hdl->libzfs_fd);
|
|
|
|
free(hdl);
|
2020-08-25 21:04:20 +03:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
hdl->libzfs_max_nvlist = (SPA_MAXBLOCKSIZE * 4);
|
|
|
|
}
|
2018-02-08 19:16:23 +03:00
|
|
|
|
2018-09-02 22:09:53 +03:00
|
|
|
/*
|
|
|
|
* For testing, remove some settable properties and features
|
|
|
|
*/
|
|
|
|
if (libzfs_envvar_is_set("ZFS_SYSFS_PROP_SUPPORT_TEST")) {
|
|
|
|
zprop_desc_t *proptbl;
|
|
|
|
|
|
|
|
proptbl = zpool_prop_get_table();
|
|
|
|
proptbl[ZPOOL_PROP_COMMENT].pd_zfs_mod_supported = B_FALSE;
|
|
|
|
|
|
|
|
proptbl = zfs_prop_get_table();
|
|
|
|
proptbl[ZFS_PROP_DNODESIZE].pd_zfs_mod_supported = B_FALSE;
|
|
|
|
|
|
|
|
zfeature_info_t *ftbl = spa_feature_table;
|
|
|
|
ftbl[SPA_FEATURE_LARGE_BLOCKS].fi_zfs_mod_supported = B_FALSE;
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
return (hdl);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
libzfs_fini(libzfs_handle_t *hdl)
|
|
|
|
{
|
|
|
|
(void) close(hdl->libzfs_fd);
|
2008-12-03 23:09:06 +03:00
|
|
|
zpool_free_handles(hdl);
|
2008-11-20 23:01:55 +03:00
|
|
|
namespace_clear(hdl);
|
2009-07-03 02:44:48 +04:00
|
|
|
libzfs_mnttab_fini(hdl);
|
2013-08-28 15:45:09 +04:00
|
|
|
libzfs_core_fini();
|
2020-04-28 20:55:18 +03:00
|
|
|
regfree(&hdl->libzfs_urire);
|
2016-09-25 11:35:12 +03:00
|
|
|
fletcher_4_fini();
|
2021-05-13 07:21:35 +03:00
|
|
|
#if LIBFETCH_DYNAMIC
|
|
|
|
if (hdl->libfetch != (void *)-1 && hdl->libfetch != NULL)
|
|
|
|
(void) dlclose(hdl->libfetch);
|
|
|
|
free(hdl->libfetch_load_error);
|
|
|
|
#endif
|
2008-11-20 23:01:55 +03:00
|
|
|
free(hdl);
|
|
|
|
}
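/*
 * Example (hypothetical caller, not part of this file): the typical lifetime
 * of a libzfs handle.
 *
 *	libzfs_handle_t *hdl = libzfs_init();
 *	if (hdl == NULL) {
 *		(void) fprintf(stderr, "libzfs_init failed: %s\n",
 *		    strerror(errno));
 *		return (1);
 *	}
 *	libzfs_print_on_error(hdl, B_TRUE);
 *	... use the handle with zfs_open(), zpool_open(), etc. ...
 *	libzfs_fini(hdl);
 */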
|
|
|
|
|
|
|
|
libzfs_handle_t *
|
|
|
|
zpool_get_handle(zpool_handle_t *zhp)
|
|
|
|
{
|
|
|
|
return (zhp->zpool_hdl);
|
|
|
|
}
|
|
|
|
|
|
|
|
libzfs_handle_t *
|
|
|
|
zfs_get_handle(zfs_handle_t *zhp)
|
|
|
|
{
|
|
|
|
return (zhp->zfs_hdl);
|
|
|
|
}
|
|
|
|
|
2008-12-03 23:09:06 +03:00
|
|
|
zpool_handle_t *
|
|
|
|
zfs_get_pool_handle(const zfs_handle_t *zhp)
|
|
|
|
{
|
|
|
|
return (zhp->zpool_hdl);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Given a name, determine whether or not it's a valid path
|
|
|
|
* (starts with '/' or "./"). If so, walk the mnttab trying
|
|
|
|
* to match the device number. If not, treat the path as an
|
2017-01-27 01:42:15 +03:00
|
|
|
* fs/vol/snap/bkmark name.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
|
|
|
zfs_handle_t *
|
2020-07-22 21:14:20 +03:00
|
|
|
zfs_path_to_zhandle(libzfs_handle_t *hdl, const char *path, zfs_type_t argtype)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
|
|
|
struct stat64 statbuf;
|
|
|
|
struct extmnttab entry;
|
|
|
|
|
|
|
|
if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
|
|
|
|
/*
|
|
|
|
* It's not a valid path, assume it's a name of type 'argtype'.
|
|
|
|
*/
|
|
|
|
return (zfs_open(hdl, path, argtype));
|
|
|
|
}
|
|
|
|
|
2019-10-02 20:39:48 +03:00
|
|
|
if (getextmntent(path, &entry, &statbuf) != 0)
|
2008-11-20 23:01:55 +03:00
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
|
|
|
|
(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
|
|
|
|
path);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
|
|
|
|
}
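/*
 * Example (hypothetical caller, not part of this file): map a mounted path
 * back to its dataset.  The path "/tank/home" is illustrative.
 *
 *	zfs_handle_t *zhp = zfs_path_to_zhandle(hdl, "/tank/home",
 *	    ZFS_TYPE_FILESYSTEM);
 *	if (zhp != NULL) {
 *		(void) printf("%s\n", zfs_get_name(zhp));
 *		zfs_close(zhp);
 *	}
 */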
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
|
|
|
|
* an ioctl().
|
|
|
|
*/
|
2022-03-16 21:51:28 +03:00
|
|
|
void
|
2008-11-20 23:01:55 +03:00
|
|
|
zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
|
|
|
|
{
|
|
|
|
if (len == 0)
|
2019-07-30 19:59:38 +03:00
|
|
|
len = 256 * 1024;
|
2008-11-20 23:01:55 +03:00
|
|
|
zc->zc_nvlist_dst_size = len;
|
2015-07-06 02:11:09 +03:00
|
|
|
zc->zc_nvlist_dst =
|
|
|
|
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
|
|
|
|
* expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
|
|
|
|
* filled in by the kernel to indicate the actual required size.
|
|
|
|
*/
|
2022-03-16 21:51:28 +03:00
|
|
|
void
|
2008-11-20 23:01:55 +03:00
|
|
|
zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
|
|
|
|
{
|
|
|
|
free((void *)(uintptr_t)zc->zc_nvlist_dst);
|
2015-07-06 02:11:09 +03:00
|
|
|
zc->zc_nvlist_dst =
|
|
|
|
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called to free the src and dst nvlists stored in the command structure.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
zcmd_free_nvlists(zfs_cmd_t *zc)
|
|
|
|
{
|
|
|
|
free((void *)(uintptr_t)zc->zc_nvlist_conf);
|
|
|
|
free((void *)(uintptr_t)zc->zc_nvlist_src);
|
|
|
|
free((void *)(uintptr_t)zc->zc_nvlist_dst);
|
2015-07-06 02:11:09 +03:00
|
|
|
zc->zc_nvlist_conf = 0;
|
|
|
|
zc->zc_nvlist_src = 0;
|
|
|
|
zc->zc_nvlist_dst = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2022-03-16 21:51:28 +03:00
|
|
|
static void
|
2008-11-20 23:01:55 +03:00
|
|
|
zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
|
|
|
|
nvlist_t *nvl)
|
|
|
|
{
|
|
|
|
char *packed;
|
|
|
|
|
2022-03-15 01:44:56 +03:00
|
|
|
size_t len = fnvlist_size(nvl);
|
2022-03-16 21:51:28 +03:00
|
|
|
packed = zfs_alloc(hdl, len);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
|
|
|
|
|
|
|
|
*outnv = (uint64_t)(uintptr_t)packed;
|
|
|
|
*outlen = len;
|
|
|
|
}
|
|
|
|
|
2022-03-16 21:51:28 +03:00
|
|
|
void
|
2008-11-20 23:01:55 +03:00
|
|
|
zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
|
|
|
|
{
|
2022-03-16 21:51:28 +03:00
|
|
|
zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
|
|
|
|
&zc->zc_nvlist_conf_size, nvl);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
2022-03-16 21:51:28 +03:00
|
|
|
void
|
2008-11-20 23:01:55 +03:00
|
|
|
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
|
|
|
|
{
|
2022-03-16 21:51:28 +03:00
|
|
|
zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
|
|
|
|
&zc->zc_nvlist_src_size, nvl);
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unpacks an nvlist from the ZFS ioctl command structure.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
|
|
|
|
{
|
|
|
|
if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
|
|
|
|
zc->zc_nvlist_dst_size, nvlp, 0) != 0)
|
|
|
|
return (no_memory(hdl));
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
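/*
 * Example (hypothetical caller, not part of this file): the usual pattern for
 * legacy ioctls that return an nvlist.  The ioctl number and the pool name in
 * zc_name are illustrative; the expand-on-ENOMEM loop is the point being
 * demonstrated.
 *
 *	zfs_cmd_t zc = {"\0"};
 *	nvlist_t *nvl = NULL;
 *
 *	(void) strlcpy(zc.zc_name, "tank", sizeof (zc.zc_name));
 *	zcmd_alloc_dst_nvlist(hdl, &zc, 0);
 *	while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
 *		if (errno != ENOMEM) {
 *			zcmd_free_nvlists(&zc);
 *			return (-1);
 *		}
 *		zcmd_expand_dst_nvlist(hdl, &zc);
 *	}
 *	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
 *		zcmd_free_nvlists(&zc);
 *		return (-1);
 *	}
 *	zcmd_free_nvlists(&zc);
 */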
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ================================================================
|
|
|
|
* API shared by zfs and zpool property management
|
|
|
|
* ================================================================
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void
|
|
|
|
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
|
|
|
|
{
|
Cleanup: Address Clang's static analyzer's unused code complaints
These were categorized as the following:
* Dead assignment 23
* Dead increment 4
* Dead initialization 6
* Dead nested assignment 18
Most of these are harmless, but since actual issues can hide among them,
we correct them.
That said, there were a few return values that were being ignored that
appeared to merit some correction:
* `destroy_callback()` in `cmd/zfs/zfs_main.c` ignored the error from
`destroy_batched()`. We handle it by returning -1 if there is an
error.
* `zfs_do_upgrade()` in `cmd/zfs/zfs_main.c` ignored the error from
`zfs_for_each()`. We handle it by doing a binary OR of the error
value from the subsequent `zfs_for_each()` call to the existing
value. This is how errors are mostly handled inside `zfs_for_each()`.
The error value here is passed to exit from the zfs command, so doing
a binary or on it is better than what we did previously.
* `get_zap_prop()` in `module/zfs/zcp_get.c` ignored the error from
`dsl_prop_get_ds()` when the property is not of type string. We
return an error when it does. There is a small concern that the
`zfs_get_temporary_prop()` call would handle things, but in the case
that it does not, we would be pushing an uninitialized numval onto
the lua stack. It is expected that `dsl_prop_get_ds()` will succeed
anytime that `zfs_get_temporary_prop()` does, so that not giving it a
chance to fix things is not a problem.
* `draid_merge_impl()` in `tests/zfs-tests/cmd/draid.c` used
`nvlist_add_nvlist()` twice in ways in which errors are expected to
be impossible, so we switch to `fnvlist_add_nvlist()`.
A few notable ones did not merit use of the return value, so we
suppressed it with `(void)`:
* `write_free_diffs()` in `lib/libzfs/libzfs_diff.c` ignored the error
value from `describe_free()`. A look through the commit history
revealed that this was intentional.
* `arc_evict_hdr()` in `module/zfs/arc.c` did not need to use the
returned handle from `arc_hdr_realloc()` because it is already
referenced in lists.
* `spa_vdev_detach()` in `module/zfs/spa.c` has a comment explicitly
saying not to use the error from `vdev_label_init()` because whatever
causes the error could be the reason why a detach is being done.
Unfortunately, I am not presently able to analyze the kernel modules
with Clang's static analyzer, so I could have missed some cases of this.
In cases where reports were present in code that is duplicated between
Linux and FreeBSD, I made a conscious effort to fix the FreeBSD version
too.
After this commit is merged, regressions like dee8934 should become
extremely obvious with Clang's static analyzer since a regression would
appear in the results as the only instance of unused code. That assumes
that Coverity does not catch the issue first.
My local branch with fixes from all of my outstanding non-draft pull
requests shows 118 reports from Clang's static analyzer after this
patch. That is down by 51 from 169.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Cedric Berger <cedric@precidata.com>
Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Closes #13986
2022-10-14 23:37:54 +03:00
|
|
|
zprop_list_t *pl;
|
2008-11-20 23:01:55 +03:00
|
|
|
int i;
|
|
|
|
char *title;
|
|
|
|
size_t len;
|
|
|
|
|
|
|
|
cbp->cb_first = B_FALSE;
|
|
|
|
if (cbp->cb_scripted)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start with the length of the column headers.
|
|
|
|
*/
|
|
|
|
cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
|
|
|
|
cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
|
|
|
|
"PROPERTY"));
|
|
|
|
cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
|
|
|
|
"VALUE"));
|
2010-05-29 00:45:14 +04:00
|
|
|
cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
|
|
|
|
"RECEIVED"));
|
2008-11-20 23:01:55 +03:00
|
|
|
cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
|
|
|
|
"SOURCE"));
|
|
|
|
|
2009-01-16 00:59:39 +03:00
|
|
|
/* first property is always NAME */
|
|
|
|
assert(cbp->cb_proplist->pl_prop ==
|
2021-11-30 17:46:25 +03:00
|
|
|
((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
|
|
|
|
((type == ZFS_TYPE_VDEV) ? VDEV_PROP_NAME : ZFS_PROP_NAME)));
|
2009-01-16 00:59:39 +03:00
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Go through and calculate the widths for each column. For the
|
|
|
|
* 'source' column, we kludge it up by taking the worst-case scenario of
|
|
|
|
* inheriting from the longest name. This is acceptable because in the
|
|
|
|
* majority of cases 'SOURCE' is the last column displayed, and we don't
|
|
|
|
* use the width anyway. Note that the 'VALUE' column can be oversized,
|
2010-05-29 00:45:14 +04:00
|
|
|
* if the name of the property is much longer than any values we find.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
|
|
|
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
|
|
|
|
/*
|
|
|
|
* 'PROPERTY' column
|
|
|
|
*/
|
2022-06-14 21:27:53 +03:00
|
|
|
if (pl->pl_prop != ZPROP_USERPROP) {
|
2008-11-20 23:01:55 +03:00
|
|
|
const char *propname = (type == ZFS_TYPE_POOL) ?
|
|
|
|
zpool_prop_to_name(pl->pl_prop) :
|
2021-11-30 17:46:25 +03:00
|
|
|
((type == ZFS_TYPE_VDEV) ?
|
|
|
|
vdev_prop_to_name(pl->pl_prop) :
|
|
|
|
zfs_prop_to_name(pl->pl_prop));
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2021-11-30 17:46:25 +03:00
|
|
|
assert(propname != NULL);
|
2008-11-20 23:01:55 +03:00
|
|
|
len = strlen(propname);
|
|
|
|
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
|
|
|
|
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
|
|
|
|
} else {
|
2021-11-30 17:46:25 +03:00
|
|
|
assert(pl->pl_user_prop != NULL);
|
2008-11-20 23:01:55 +03:00
|
|
|
len = strlen(pl->pl_user_prop);
|
|
|
|
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
|
|
|
|
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2009-01-16 00:59:39 +03:00
|
|
|
* 'VALUE' column. The first property is always the 'name'
|
|
|
|
* property that was tacked on either by /sbin/zfs's
|
|
|
|
* zfs_do_get() or when calling zprop_expand_list(), so we
|
|
|
|
* ignore its width. If the user specified the name property
|
|
|
|
* to display, then it will be later in the list in any case.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
2009-01-16 00:59:39 +03:00
|
|
|
if (pl != cbp->cb_proplist &&
|
2008-11-20 23:01:55 +03:00
|
|
|
pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
|
|
|
|
cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
/* 'RECEIVED' column. */
|
|
|
|
if (pl != cbp->cb_proplist &&
|
|
|
|
pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
|
|
|
|
cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* 'NAME' and 'SOURCE' columns
|
|
|
|
*/
|
2021-11-30 17:46:25 +03:00
|
|
|
if (pl->pl_prop == ((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
|
|
|
|
((type == ZFS_TYPE_VDEV) ? VDEV_PROP_NAME :
|
|
|
|
ZFS_PROP_NAME)) && pl->pl_width >
|
|
|
|
cbp->cb_colwidths[GET_COL_NAME]) {
|
2008-11-20 23:01:55 +03:00
|
|
|
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
|
|
|
|
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
|
|
|
|
strlen(dgettext(TEXT_DOMAIN, "inherited from"));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now go through and print the headers.
|
|
|
|
*/
|
2010-05-29 00:45:14 +04:00
|
|
|
for (i = 0; i < ZFS_GET_NCOLS; i++) {
|
2008-11-20 23:01:55 +03:00
|
|
|
switch (cbp->cb_columns[i]) {
|
|
|
|
case GET_COL_NAME:
|
|
|
|
title = dgettext(TEXT_DOMAIN, "NAME");
|
|
|
|
break;
|
|
|
|
case GET_COL_PROPERTY:
|
|
|
|
title = dgettext(TEXT_DOMAIN, "PROPERTY");
|
|
|
|
break;
|
|
|
|
case GET_COL_VALUE:
|
|
|
|
title = dgettext(TEXT_DOMAIN, "VALUE");
|
|
|
|
break;
|
2010-05-29 00:45:14 +04:00
|
|
|
case GET_COL_RECVD:
|
|
|
|
title = dgettext(TEXT_DOMAIN, "RECEIVED");
|
|
|
|
break;
|
2008-11-20 23:01:55 +03:00
|
|
|
case GET_COL_SOURCE:
|
|
|
|
title = dgettext(TEXT_DOMAIN, "SOURCE");
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
title = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (title != NULL) {
|
2010-05-29 00:45:14 +04:00
|
|
|
if (i == (ZFS_GET_NCOLS - 1) ||
|
|
|
|
cbp->cb_columns[i + 1] == GET_COL_NONE)
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) printf("%s", title);
|
|
|
|
else
|
|
|
|
(void) printf("%-*s ",
|
|
|
|
cbp->cb_colwidths[cbp->cb_columns[i]],
|
|
|
|
title);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
(void) printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Display a single line of output, according to the settings in the callback
|
|
|
|
* structure.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
|
|
|
|
const char *propname, const char *value, zprop_source_t sourcetype,
|
2010-05-29 00:45:14 +04:00
|
|
|
const char *source, const char *recvd_value)
|
2008-11-20 23:01:55 +03:00
|
|
|
{
|
|
|
|
int i;
|
2010-08-26 20:58:04 +04:00
|
|
|
const char *str = NULL;
|
2008-11-20 23:01:55 +03:00
|
|
|
char buf[128];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ignore those source types that the user has chosen to ignore.
|
|
|
|
*/
|
|
|
|
if ((sourcetype & cbp->cb_sources) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (cbp->cb_first)
|
|
|
|
zprop_print_headers(cbp, cbp->cb_type);
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
for (i = 0; i < ZFS_GET_NCOLS; i++) {
|
2008-11-20 23:01:55 +03:00
|
|
|
switch (cbp->cb_columns[i]) {
|
|
|
|
case GET_COL_NAME:
|
|
|
|
str = name;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case GET_COL_PROPERTY:
|
|
|
|
str = propname;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case GET_COL_VALUE:
|
|
|
|
str = value;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case GET_COL_SOURCE:
|
|
|
|
switch (sourcetype) {
|
|
|
|
case ZPROP_SRC_NONE:
|
|
|
|
str = "-";
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ZPROP_SRC_DEFAULT:
|
|
|
|
str = "default";
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ZPROP_SRC_LOCAL:
|
|
|
|
str = "local";
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ZPROP_SRC_TEMPORARY:
|
|
|
|
str = "temporary";
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ZPROP_SRC_INHERITED:
|
|
|
|
(void) snprintf(buf, sizeof (buf),
|
|
|
|
"inherited from %s", source);
|
|
|
|
str = buf;
|
|
|
|
break;
|
2010-05-29 00:45:14 +04:00
|
|
|
case ZPROP_SRC_RECEIVED:
|
|
|
|
str = "received";
|
|
|
|
break;
|
2017-02-08 01:02:27 +03:00
|
|
|
|
|
|
|
default:
|
|
|
|
str = NULL;
|
|
|
|
assert(!"unhandled zprop_source_t");
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
case GET_COL_RECVD:
|
|
|
|
str = (recvd_value == NULL ? "-" : recvd_value);
|
|
|
|
break;
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
default:
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-08-22 06:20:22 +03:00
|
|
|
if (i == (ZFS_GET_NCOLS - 1) ||
|
|
|
|
cbp->cb_columns[i + 1] == GET_COL_NONE)
|
2008-11-20 23:01:55 +03:00
|
|
|
(void) printf("%s", str);
|
|
|
|
else if (cbp->cb_scripted)
|
|
|
|
(void) printf("%s\t", str);
|
|
|
|
else
|
|
|
|
(void) printf("%-*s ",
|
|
|
|
cbp->cb_colwidths[cbp->cb_columns[i]],
|
|
|
|
str);
|
|
|
|
}
|
|
|
|
|
|
|
|
(void) printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a numeric suffix, convert the value into a number of bits that the
|
|
|
|
* resulting value must be shifted.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
str2shift(libzfs_handle_t *hdl, const char *buf)
|
|
|
|
{
|
|
|
|
const char *ends = "BKMGTPEZ";
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (buf[0] == '\0')
|
|
|
|
return (0);
|
|
|
|
for (i = 0; i < strlen(ends); i++) {
|
|
|
|
if (toupper(buf[0]) == ends[i])
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == strlen(ends)) {
|
2012-11-29 09:56:07 +04:00
|
|
|
if (hdl)
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid numeric suffix '%s'"), buf);
|
2008-11-20 23:01:55 +03:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2011-04-11 03:08:53 +04:00
|
|
|
* Allow 'G' = 'GB' = 'GiB', case-insensitively.
|
|
|
|
* However, 'BB' and 'BiB' are disallowed.
|
2008-11-20 23:01:55 +03:00
|
|
|
*/
|
2011-04-11 03:08:53 +04:00
|
|
|
if (buf[1] == '\0' ||
|
|
|
|
(toupper(buf[0]) != 'B' &&
|
2013-11-01 23:26:11 +04:00
|
|
|
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
|
|
|
|
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
|
|
|
|
buf[3] == '\0'))))
|
|
|
|
return (10 * i);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
2012-11-29 09:56:07 +04:00
|
|
|
if (hdl)
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid numeric suffix '%s'"), buf);
|
2008-11-20 23:01:55 +03:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert a string of the form '100G' into a real number. Used when setting
|
|
|
|
* properties or creating a volume. 'buf' is used to place an extended error
|
|
|
|
* message for the caller to use.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
|
|
|
|
{
|
|
|
|
char *end;
|
|
|
|
int shift;
|
|
|
|
|
|
|
|
*num = 0;
|
|
|
|
|
|
|
|
/* Check to see if this looks like a number. */
|
|
|
|
if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
|
|
|
|
if (hdl)
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"bad numeric value '%s'"), value);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2010-05-29 00:45:14 +04:00
|
|
|
/* Rely on strtoull() to process the numeric portion. */
|
2008-11-20 23:01:55 +03:00
|
|
|
errno = 0;
|
2009-02-18 23:51:31 +03:00
|
|
|
*num = strtoull(value, &end, 10);
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for ERANGE, which indicates that the value is too large to fit
|
|
|
|
* in a 64-bit value.
|
|
|
|
*/
|
|
|
|
if (errno == ERANGE) {
|
|
|
|
if (hdl)
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"numeric value is too large"));
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we have a decimal value, then do the computation with floating
|
|
|
|
* point arithmetic. Otherwise, use standard arithmetic.
|
|
|
|
*/
|
|
|
|
if (*end == '.') {
|
|
|
|
double fval = strtod(value, &end);
|
|
|
|
|
|
|
|
if ((shift = str2shift(hdl, end)) == -1)
|
|
|
|
return (-1);
|
|
|
|
|
|
|
|
fval *= pow(2, shift);
|
|
|
|
|
2020-03-16 21:56:29 +03:00
|
|
|
/*
|
|
|
|
* UINT64_MAX is not exactly representable as a double.
|
|
|
|
* The closest representation is UINT64_MAX + 1, so we
|
|
|
|
* use a >= comparison instead of > for the bounds check.
|
|
|
|
*/
|
|
|
|
if (fval >= (double)UINT64_MAX) {
|
2008-11-20 23:01:55 +03:00
|
|
|
if (hdl)
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"numeric value is too large"));
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
*num = (uint64_t)fval;
|
|
|
|
} else {
|
|
|
|
if ((shift = str2shift(hdl, end)) == -1)
|
|
|
|
return (-1);
|
|
|
|
|
|
|
|
/* Check for overflow */
|
|
|
|
if (shift >= 64 || (*num << shift) >> shift != *num) {
|
|
|
|
if (hdl)
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"numeric value is too large"));
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
*num <<= shift;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
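/*
 * Example (hypothetical caller, not part of this file):
 *
 *	uint64_t num;
 *
 *	if (zfs_nicestrtonum(hdl, "1.5G", &num) == 0)
 *		... num is now 1610612736, i.e. 1.5 * 2^30 ...
 *
 * Passing NULL for 'hdl' is allowed; it only suppresses the extended error
 * message on failure.
 */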
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a propname=value nvpair to set, parse any numeric properties
|
|
|
|
* (index, boolean, etc) if they are specified as strings and add the
|
|
|
|
* resulting nvpair to the returned nvlist.
|
|
|
|
*
|
|
|
|
* At the DSL layer, all properties are either 64-bit numbers or strings.
|
|
|
|
* We want the user to be able to ignore this fact and specify properties
|
|
|
|
* as native values (numbers, for example) or as strings (to simplify
|
|
|
|
* command line utilities). This also handles converting index types
|
|
|
|
* (compression, checksum, etc) from strings to their on-disk index.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
|
2023-03-11 21:39:24 +03:00
|
|
|
zfs_type_t type, nvlist_t *ret, const char **svalp, uint64_t *ivalp,
|
2008-11-20 23:01:55 +03:00
|
|
|
const char *errbuf)
|
|
|
|
{
|
|
|
|
data_type_t datatype = nvpair_type(elem);
|
|
|
|
zprop_type_t proptype;
|
|
|
|
const char *propname;
|
2023-03-11 21:39:24 +03:00
|
|
|
const char *value;
|
2008-11-20 23:01:55 +03:00
|
|
|
boolean_t isnone = B_FALSE;
|
2018-04-11 19:14:45 +03:00
|
|
|
boolean_t isauto = B_FALSE;
|
2016-10-14 21:00:47 +03:00
|
|
|
int err = 0;
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
if (type == ZFS_TYPE_POOL) {
|
|
|
|
proptype = zpool_prop_get_type(prop);
|
|
|
|
propname = zpool_prop_to_name(prop);
|
2021-11-30 17:46:25 +03:00
|
|
|
} else if (type == ZFS_TYPE_VDEV) {
|
|
|
|
proptype = vdev_prop_get_type(prop);
|
|
|
|
propname = vdev_prop_to_name(prop);
|
2008-11-20 23:01:55 +03:00
|
|
|
} else {
|
|
|
|
proptype = zfs_prop_get_type(prop);
|
|
|
|
propname = zfs_prop_to_name(prop);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Convert any properties to the internal DSL value types.
|
|
|
|
*/
|
|
|
|
*svalp = NULL;
|
|
|
|
*ivalp = 0;
|
|
|
|
|
|
|
|
switch (proptype) {
|
|
|
|
case PROP_TYPE_STRING:
|
|
|
|
if (datatype != DATA_TYPE_STRING) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' must be a string"), nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
2016-10-14 21:00:47 +03:00
|
|
|
err = nvpair_value_string(elem, svalp);
|
|
|
|
if (err != 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' is invalid"), nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
2008-11-20 23:01:55 +03:00
|
|
|
if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' is too long"), nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PROP_TYPE_NUMBER:
|
|
|
|
if (datatype == DATA_TYPE_STRING) {
|
|
|
|
(void) nvpair_value_string(elem, &value);
|
|
|
|
if (strcmp(value, "none") == 0) {
|
|
|
|
isnone = B_TRUE;
|
2018-04-11 19:14:45 +03:00
|
|
|
} else if (strcmp(value, "auto") == 0) {
|
|
|
|
isauto = B_TRUE;
|
|
|
|
} else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) {
|
2008-11-20 23:01:55 +03:00
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
} else if (datatype == DATA_TYPE_UINT64) {
|
|
|
|
(void) nvpair_value_uint64(elem, ivalp);
|
|
|
|
} else {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' must be a number"), nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Quota special: force 'none' and don't allow 0.
|
|
|
|
*/
|
|
|
|
if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
|
|
|
|
(prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"use 'none' to disable quota/refquota"));
|
|
|
|
goto error;
|
|
|
|
}
|
2015-04-01 16:07:48 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Special handling for "*_limit=none". In this case it's not
|
|
|
|
* 0 but UINT64_MAX.
|
|
|
|
*/
|
|
|
|
if ((type & ZFS_TYPE_DATASET) && isnone &&
|
|
|
|
(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
|
|
|
|
prop == ZFS_PROP_SNAPSHOT_LIMIT)) {
|
|
|
|
*ivalp = UINT64_MAX;
|
|
|
|
}
|
2023-01-24 00:14:25 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Special handling for "checksum_*=none" and "io_*=none".  In this
* case it's not
|
|
|
|
* 0 but UINT64_MAX.
|
|
|
|
*/
|
|
|
|
if ((type & ZFS_TYPE_VDEV) && isnone &&
|
|
|
|
(prop == VDEV_PROP_CHECKSUM_N ||
|
|
|
|
prop == VDEV_PROP_CHECKSUM_T ||
|
|
|
|
prop == VDEV_PROP_IO_N ||
|
|
|
|
prop == VDEV_PROP_IO_T)) {
|
|
|
|
*ivalp = UINT64_MAX;
|
|
|
|
}
|
2018-04-11 19:14:45 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Special handling for setting 'refreservation' to 'auto'. Use
|
|
|
|
* UINT64_MAX to tell the caller to use zfs_fix_auto_resv().
|
|
|
|
* 'auto' is only allowed on volumes.
|
|
|
|
*/
|
|
|
|
if (isauto) {
|
|
|
|
switch (prop) {
|
|
|
|
case ZFS_PROP_REFRESERVATION:
|
|
|
|
if ((type & ZFS_TYPE_VOLUME) == 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s=auto' only allowed on "
|
|
|
|
"volumes"), nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
*ivalp = UINT64_MAX;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'auto' is invalid value for '%s'"),
|
|
|
|
nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case PROP_TYPE_INDEX:
|
|
|
|
if (datatype != DATA_TYPE_STRING) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' must be a string"), nvpair_name(elem));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
|
|
|
|
(void) nvpair_value_string(elem, &value);
|
|
|
|
|
|
|
|
if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"'%s' must be one of '%s'"), propname,
|
|
|
|
zprop_values(prop, type));
|
|
|
|
goto error;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add the result to our return set of properties.
|
|
|
|
*/
|
|
|
|
if (*svalp != NULL) {
|
|
|
|
if (nvlist_add_string(ret, propname, *svalp) != 0) {
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
|
|
|
|
(void) no_memory(hdl);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
error:
|
|
|
|
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2008-12-03 23:09:06 +03:00
|
|
|
static int
|
2022-01-23 02:12:27 +03:00
|
|
|
addlist(libzfs_handle_t *hdl, const char *propname, zprop_list_t **listp,
|
2008-12-03 23:09:06 +03:00
|
|
|
zfs_type_t type)
|
|
|
|
{
|
2022-01-23 02:12:27 +03:00
|
|
|
int prop = zprop_name_to_prop(propname, type);
|
2014-04-21 22:22:08 +04:00
|
|
|
if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE))
|
2008-12-03 23:09:06 +03:00
|
|
|
prop = ZPROP_INVAL;
|
|
|
|
|
|
|
|
/*
|
2021-11-30 17:46:25 +03:00
|
|
|
* Return failure if no property table entry was found and this isn't
|
|
|
|
* a user-defined property.
|
2008-12-03 23:09:06 +03:00
|
|
|
*/
|
2022-06-14 21:27:53 +03:00
|
|
|
if (prop == ZPROP_USERPROP && ((type == ZFS_TYPE_POOL &&
|
2023-04-21 20:20:36 +03:00
|
|
|
!zfs_prop_user(propname) &&
|
2012-12-14 03:24:15 +04:00
|
|
|
!zpool_prop_feature(propname) &&
|
|
|
|
!zpool_prop_unsupported(propname)) ||
|
2021-11-30 17:46:25 +03:00
|
|
|
((type == ZFS_TYPE_DATASET) && !zfs_prop_user(propname) &&
|
|
|
|
!zfs_prop_userquota(propname) && !zfs_prop_written(propname)) ||
|
|
|
|
((type == ZFS_TYPE_VDEV) && !vdev_prop_user(propname)))) {
|
2008-12-03 23:09:06 +03:00
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"invalid property '%s'"), propname);
|
|
|
|
return (zfs_error(hdl, EZFS_BADPROP,
|
|
|
|
dgettext(TEXT_DOMAIN, "bad property list")));
|
|
|
|
}
|
|
|
|
|
2022-01-23 02:12:27 +03:00
|
|
|
zprop_list_t *entry = zfs_alloc(hdl, sizeof (*entry));
|
|
|
|
|
2008-12-03 23:09:06 +03:00
|
|
|
entry->pl_prop = prop;
|
2022-06-14 21:27:53 +03:00
|
|
|
if (prop == ZPROP_USERPROP) {
|
2022-02-20 06:06:43 +03:00
|
|
|
entry->pl_user_prop = zfs_strdup(hdl, propname);
|
2008-12-03 23:09:06 +03:00
|
|
|
entry->pl_width = strlen(propname);
|
|
|
|
} else {
|
|
|
|
entry->pl_width = zprop_width(prop, &entry->pl_fixed,
|
|
|
|
type);
|
|
|
|
}
|
|
|
|
|
|
|
|
*listp = entry;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2008-11-20 23:01:55 +03:00
|
|
|
/*
|
|
|
|
* Given a comma-separated list of properties, construct a property list
|
|
|
|
* containing both user-defined and native properties. This function will
|
|
|
|
* return a NULL list if 'all' is specified, which can later be expanded
|
|
|
|
* by zprop_expand_list().
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
|
|
|
|
zfs_type_t type)
|
|
|
|
{
|
|
|
|
*listp = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If 'all' is specified, return a NULL list.
|
|
|
|
*/
|
|
|
|
if (strcmp(props, "all") == 0)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If no props were specified, return an error.
|
|
|
|
*/
|
|
|
|
if (props[0] == '\0') {
|
|
|
|
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
|
|
|
|
"no properties specified"));
|
|
|
|
return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
|
|
|
|
"bad property list")));
|
|
|
|
}
|
|
|
|
|
2022-01-23 02:12:27 +03:00
|
|
|
for (char *p; (p = strsep(&props, ",")); )
|
|
|
|
if (strcmp(p, "space") == 0) {
|
|
|
|
static const char *const spaceprops[] = {
|
2008-12-03 23:09:06 +03:00
|
|
|
"name", "avail", "used", "usedbysnapshots",
|
|
|
|
"usedbydataset", "usedbyrefreservation",
|
2022-01-23 02:12:27 +03:00
|
|
|
"usedbychildren"
|
2008-12-03 23:09:06 +03:00
|
|
|
};
|
|
|
|
|
2022-01-23 02:12:27 +03:00
|
|
|
for (int i = 0; i < ARRAY_SIZE(spaceprops); i++) {
|
2008-12-03 23:09:06 +03:00
|
|
|
if (addlist(hdl, spaceprops[i], listp, type))
|
|
|
|
return (-1);
|
|
|
|
listp = &(*listp)->pl_next;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
} else {
|
2022-01-23 02:12:27 +03:00
|
|
|
if (addlist(hdl, p, listp, type))
|
2008-12-03 23:09:06 +03:00
|
|
|
return (-1);
|
|
|
|
listp = &(*listp)->pl_next;
|
2008-11-20 23:01:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
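/*
 * Example (hypothetical caller, not part of this file): note that 'props'
 * must be writable, since strsep() modifies the string in place.
 *
 *	zprop_list_t *pl = NULL;
 *	char props[] = "name,used,available";
 *
 *	if (zprop_get_list(hdl, props, &pl, ZFS_TYPE_FILESYSTEM) == 0) {
 *		... walk the list via pl->pl_next ...
 *		zprop_free_list(pl);
 *	}
 */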
|
|
|
|
|
|
|
|
void
|
|
|
|
zprop_free_list(zprop_list_t *pl)
|
|
|
|
{
|
|
|
|
zprop_list_t *next;
|
|
|
|
|
|
|
|
while (pl != NULL) {
|
|
|
|
next = pl->pl_next;
|
|
|
|
free(pl->pl_user_prop);
|
|
|
|
free(pl);
|
|
|
|
pl = next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
typedef struct expand_data {
|
|
|
|
zprop_list_t **last;
|
|
|
|
libzfs_handle_t *hdl;
|
|
|
|
zfs_type_t type;
|
|
|
|
} expand_data_t;
|
|
|
|
|
2020-06-15 21:30:37 +03:00
|
|
|
static int
|
2008-11-20 23:01:55 +03:00
|
|
|
zprop_expand_list_cb(int prop, void *cb)
|
|
|
|
{
|
|
|
|
zprop_list_t *entry;
|
|
|
|
expand_data_t *edp = cb;
|
|
|
|
|
2022-03-16 21:51:28 +03:00
|
|
|
entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t));
|
2008-11-20 23:01:55 +03:00
|
|
|
|
|
|
|
entry->pl_prop = prop;
|
|
|
|
entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
|
|
|
|
entry->pl_all = B_TRUE;
|
|
|
|
|
|
|
|
*(edp->last) = entry;
|
|
|
|
edp->last = &entry->pl_next;
|
|
|
|
|
|
|
|
return (ZPROP_CONT);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
|
|
|
|
{
|
|
|
|
zprop_list_t *entry;
|
|
|
|
zprop_list_t **last;
|
|
|
|
expand_data_t exp;
|
|
|
|
|
|
|
|
if (*plp == NULL) {
|
|
|
|
/*
|
|
|
|
* If this is the very first time we've been called for an 'all'
|
|
|
|
* specification, expand the list to include all native
|
|
|
|
* properties.
|
|
|
|
*/
|
|
|
|
last = plp;
|
|
|
|
|
|
|
|
exp.last = last;
|
|
|
|
exp.hdl = hdl;
|
|
|
|
exp.type = type;
|
|
|
|
|
|
|
|
if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
|
|
|
|
B_FALSE, type) == ZPROP_INVAL)
|
|
|
|
return (-1);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add 'name' to the beginning of the list, which is handled
|
|
|
|
* specially.
|
|
|
|
*/
|
2022-02-20 06:06:43 +03:00
|
|
|
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
|
2021-11-30 17:46:25 +03:00
|
|
|
entry->pl_prop = ((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
|
|
|
|
((type == ZFS_TYPE_VDEV) ? VDEV_PROP_NAME : ZFS_PROP_NAME));
|
2008-11-20 23:01:55 +03:00
|
|
|
entry->pl_width = zprop_width(entry->pl_prop,
|
|
|
|
&entry->pl_fixed, type);
|
|
|
|
entry->pl_all = B_TRUE;
|
|
|
|
entry->pl_next = *plp;
|
|
|
|
*plp = entry;
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
|
|
|
|
zfs_type_t type)
|
|
|
|
{
|
|
|
|
return (zprop_iter_common(func, cb, show_all, ordered, type));
|
|
|
|
}
|
2019-04-10 10:43:28 +03:00
|
|
|
|
2022-04-15 01:00:02 +03:00
|
|
|
const char *
|
|
|
|
zfs_version_userland(void)
|
2019-04-10 10:43:28 +03:00
|
|
|
{
|
2022-04-15 01:00:02 +03:00
|
|
|
return (ZFS_META_ALIAS);
|
2019-04-10 10:43:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Prints both zfs userland and kernel versions
|
2022-04-15 01:00:02 +03:00
|
|
|
* Returns 0 on success, and -1 on error
|
2019-04-10 10:43:28 +03:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
zfs_version_print(void)
|
|
|
|
{
|
2022-04-15 01:00:02 +03:00
|
|
|
(void) puts(ZFS_META_ALIAS);
|
2019-04-10 10:43:28 +03:00
|
|
|
|
2022-04-15 01:00:02 +03:00
|
|
|
char *kver = zfs_version_kernel();
|
|
|
|
if (kver == NULL) {
|
2019-04-10 10:43:28 +03:00
|
|
|
fprintf(stderr, "zfs_version_kernel() failed: %s\n",
|
|
|
|
strerror(errno));
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
2022-04-15 01:00:02 +03:00
|
|
|
(void) printf("zfs-kmod-%s\n", kver);
|
|
|
|
free(kver);
|
2019-04-10 10:43:28 +03:00
|
|
|
return (0);
|
|
|
|
}
|
2019-12-20 03:26:07 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Return 1 if the user requested ANSI color output, and our terminal supports
|
|
|
|
* it. Return 0 for no color.
|
|
|
|
*/
|
2023-04-05 19:57:01 +03:00
|
|
|
int
|
2019-12-20 03:26:07 +03:00
|
|
|
use_color(void)
|
|
|
|
{
|
|
|
|
static int use_color = -1;
|
|
|
|
char *term;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Optimization:
|
|
|
|
*
|
|
|
|
* For each zpool invocation, we do a single check to see if we should
|
|
|
|
* be using color or not, and cache that value for the lifetime of the
|
|
|
|
* zpool command. That makes it cheap to call use_color() when
|
|
|
|
* we're printing with color. We assume that the settings are not going
|
|
|
|
* to change during the invocation of a zpool command (the user isn't
|
|
|
|
* going to change the ZFS_COLOR value while zpool is running, for
|
|
|
|
* example).
|
|
|
|
*/
|
|
|
|
if (use_color != -1) {
|
|
|
|
/*
|
|
|
|
* We've already figured out if we should be using color or
|
|
|
|
* not. Return the cached value.
|
|
|
|
*/
|
|
|
|
return (use_color);
|
|
|
|
}
|
|
|
|
|
|
|
|
term = getenv("TERM");
|
|
|
|
/*
|
|
|
|
* The user sets the ZFS_COLOR env var set to enable zpool ANSI color
|
|
|
|
* output. However if NO_COLOR is set (https://no-color.org/) then
|
|
|
|
* don't use it. Also, don't use color if terminal doesn't support
|
|
|
|
* it.
|
|
|
|
*/
|
|
|
|
if (libzfs_envvar_is_set("ZFS_COLOR") &&
|
|
|
|
!libzfs_envvar_is_set("NO_COLOR") &&
|
|
|
|
isatty(STDOUT_FILENO) && term && strcmp("dumb", term) != 0 &&
|
|
|
|
strcmp("unknown", term) != 0) {
|
|
|
|
/* Color supported */
|
|
|
|
use_color = 1;
|
|
|
|
} else {
|
|
|
|
use_color = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (use_color);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2023-03-14 01:23:04 +03:00
|
|
|
* The functions color_start() and color_end() are used for when you want
|
|
|
|
* to colorize a block of text.
|
2019-12-20 03:26:07 +03:00
|
|
|
*
|
2023-03-14 01:23:04 +03:00
|
|
|
* For example:
|
|
|
|
* color_start(ANSI_RED)
|
2019-12-20 03:26:07 +03:00
|
|
|
* printf("hello");
|
|
|
|
* printf("world");
|
|
|
|
* color_end();
|
|
|
|
*/
|
|
|
|
void
|
2022-04-19 21:38:30 +03:00
|
|
|
color_start(const char *color)
|
2019-12-20 03:26:07 +03:00
|
|
|
{
|
2023-03-14 01:23:04 +03:00
|
|
|
if (color && use_color()) {
|
2022-04-19 21:38:30 +03:00
|
|
|
fputs(color, stdout);
|
2022-12-13 02:30:51 +03:00
|
|
|
fflush(stdout);
|
|
|
|
}
|
2019-12-20 03:26:07 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
color_end(void)
|
|
|
|
{
|
2022-12-13 02:30:51 +03:00
|
|
|
if (use_color()) {
|
2022-04-19 21:38:30 +03:00
|
|
|
fputs(ANSI_RESET, stdout);
|
2022-12-13 02:30:51 +03:00
|
|
|
fflush(stdout);
|
|
|
|
}
|
|
|
|
|
2019-12-20 03:26:07 +03:00
|
|
|
}
|
|
|
|
|
2023-03-14 01:23:04 +03:00
|
|
|
/*
|
|
|
|
* printf() with a color. If color is NULL, then do a normal printf.
|
|
|
|
*/
|
2019-12-20 03:26:07 +03:00
|
|
|
int
|
2022-04-19 21:38:30 +03:00
|
|
|
printf_color(const char *color, const char *format, ...)
|
2019-12-20 03:26:07 +03:00
|
|
|
{
|
|
|
|
va_list aptr;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (color)
|
|
|
|
color_start(color);
|
|
|
|
|
|
|
|
va_start(aptr, format);
|
|
|
|
rc = vprintf(format, aptr);
|
|
|
|
va_end(aptr);
|
|
|
|
|
|
|
|
if (color)
|
|
|
|
color_end();
|
|
|
|
|
|
|
|
return (rc);
|
|
|
|
}
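/*
 * Example (hypothetical caller, not part of this file); ANSI_RED is assumed
 * to be the red escape macro from the libzfs headers, used the same way
 * ANSI_RESET is used in color_end() above.
 *
 *	(void) printf_color(ANSI_RED, "cannot open '%s'\n", poolname);
 *	(void) printf_color(NULL, "printed without color\n");
 */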
|