mirror_zfs/lib/libzfs/libzfs_dataset.c

/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
*/
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <pwd.h>
#include <grp.h>
#include <stddef.h>
#include <ucred.h>
#ifdef HAVE_IDMAP
#include <idmap.h>
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <libzfs.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_deleg.h"
static int userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
*/
const char *
zfs_type_to_name(zfs_type_t type)
{
switch (type) {
case ZFS_TYPE_FILESYSTEM:
return (dgettext(TEXT_DOMAIN, "filesystem"));
case ZFS_TYPE_SNAPSHOT:
return (dgettext(TEXT_DOMAIN, "snapshot"));
case ZFS_TYPE_VOLUME:
return (dgettext(TEXT_DOMAIN, "volume"));
default:
break;
}
return (NULL);
}
/*
* Validate a ZFS path. This is used even before trying to open the dataset, to
* provide a more meaningful error message. We call zfs_error_aux() to
* explain exactly why the name was not valid.
*/
int
zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
boolean_t modifying)
{
namecheck_err_t why;
char what;
(void) zfs_prop_get_table();
if (dataset_namecheck(path, &why, &what) != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is too long"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in name"), what);
break;
case NAME_ERR_MULTIPLE_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' delimiters in name"));
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool doesn't begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"reserved disk name"));
break;
default:
break;
}
}
return (0);
}
if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshot delimiter '@' in filesystem name"));
return (0);
}
if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '@' delimiter in snapshot name"));
return (0);
}
if (modifying && strchr(path, '%') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid character %c in name"), '%');
return (0);
}
return (-1);
}
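/*
 * Convenience wrapper: returns non-zero if 'name' is a valid name for the
 * given type, without reporting any error details.
 */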
int
zfs_name_valid(const char *name, zfs_type_t type)
{
if (type == ZFS_TYPE_POOL)
return (zpool_name_valid(NULL, B_FALSE, name));
return (zfs_validate_name(NULL, name, type, B_FALSE));
}
/*
* This function takes the raw DSL properties, and filters out the user-defined
* properties into a separate nvlist.
*/
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvpair_t *elem;
nvlist_t *propval;
nvlist_t *nvl;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
if (!zfs_prop_user(nvpair_name(elem)))
continue;
verify(nvpair_value_nvlist(elem, &propval) == 0);
if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
nvlist_free(nvl);
(void) no_memory(hdl);
return (NULL);
}
}
return (nvl);
}
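/*
 * Open a handle to the pool with the given name and add it to the list of
 * pool handles cached on the libzfs handle.
 */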
static zpool_handle_t *
zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph;
if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
if (hdl->libzfs_pool_handles != NULL)
zph->zpool_next = hdl->libzfs_pool_handles;
hdl->libzfs_pool_handles = zph;
}
return (zph);
}
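/*
 * Search the cached pool handles for one whose name matches the first 'len'
 * characters of 'pool_name'; returns NULL if no match is found.
 */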
static zpool_handle_t *
zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph = hdl->libzfs_pool_handles;
while ((zph != NULL) &&
(strncmp(pool_name, zpool_get_name(zph), len) != 0))
zph = zph->zpool_next;
return (zph);
}
/*
* Returns a handle to the pool that contains the provided dataset.
* If a handle to that pool already exists then that handle is returned.
* Otherwise, a new handle is created and added to the list of handles.
*/
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
char *pool_name;
int len;
zpool_handle_t *zph;
len = strcspn(zhp->zfs_name, "/@#") + 1;
pool_name = zfs_alloc(zhp->zfs_hdl, len);
(void) strlcpy(pool_name, zhp->zfs_name, len);
zph = zpool_find_handle(zhp, pool_name, len);
if (zph == NULL)
zph = zpool_add_handle(zhp, pool_name);
free(pool_name);
return (zph);
}
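/*
 * Close and free every pool handle cached on the libzfs handle.
 */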
void
zpool_free_handles(libzfs_handle_t *hdl)
{
zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
while (zph != NULL) {
next = zph->zpool_next;
zpool_close(zph);
zph = next;
}
hdl->libzfs_pool_handles = NULL;
}
/*
* Utility function to gather stats (objset and zpl) for the given object.
*/
static int
get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, zc) != 0) {
return (-1);
}
} else {
return (-1);
}
}
return (0);
}
/*
* Utility function to get the received properties of the given object.
*/
static int
get_recvd_props_ioctl(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *recvdprops;
zfs_cmd_t zc = {"\0"};
int err;
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
return (-1);
}
} else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
zcmd_free_nvlists(&zc);
if (err != 0)
return (-1);
nvlist_free(zhp->zfs_recvd_props);
zhp->zfs_recvd_props = recvdprops;
return (0);
}
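/*
 * Copy the objset stats and property nvlist returned by a stats ioctl into
 * the dataset handle, splitting out user properties as we go.
 */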
static int
put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
nvlist_t *allprops, *userprops;
zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
return (-1);
}
/*
* XXX Why do we store the user props separately, in addition to
* storing them in zfs_props?
*/
if ((userprops = process_user_props(zhp, allprops)) == NULL) {
nvlist_free(allprops);
return (-1);
}
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
zhp->zfs_props = allprops;
zhp->zfs_user_props = userprops;
return (0);
}
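/*
 * Fetch the current stats and properties for the dataset and cache them in
 * the handle.
 */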
static int
get_stats(zfs_handle_t *zhp)
{
int rc = 0;
zfs_cmd_t zc = {"\0"};
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
if (get_stats_ioctl(zhp, &zc) != 0)
rc = -1;
else if (put_stats_zhdl(zhp, &zc) != 0)
rc = -1;
zcmd_free_nvlists(&zc);
return (rc);
}
/*
* Refresh the properties currently stored in the handle.
*/
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
(void) get_stats(zhp);
}
/*
* Makes a handle from the given dataset name. Used by zfs_open() and
* zfs_iter_* to create child handles on the fly.
*/
static int
make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
if (put_stats_zhdl(zhp, zc) != 0)
return (-1);
/*
* We've managed to open the dataset and gather statistics. Determine
* the high-level type.
*/
if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_head_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER)
return (-1);
else
abort();
if (zhp->zfs_dmustats.dds_is_snapshot)
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
else
abort(); /* we should never see any other types */
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
return (-1);
return (0);
}
zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) {
free(zhp);
return (NULL);
}
if (get_stats_ioctl(zhp, &zc) == -1) {
zcmd_free_nvlists(&zc);
free(zhp);
return (NULL);
}
if (make_dataset_handle_common(zhp, &zc) == -1) {
free(zhp);
zhp = NULL;
}
zcmd_free_nvlists(&zc);
return (zhp);
}
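/*
 * Like make_dataset_handle(), but built from a zfs_cmd_t that has already
 * been filled in by a stats ioctl, avoiding a second round trip.
 */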
zfs_handle_t *
make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
if (make_dataset_handle_common(zhp, zc) == -1) {
free(zhp);
return (NULL);
}
return (zhp);
}
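/*
 * Construct a minimal snapshot handle from a parent handle and the name in
 * 'zc', without fetching any stats or properties.
 */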
zfs_handle_t *
make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = pzhp->zfs_hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
zhp->zfs_head_type = pzhp->zfs_type;
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
zhp->zpool_hdl = zpool_handle(zhp);
return (zhp);
}
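/*
 * Duplicate a dataset handle, including copies of its cached property lists.
 */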
zfs_handle_t *
zfs_handle_dup(zfs_handle_t *zhp_orig)
{
zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = zhp_orig->zfs_hdl;
zhp->zpool_hdl = zhp_orig->zpool_hdl;
(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
sizeof (zhp->zfs_name));
zhp->zfs_type = zhp_orig->zfs_type;
zhp->zfs_head_type = zhp_orig->zfs_head_type;
zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
if (zhp_orig->zfs_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_user_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_user_props,
&zhp->zfs_user_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_recvd_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_recvd_props,
&zhp->zfs_recvd_props, 0)) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
if (zhp_orig->zfs_mntopts != NULL) {
zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
zhp_orig->zfs_mntopts);
}
zhp->zfs_props_table = zhp_orig->zfs_props_table;
return (zhp);
}
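/*
 * Returns B_TRUE if the given 'fs#bookmark' path names an existing bookmark.
 */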
boolean_t
zfs_bookmark_exists(const char *path)
{
nvlist_t *bmarks;
nvlist_t *props;
char fsname[ZFS_MAXNAMELEN];
char *bmark_name;
char *pound;
int err;
boolean_t rv;
(void) strlcpy(fsname, path, sizeof (fsname));
pound = strchr(fsname, '#');
if (pound == NULL)
return (B_FALSE);
*pound = '\0';
bmark_name = pound + 1;
props = fnvlist_alloc();
err = lzc_get_bookmarks(fsname, props, &bmarks);
nvlist_free(props);
if (err != 0) {
nvlist_free(bmarks);
return (B_FALSE);
}
rv = nvlist_exists(bmarks, bmark_name);
nvlist_free(bmarks);
return (rv);
}
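/*
 * Construct a bookmark handle from its parent dataset handle and the
 * bookmark's property nvlist supplied by the caller.
 */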
zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
nvlist_t *bmark_props)
{
zfs_handle_t *zhp = calloc(sizeof (zfs_handle_t), 1);
if (zhp == NULL)
return (NULL);
/* Fill in the name. */
zhp->zfs_hdl = parent->zfs_hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
/* Set the property lists. */
if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
free(zhp);
return (NULL);
}
/* Set the types. */
zhp->zfs_head_type = parent->zfs_head_type;
zhp->zfs_type = ZFS_TYPE_BOOKMARK;
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
nvlist_free(zhp->zfs_props);
free(zhp);
return (NULL);
}
return (zhp);
}
/*
* Opens the given snapshot, filesystem, or volume. The 'types'
* argument is a mask of acceptable types. The function will print an
* appropriate error message and return NULL if it can't be opened.
*/
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
zfs_handle_t *zhp;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
/*
* Validate the name before we even try to open it.
*/
if (!zfs_validate_name(hdl, path, ZFS_TYPE_DATASET, B_FALSE)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid dataset name"));
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
return (NULL);
}
/*
* Try to get stats for the dataset, which will tell us if it exists.
*/
errno = 0;
if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
if (!(types & zhp->zfs_type)) {
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Release a ZFS handle. Nothing to do but free the associated memory.
*/
void
zfs_close(zfs_handle_t *zhp)
{
if (zhp->zfs_mntopts)
free(zhp->zfs_mntopts);
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
nvlist_free(zhp->zfs_recvd_props);
free(zhp);
}
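/*
 * The functions below maintain a cache of MNTTAB entries for ZFS file
 * systems in an AVL tree keyed on the mnt_special field, so that repeated
 * mount lookups do not have to rescan the mount table.
 */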
typedef struct mnttab_node {
struct mnttab mtn_mt;
avl_node_t mtn_node;
} mnttab_node_t;
static int
libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
{
const mnttab_node_t *mtn1 = arg1;
const mnttab_node_t *mtn2 = arg2;
int rv;
rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);
if (rv == 0)
return (0);
return (rv > 0 ? 1 : -1);
}
void
libzfs_mnttab_init(libzfs_handle_t *hdl)
{
assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}
int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
struct mnttab entry;
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
return (ENOENT);
while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
mnttab_node_t *mtn;
avl_index_t where;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);
/* Exclude duplicate mounts */
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
continue;
}
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
return (0);
}
void
libzfs_mnttab_fini(libzfs_handle_t *hdl)
{
void *cookie = NULL;
mnttab_node_t *mtn;
while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
}
avl_destroy(&hdl->libzfs_mnttab_cache);
}
void
libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
{
hdl->libzfs_mnttab_enable = enable;
}
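/*
 * Look up the mount entry for 'fsname'. When caching is disabled, search
 * MNTTAB directly; otherwise consult (and lazily populate) the AVL cache.
 */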
int
libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
struct mnttab *entry)
{
mnttab_node_t find;
mnttab_node_t *mtn;
int error;
if (!hdl->libzfs_mnttab_enable) {
struct mnttab srch = { 0 };
if (avl_numnodes(&hdl->libzfs_mnttab_cache))
libzfs_mnttab_fini(hdl);
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
return (ENOENT);
srch.mnt_special = (char *)fsname;
srch.mnt_fstype = MNTTYPE_ZFS;
if (getmntany(hdl->libzfs_mnttab, entry, &srch) == 0)
return (0);
else
return (ENOENT);
}
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0)
if ((error = libzfs_mnttab_update(hdl)) != 0)
return (error);
find.mtn_mt.mnt_special = (char *)fsname;
mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
if (mtn) {
*entry = mtn->mtn_mt;
return (0);
}
return (ENOENT);
}
void
libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
const char *mountp, const char *mntopts)
{
mnttab_node_t *mtn;
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0)
return;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
void
libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
{
mnttab_node_t find;
mnttab_node_t *ret;
find.mtn_mt.mnt_special = (char *)fsname;
if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))) {
avl_remove(&hdl->libzfs_mnttab_cache, ret);
free(ret->mtn_mt.mnt_special);
free(ret->mtn_mt.mnt_mountp);
free(ret->mtn_mt.mnt_fstype);
free(ret->mtn_mt.mnt_mntopts);
free(ret);
}
}
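/*
 * Return the SPA version of the pool that contains this dataset.
 */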
int
zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
zpool_handle_t *zpool_handle = zhp->zpool_hdl;
if (zpool_handle == NULL)
return (-1);
*spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
return (0);
}
/*
* The choice of reservation property depends on the SPA version.
*/
static int
zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
if (spa_version >= SPA_VERSION_REFRESERVATION)
*resv_prop = ZFS_PROP_REFRESERVATION;
else
*resv_prop = ZFS_PROP_RESERVATION;
return (0);
}
/*
* Given an nvlist of properties to set, validates that they are correct, and
* parses any numeric properties (index, boolean, etc) if they are specified as
* strings.
*/
nvlist_t *
zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl,
const char *errbuf)
{
nvpair_t *elem;
uint64_t intval;
char *strval;
zfs_prop_t prop;
nvlist_t *ret;
int chosen_normal = -1;
int chosen_utf = -1;
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
/*
* Make sure this property is valid and applies to this type.
*/
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
if (prop == ZPROP_INVAL && zfs_prop_user(propname)) {
/*
* This is a user property: make sure it's a
* string, and that it's less than ZAP_MAXNAMELEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (nvlist_add_string(ret, propname, strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Currently, only user properties can be modified on
* snapshots.
*/
if (type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"this property can not be modified for snapshots"));
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) {
zfs_userquota_prop_t uqtype;
char newpropname[128];
char domain[128];
uint64_t rid;
uint64_t valary[3];
if (userquota_propname_decode(propname, zoned,
&uqtype, domain, sizeof (domain), &rid) != 0) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' has an invalid user/group name"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (uqtype != ZFS_PROP_USERQUOTA &&
uqtype != ZFS_PROP_GROUPQUOTA) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, "none") == 0) {
intval = 0;
} else if (zfs_nicestrtonum(hdl,
strval, &intval) != 0) {
(void) zfs_error(hdl,
EZFS_BADPROP, errbuf);
goto error;
}
} else if (nvpair_type(elem) ==
DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, &intval);
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable "
"userquota/groupquota"));
goto error;
}
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* Encode the prop name as
* userquota@<hex-rid>-domain, to make it easy
* for the kernel to decode.
*/
(void) snprintf(newpropname, sizeof (newpropname),
"%s%llx-%s", zfs_userquota_prop_prefixes[uqtype],
(longlong_t)rid, domain);
valary[0] = uqtype;
valary[1] = rid;
valary[2] = intval;
if (nvlist_add_uint64_array(ret, newpropname,
valary, 3) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
} else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (prop == ZPROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' does not "
"apply to datasets of this type"), propname);
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (zfs_prop_readonly(prop) &&
(!zfs_prop_setonce(prop) || zhp != NULL)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, type, ret,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform some additional checks for specific properties.
*/
switch (prop) {
case ZFS_PROP_VERSION:
{
int version;
if (zhp == NULL)
break;
version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (intval < version) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can not downgrade; already at version %u"),
version);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
{
int maxbs = SPA_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
}
/*
* The value must be a power of two between
* SPA_MINBLOCKSIZE and maxbs.
*/
if (intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval)) {
zfs_nicenum(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be power of 2 from 512B "
"to %s"), propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
/*
* Verify the mlslabel string and convert to
* internal hex label string.
*/
m_label_t *new_sl;
char *hex = NULL; /* internal label string */
/* Default value is already OK. */
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
break;
/* Verify the label can be converted to binary form */
if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
(str_to_label(strval, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1)) {
goto badlabel;
}
/* Now translate to hex internal label string */
if (label_to_str(new_sl, &hex, M_INTERNAL,
DEF_NAMES) != 0) {
if (hex)
free(hex);
goto badlabel;
}
m_label_free(new_sl);
/* If string is already in internal form, we're done. */
if (strcmp(strval, hex) == 0) {
free(hex);
break;
}
/* Replace the label string with the internal form. */
(void) nvlist_remove(ret, zfs_prop_to_name(prop),
DATA_TYPE_STRING);
verify(nvlist_add_string(ret, zfs_prop_to_name(prop),
hex) == 0);
free(hex);
break;
badlabel:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid mlslabel '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
m_label_free(new_sl); /* OK if null */
goto error;
#else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mlslabels are unsupported"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
#endif /* HAVE_MLSLABEL */
}
case ZFS_PROP_MOUNTPOINT:
{
namecheck_err_t why;
if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
break;
if (mountpoint_namecheck(strval, &why)) {
switch (why) {
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' must be an absolute path, "
"'none', or 'legacy'"), propname);
break;
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"component of '%s' is too long"),
propname);
break;
default:
break;
}
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
/*FALLTHRU*/
case ZFS_PROP_SHARESMB:
case ZFS_PROP_SHARENFS:
/*
* For the mountpoint and sharenfs or sharesmb
* properties, check if it can be set in a
* global/non-global zone based on
* the zoned property value:
*
* global zone non-global zone
* --------------------------------------------------
* zoned=on mountpoint (no) mountpoint (yes)
* sharenfs (no) sharenfs (no)
* sharesmb (no) sharesmb (no)
*
* zoned=off mountpoint (yes) N/A
* sharenfs (yes)
* sharesmb (yes)
*/
if (zoned) {
if (getzoneid() == GLOBAL_ZONEID) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set on "
"dataset in a non-global zone"),
propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
} else if (prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set in "
"a non-global zone"), propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
}
} else if (getzoneid() != GLOBAL_ZONEID) {
/*
* If zoned property is 'off', this must be in
* a global zone. If not, something is wrong.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set while dataset "
"'zoned' property is set"), propname);
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
/*
* At this point, it is legitimate to set the
* property. Now we want to make sure that the
* property value is valid if it is sharenfs.
*/
if ((prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) &&
strcmp(strval, "on") != 0 &&
strcmp(strval, "off") != 0) {
zfs_share_proto_t proto;
if (prop == ZFS_PROP_SHARESMB)
proto = PROTO_SMB;
else
proto = PROTO_NFS;
/*
* Must be a valid sharing protocol
* option string so init the libshare
* in order to enable the parser and
* then parse the options. We use the
* control API since we don't care about
* the current configuration and don't
* want the overhead of loading it
* until we actually do something.
*/
if (zfs_init_libshare(hdl,
SA_INIT_CONTROL_API) != SA_OK) {
/*
* An error occurred so we can't do
* anything
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set: problem "
"in share initialization"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (zfs_parse_options(strval, proto) != SA_OK) {
/*
* There was an error in parsing so
* deal with it by issuing an error
* message and leaving after
* uninitializing the libshare
* interface.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set to invalid "
"options"), propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
zfs_uninit_libshare(hdl);
goto error;
}
zfs_uninit_libshare(hdl);
}
break;
case ZFS_PROP_UTF8ONLY:
chosen_utf = (int)intval;
break;
case ZFS_PROP_NORMALIZE:
chosen_normal = (int)intval;
break;
default:
break;
}
/*
* For changes to existing volumes, we have some additional
* checks to enforce.
*/
if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
uint64_t volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
uint64_t blocksize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLBLOCKSIZE);
char buf[64];
switch (prop) {
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
if (intval > volsize) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is greater than current "
"volume size"), propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
case ZFS_PROP_VOLSIZE:
if (intval % blocksize != 0) {
zfs_nicenum(blocksize, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a multiple of "
"volume block size (%s)"),
propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be zero"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
}
/*
* If normalization was chosen, but no UTF8 choice was made,
* enforce rejection of non-UTF8 names.
*
* If normalization was chosen, but rejecting non-UTF8 names
* was explicitly not chosen, it is an error.
*/
if (chosen_normal > 0 && chosen_utf < 0) {
if (nvlist_add_uint64(ret,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
(void) no_memory(hdl);
goto error;
}
} else if (chosen_normal > 0 && chosen_utf == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be set 'on' if normalization chosen"),
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
return (ret);
error:
nvlist_free(ret);
return (NULL);
}
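/*
 * Add a synthetic (ref)reservation to 'nvl' when the volume size is being
 * changed and the existing reservation tracks the old size. Returns 1 if a
 * reservation was added, 0 if none was needed, and -1 on error.
 */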
int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t old_volsize;
uint64_t new_volsize;
uint64_t old_reservation;
uint64_t new_reservation;
zfs_prop_t resv_prop;
nvlist_t *props;
/*
* If this is an existing volume, and someone is setting the volsize,
* make sure that it matches the reservation, or add it if necessary.
*/
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_reservation = zfs_prop_get_int(zhp, resv_prop);
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
if ((zvol_volsize_to_reservation(old_volsize, props) !=
old_reservation) || nvlist_exists(nvl,
zfs_prop_to_name(resv_prop))) {
fnvlist_free(props);
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&new_volsize) != 0) {
fnvlist_free(props);
return (-1);
}
new_reservation = zvol_volsize_to_reservation(new_volsize, props);
fnvlist_free(props);
if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop),
new_reservation) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
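/*
 * Translate an errno returned while setting a property into a descriptive
 * libzfs error, taking the specific property into account.
 */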
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
char *errbuf)
{
switch (err) {
case ENOSPC:
/*
* For quotas and reservations, ENOSPC indicates
* something different; setting a quota or reservation
* doesn't use any disk space.
*/
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, err, errbuf);
break;
}
break;
case EBUSY:
(void) zfs_standard_error(hdl, EBUSY, errbuf);
break;
case EROFS:
(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value too long"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool and or dataset must be upgraded to set this "
"property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ERANGE:
if (prop == ZFS_PROP_COMPRESSION ||
prop == ZFS_PROP_DNODESIZE ||
prop == ZFS_PROP_RECORDSIZE) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"bootable datasets"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EINVAL:
if (prop == ZPROP_INVAL) {
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
#ifdef _ILP32
if (prop == ZFS_PROP_VOLSIZE) {
(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
break;
}
#endif
/* FALLTHROUGH */
default:
(void) zfs_standard_error(hdl, err, errbuf);
}
}
static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_SETUID:
case ZFS_PROP_READONLY:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
return (B_TRUE);
default:
return (B_FALSE);
}
}
/*
* Given a property name and value, set the property for the given dataset.
*/
int
zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
{
int ret = -1;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl = NULL;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_string(nvl, propname, propval) != 0) {
(void) no_memory(hdl);
goto error;
}
ret = zfs_prop_set_list(zhp, nvl);
error:
nvlist_free(nvl);
return (ret);
}
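/*
 * Illustrative consumer sketch (not part of this file): setting a single
 * property through zfs_prop_set(), assuming the usual libzfs_init()/
 * zfs_open() entry points.  The dataset name "tank/home" is a
 * hypothetical example.
 *
 *	libzfs_handle_t *g_zfs = libzfs_init();
 *	zfs_handle_t *fs = zfs_open(g_zfs, "tank/home", ZFS_TYPE_FILESYSTEM);
 *
 *	if (fs != NULL) {
 *		(void) zfs_prop_set(fs,
 *		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), "on");
 *		zfs_close(fs);
 *	}
 *	libzfs_fini(g_zfs);
 */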
/*
* Given an nvlist of property names and values, set the properties for the
* given dataset.
*/
int
zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
prop_changelist_t **cls = NULL;
int cl_idx;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl;
int nvl_len;
int added_resv = 0;
zfs_prop_t prop = 0;
nvpair_t *elem;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl,
errbuf)) == NULL)
goto error;
/*
* We have to check for any extra properties which need to be added
* before computing the length of the nvlist.
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE &&
(added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) {
goto error;
}
}
/*
* Check how many properties we're setting and allocate an array to
* store changelist pointers for postfix().
*/
nvl_len = 0;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem))
nvl_len++;
if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL)
goto error;
cl_idx = 0;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
assert(cl_idx < nvl_len);
/*
* We don't want to unmount & remount the dataset when changing
* its canmount property to 'on' or 'noauto'. We only use
* the changelist logic to unmount when setting canmount=off.
*/
if (!(prop == ZFS_PROP_CANMOUNT &&
fnvpair_value_uint64(elem) != ZFS_CANMOUNT_OFF)) {
cls[cl_idx] = changelist_gather(zhp, prop, 0, 0);
if (cls[cl_idx] == NULL)
goto error;
}
if (prop == ZFS_PROP_MOUNTPOINT &&
changelist_haszonedchild(cls[cl_idx])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if (cls[cl_idx] != NULL &&
(ret = changelist_prefix(cls[cl_idx])) != 0)
goto error;
cl_idx++;
}
assert(cl_idx == nvl_len);
/*
* Execute the corresponding ioctl() to set this list of properties.
*/
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if ((ret = zcmd_write_src_nvlist(hdl, &zc, nvl)) != 0 ||
(ret = zcmd_alloc_dst_nvlist(hdl, &zc, 0)) != 0)
goto error;
ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
if (ret != 0) {
/* Get the list of unset properties back and report them. */
nvlist_t *errorprops = NULL;
if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0)
goto error;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
zfs_setprop_error(hdl, prop, errno, errbuf);
}
nvlist_free(errorprops);
if (added_resv && errno == ENOSPC) {
/* clean up the volsize property we tried to set */
uint64_t old_volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
nvlist_free(nvl);
nvl = NULL;
zcmd_free_nvlists(&zc);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
goto error;
if (nvlist_add_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_VOLSIZE),
old_volsize) != 0)
goto error;
if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
goto error;
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
}
} else {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL) {
int clp_err = changelist_postfix(cls[cl_idx]);
if (clp_err != 0)
ret = clp_err;
}
}
if (ret == 0) {
/*
* Refresh the statistics so the new property
* value is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
}
error:
nvlist_free(nvl);
zcmd_free_nvlists(&zc);
if (cls != NULL) {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL)
changelist_free(cls[cl_idx]);
}
free(cls);
}
return (ret);
}
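/*
 * Illustrative sketch (hypothetical values): batching several properties
 * into one nvlist so that zfs_prop_set_list() applies them with a single
 * ZFS_IOC_SET_PROP ioctl.
 *
 *	nvlist_t *props = NULL;
 *
 *	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0 &&
 *	    nvlist_add_string(props, "atime", "off") == 0 &&
 *	    nvlist_add_string(props, "compression", "lz4") == 0)
 *		(void) zfs_prop_set_list(zhp, props);
 *	nvlist_free(props);
 */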
/*
* Given a property, inherit the value from the parent dataset, or if received
* is TRUE, revert to the received value, if any.
*/
int
zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
{
zfs_cmd_t zc = {"\0"};
int ret;
prop_changelist_t *cl;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
zfs_prop_t prop;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot inherit %s for '%s'"), propname, zhp->zfs_name);
zc.zc_cookie = received;
if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
/*
* For user properties, the amount of work we have to do is very
* small, so just do it here.
*/
if (!zfs_prop_user(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
return (zfs_standard_error(hdl, errno, errbuf));
return (0);
}
/*
* Verify that this property is inheritable.
*/
if (zfs_prop_readonly(prop))
return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
if (!zfs_prop_inheritable(prop) && !received)
return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
/*
* Check to see if the property applies to this dataset type.
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
/*
* Normalize the name, to get rid of shorthand abbreviations.
*/
propname = zfs_prop_to_name(prop);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Determine datasets which will be affected by this change, if any.
*/
if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
return (-1);
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) {
return (zfs_standard_error(hdl, errno, errbuf));
} else {
if ((ret = changelist_postfix(cl)) != 0)
goto error;
/*
* Refresh the statistics so the new property is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
error:
changelist_free(cl);
return (ret);
}
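/*
 * Illustrative sketch: clearing a local value so the dataset follows its
 * parent again ("zfs inherit"), or reverting to the received value when
 * the last argument is B_TRUE ("zfs inherit -S").
 *
 *	(void) zfs_prop_inherit(zhp, "compression", B_FALSE);
 *	(void) zfs_prop_inherit(zhp, "quota", B_TRUE);
 */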
/*
* True DSL properties are stored in an nvlist. The following two functions
* extract them appropriately.
*/
uint64_t
getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
uint64_t value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_numeric(prop);
*source = "";
}
return (value);
}
static char *
getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
char *value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
if ((value = (char *)zfs_prop_default_string(prop)) == NULL)
value = "";
*source = "";
}
return (value);
}
static boolean_t
zfs_is_recvd_props_mode(zfs_handle_t *zhp)
{
return (zhp->zfs_props == zhp->zfs_recvd_props);
}
static void
zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
*cookie = (uint64_t)(uintptr_t)zhp->zfs_props;
zhp->zfs_props = zhp->zfs_recvd_props;
}
static void
zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie;
*cookie = 0;
}
/*
* Internal function for getting a numeric property. Both zfs_prop_get() and
* zfs_prop_get_int() are built using this interface.
*
* Certain properties can be overridden using 'mount -o'. In this case, scan
* the contents of the /etc/mtab entry, searching for the appropriate options.
* If they differ from the on-disk values, report the current values and mark
* the source "temporary".
*/
static int
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
char **source, uint64_t *val)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zplprops = NULL;
struct mnttab mnt;
char *mntopt_on = NULL;
char *mntopt_off = NULL;
boolean_t received = zfs_is_recvd_props_mode(zhp);
*source = NULL;
/*
* If the property is being fetched for a snapshot, check whether
* the property is valid for the snapshot's head dataset type.
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
}
switch (prop) {
case ZFS_PROP_ATIME:
mntopt_on = MNTOPT_ATIME;
mntopt_off = MNTOPT_NOATIME;
break;
case ZFS_PROP_RELATIME:
mntopt_on = MNTOPT_RELATIME;
mntopt_off = MNTOPT_NORELATIME;
break;
case ZFS_PROP_DEVICES:
mntopt_on = MNTOPT_DEVICES;
mntopt_off = MNTOPT_NODEVICES;
break;
case ZFS_PROP_EXEC:
mntopt_on = MNTOPT_EXEC;
mntopt_off = MNTOPT_NOEXEC;
break;
case ZFS_PROP_READONLY:
mntopt_on = MNTOPT_RO;
mntopt_off = MNTOPT_RW;
break;
case ZFS_PROP_SETUID:
mntopt_on = MNTOPT_SETUID;
mntopt_off = MNTOPT_NOSETUID;
break;
case ZFS_PROP_XATTR:
mntopt_on = MNTOPT_XATTR;
mntopt_off = MNTOPT_NOXATTR;
break;
case ZFS_PROP_NBMAND:
mntopt_on = MNTOPT_NBMAND;
mntopt_off = MNTOPT_NONBMAND;
break;
default:
break;
}
/*
* Because looking up the mount options is potentially expensive
* (iterating over all of /etc/mtab), we defer its calculation until
* we're looking up a property which requires its presence.
*/
if (!zhp->zfs_mntcheck &&
(mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) {
zhp->zfs_mntopts = zfs_strdup(hdl,
entry.mnt_mntopts);
if (zhp->zfs_mntopts == NULL)
return (-1);
}
zhp->zfs_mntcheck = B_TRUE;
}
if (zhp->zfs_mntopts == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = zhp->zfs_mntopts;
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_READONLY:
case ZFS_PROP_SETUID:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
*val = getprop_uint64(zhp, prop, source);
if (received)
break;
if (hasmntopt(&mnt, mntopt_on) && !*val) {
*val = B_TRUE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
} else if (hasmntopt(&mnt, mntopt_off) && *val) {
*val = B_FALSE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
}
break;
case ZFS_PROP_CANMOUNT:
case ZFS_PROP_VOLSIZE:
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
*val = getprop_uint64(zhp, prop, source);
if (*source == NULL) {
/* not default, must be local */
*source = zhp->zfs_name;
}
break;
case ZFS_PROP_MOUNTED:
*val = (zhp->zfs_mntopts != NULL);
break;
case ZFS_PROP_NUMCLONES:
*val = zhp->zfs_dmustats.dds_num_clones;
break;
case ZFS_PROP_VERSION:
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
case ZFS_PROP_CASE:
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
zcmd_free_nvlists(&zc);
if (prop == ZFS_PROP_VERSION &&
zhp->zfs_type == ZFS_TYPE_VOLUME)
*val = zfs_prop_default_numeric(prop);
return (-1);
}
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
val) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
nvlist_free(zplprops);
zcmd_free_nvlists(&zc);
break;
case ZFS_PROP_INCONSISTENT:
*val = zhp->zfs_dmustats.dds_inconsistent;
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
*val = getprop_uint64(zhp, prop, source);
/*
* If we tried to use a default value for a
* readonly property, it means that it was not
* present.
*/
if (zfs_prop_readonly(prop) &&
*source != NULL && (*source)[0] == '\0') {
*source = NULL;
}
break;
case PROP_TYPE_STRING:
default:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"cannot get non-numeric property"));
return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "internal error")));
}
}
return (0);
}
/*
* Calculate the source type, given the raw source string.
*/
static void
get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source,
char *statbuf, size_t statlen)
{
if (statbuf == NULL || *srctype == ZPROP_SRC_TEMPORARY)
return;
if (source == NULL) {
*srctype = ZPROP_SRC_NONE;
} else if (source[0] == '\0') {
*srctype = ZPROP_SRC_DEFAULT;
} else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
*srctype = ZPROP_SRC_RECEIVED;
} else {
if (strcmp(source, zhp->zfs_name) == 0) {
*srctype = ZPROP_SRC_LOCAL;
} else {
(void) strlcpy(statbuf, source, statlen);
*srctype = ZPROP_SRC_INHERITED;
}
}
}
int
zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
size_t proplen, boolean_t literal)
{
zfs_prop_t prop;
int err = 0;
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (-1);
prop = zfs_name_to_prop(propname);
if (prop != ZPROP_INVAL) {
uint64_t cookie;
if (!nvlist_exists(zhp->zfs_recvd_props, propname))
return (-1);
zfs_set_recvd_props_mode(zhp, &cookie);
err = zfs_prop_get(zhp, prop, propbuf, proplen,
NULL, NULL, 0, literal);
zfs_unset_recvd_props_mode(zhp, &cookie);
} else {
nvlist_t *propval;
char *recvdval;
if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
propname, &propval) != 0)
return (-1);
verify(nvlist_lookup_string(propval, ZPROP_VALUE,
&recvdval) == 0);
(void) strlcpy(propbuf, recvdval, proplen);
}
return (err == 0 ? 0 : -1);
}
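/*
 * Minimal sketch (assumed buffer size): fetching the received value of a
 * property, i.e. what the "received" column of 'zfs get' reports.
 *
 *	char recvd[ZFS_MAXPROPLEN];
 *
 *	if (zfs_prop_get_recvd(zhp, "compression", recvd, sizeof (recvd),
 *	    B_FALSE) == 0)
 *		(void) printf("received compression: %s\n", recvd);
 */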
static int
get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
nvpair_t *pair;
value = zfs_get_clones_nvl(zhp);
if (value == NULL)
return (-1);
propbuf[0] = '\0';
for (pair = nvlist_next_nvpair(value, NULL); pair != NULL;
pair = nvlist_next_nvpair(value, pair)) {
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) strlcat(propbuf, nvpair_name(pair), proplen);
}
return (0);
}
struct get_clones_arg {
uint64_t numclones;
nvlist_t *value;
const char *origin;
char buf[ZFS_MAXNAMELEN];
};
int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
struct get_clones_arg *gca = arg;
if (gca->numclones == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
NULL, NULL, 0, B_TRUE) != 0)
goto out;
if (strcmp(gca->buf, gca->origin) == 0) {
fnvlist_add_boolean(gca->value, zfs_get_name(zhp));
gca->numclones--;
}
out:
(void) zfs_iter_children(zhp, get_clones_cb, gca);
zfs_close(zhp);
return (0);
}
nvlist_t *
zfs_get_clones_nvl(zfs_handle_t *zhp)
{
nvlist_t *nv, *value;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
struct get_clones_arg gca;
/*
* If this is a snapshot, then the kernel wasn't able
* to get the clones.  Do it ourselves by iterating, which is slow.
*/
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
return (NULL);
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
nvlist_free(nv);
return (NULL);
}
gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
gca.value = value;
gca.origin = zhp->zfs_name;
if (gca.numclones != 0) {
zfs_handle_t *root;
char pool[ZFS_MAXNAMELEN];
char *cp = pool;
/* get the pool name */
(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
(void) strsep(&cp, "/@");
root = zfs_open(zhp->zfs_hdl, pool,
ZFS_TYPE_FILESYSTEM);
(void) get_clones_cb(root, &gca);
}
if (gca.numclones != 0 ||
nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
nvlist_add_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
nvlist_free(nv);
nvlist_free(value);
verify(0 == nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv));
}
verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0);
return (value);
}
/*
* Retrieve a property from the given object. If 'literal' is specified, then
* numbers are left as exact values. Otherwise, numbers are converted to a
* human-readable form.
*
* Returns 0 on success, or -1 on error.
*/
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
char *source = NULL;
uint64_t val;
char *str;
const char *strval;
boolean_t received = zfs_is_recvd_props_mode(zhp);
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (-1);
if (received && zfs_prop_readonly(prop))
return (-1);
if (src)
*src = ZPROP_SRC_NONE;
switch (prop) {
case ZFS_PROP_CREATION:
/*
* 'creation' is a time_t stored in the statistics. We convert
* this into a string unless 'literal' is specified.
*/
{
val = getprop_uint64(zhp, prop, &source);
time_t time = (time_t)val;
struct tm t;
if (literal ||
localtime_r(&time, &t) == NULL ||
strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t) val);
}
break;
case ZFS_PROP_MOUNTPOINT:
/*
* Getting the precise mountpoint can be tricky.
*
* - for 'none' or 'legacy', return those values.
* - for inherited mountpoints, we want to take everything
* after our ancestor and append it to the inherited value.
*
* If the pool has an alternate root, we want to prepend that
* root to any values we return.
*/
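/*
 * Worked example of the composition above (hypothetical names): with an
 * alternate root of "/mnt" and a mountpoint of "/export" inherited from
 * "tank/fs", the dataset "tank/fs/data" is reported as "/mnt/export/data".
 */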
str = getprop_string(zhp, prop, &source);
if (str[0] == '/') {
char buf[MAXPATHLEN];
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
relpath = zhp->zfs_name + strlen(source);
if (relpath[0] == '/')
relpath++;
}
if ((zpool_get_prop(zhp->zpool_hdl,
ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL,
B_FALSE)) || (strcmp(root, "-") == 0))
root[0] = '\0';
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
if (str[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
str++;
if (relpath[0] == '\0')
(void) snprintf(propbuf, proplen, "%s%s",
root, str);
else
(void) snprintf(propbuf, proplen, "%s%s%s%s",
root, str, relpath[0] == '@' ? "" : "/",
relpath);
} else {
/* 'legacy' or 'none' */
(void) strlcpy(propbuf, str, proplen);
}
break;
case ZFS_PROP_ORIGIN:
(void) strlcpy(propbuf, getprop_string(zhp, prop, &source),
proplen);
/*
* If there is no parent at all, return failure to indicate that
* it doesn't apply to this dataset.
*/
if (propbuf[0] == '\0')
return (-1);
break;
case ZFS_PROP_CLONES:
if (get_clones_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If quota or reservation is 0, we translate this into 'none'
* (unless literal is set), and indicate that it's the default
* value. Otherwise, we print the number nicely and indicate
* that it's set locally.
*/
if (val == 0) {
if (literal)
(void) strlcpy(propbuf, "0", proplen);
else
(void) strlcpy(propbuf, "none", proplen);
} else {
if (literal)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
else
zfs_nicenum(val, propbuf, proplen);
}
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If limit is UINT64_MAX, we translate this into 'none' (unless
* literal is set), and indicate that it's the default value.
* Otherwise, we print the number nicely and indicate that it's
* set locally.
*/
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else if (val == UINT64_MAX) {
(void) strlcpy(propbuf, "none", proplen);
} else {
zfs_nicenum(val, propbuf, proplen);
}
break;
case ZFS_PROP_REFRATIO:
case ZFS_PROP_COMPRESSRATIO:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
(void) snprintf(propbuf, proplen, "%llu.%02llux",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
break;
case ZFS_PROP_TYPE:
switch (zhp->zfs_type) {
case ZFS_TYPE_FILESYSTEM:
str = "filesystem";
break;
case ZFS_TYPE_VOLUME:
str = "volume";
break;
case ZFS_TYPE_SNAPSHOT:
str = "snapshot";
break;
case ZFS_TYPE_BOOKMARK:
str = "bookmark";
break;
default:
abort();
}
(void) snprintf(propbuf, proplen, "%s", str);
break;
case ZFS_PROP_MOUNTED:
/*
* The 'mounted' property is a pseudo-property that describes
* whether the filesystem is currently mounted. Even though
* it's a boolean value, the typical values of "on" and "off"
* don't make sense, so we translate to "yes" and "no".
*/
if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
src, &source, &val) != 0)
return (-1);
if (val)
(void) strlcpy(propbuf, "yes", proplen);
else
(void) strlcpy(propbuf, "no", proplen);
break;
case ZFS_PROP_NAME:
/*
* The 'name' property is a pseudo-property derived from the
* dataset name. It is presented as a real property to simplify
* consumers.
*/
(void) strlcpy(propbuf, zhp->zfs_name, proplen);
break;
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
m_label_t *new_sl = NULL;
char *ascii = NULL; /* human readable label */
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
if (literal || (strcasecmp(propbuf,
ZFS_MLSLABEL_DEFAULT) == 0))
break;
/*
* Try to translate the internal hex string to
* human-readable output. If there are any
* problems just use the hex string.
*/
if (str_to_label(propbuf, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1) {
m_label_free(new_sl);
break;
}
if (label_to_str(new_sl, &ascii, M_LABEL,
DEF_NAMES) != 0) {
if (ascii)
free(ascii);
m_label_free(new_sl);
break;
}
m_label_free(new_sl);
(void) strlcpy(propbuf, ascii, proplen);
free(ascii);
#else
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
#endif /* HAVE_MLSLABEL */
}
break;
case ZFS_PROP_GUID:
/*
* GUIDs are stored as numbers, but they are identifiers.
* We don't want them to be pretty printed, because pretty
* printing mangles the ID into a truncated and useless value.
*/
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0)
return (-1);
if (literal)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
else
zfs_nicenum(val, propbuf, proplen);
break;
case PROP_TYPE_STRING:
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
break;
case PROP_TYPE_INDEX:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0)
return (-1);
if (zfs_prop_index_to_string(prop, val, &strval) != 0)
return (-1);
(void) strlcpy(propbuf, strval, proplen);
break;
default:
abort();
}
}
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
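/*
 * Illustrative consumer sketch: fetching the mountpoint in human-readable
 * form along with the dataset the value was inherited from.
 *
 *	char mntpt[ZFS_MAXPROPLEN];
 *	char setpoint[ZFS_MAXNAMELEN];
 *	zprop_source_t srctype;
 *
 *	if (zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mntpt, sizeof (mntpt),
 *	    &srctype, setpoint, sizeof (setpoint), B_FALSE) == 0)
 *		(void) printf("%s (from '%s')\n", mntpt, setpoint);
 */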
/*
* Utility function to get the given numeric property. Does no validation that
* the given property is the appropriate type; should only be used with
* hard-coded property types.
*/
uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
char *source;
uint64_t val;
(void) get_numeric_property(zhp, prop, NULL, &source, &val);
return (val);
}
int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
char buf[64];
(void) snprintf(buf, sizeof (buf), "%llu", (longlong_t)val);
return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
}
/*
* Similar to zfs_prop_get(), but returns the value as an integer.
*/
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
zprop_source_t *src, char *statbuf, size_t statlen)
{
char *source;
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) {
return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
zfs_prop_to_name(prop)));
}
if (src)
*src = ZPROP_SRC_NONE;
if (get_numeric_property(zhp, prop, src, &source, value) != 0)
return (-1);
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
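/*
 * Minimal sketch: reading 'quota' as a raw number; a value of 0 is what
 * zfs_prop_get() renders as "none".
 *
 *	uint64_t quota;
 *	zprop_source_t src;
 *	char setpoint[ZFS_MAXNAMELEN];
 *
 *	if (zfs_prop_get_numeric(zhp, ZFS_PROP_QUOTA, &quota, &src,
 *	    setpoint, sizeof (setpoint)) == 0)
 *		(void) printf("quota=%llu\n", (u_longlong_t)quota);
 */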
#ifdef HAVE_IDMAP
static int
idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
char **domainp, idmap_rid_t *ridp)
{
idmap_get_handle_t *get_hdl = NULL;
idmap_stat status;
int err = EINVAL;
if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
goto out;
if (isuser) {
err = idmap_get_sidbyuid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
} else {
err = idmap_get_sidbygid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
}
if (err == IDMAP_SUCCESS &&
idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
status == IDMAP_SUCCESS)
err = 0;
else
err = EINVAL;
out:
if (get_hdl)
idmap_get_destroy(get_hdl);
return (err);
}
#endif /* HAVE_IDMAP */
/*
* Convert the propname into the parameters needed by the kernel.
* Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
* Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
* Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234
* Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234
*/
static int
userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
{
zfs_userquota_prop_t type;
char *cp;
boolean_t isuser;
boolean_t isgroup;
struct passwd *pw;
struct group *gr;
domain[0] = '\0';
/* Figure out the property type ({user|group}{quota|space}) */
for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
if (strncmp(propname, zfs_userquota_prop_prefixes[type],
strlen(zfs_userquota_prop_prefixes[type])) == 0)
break;
}
if (type == ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
*typep = type;
isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED);
isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED);
cp = strchr(propname, '@') + 1;
if (isuser && (pw = getpwnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = pw->pw_uid;
} else if (isgroup && (gr = getgrnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = gr->gr_gid;
} else if (strchr(cp, '@')) {
#ifdef HAVE_IDMAP
/*
* It's a SID name (eg "user@domain") that needs to be
* turned into S-1-domainID-RID.
*/
directory_error_t e;
char *numericsid = NULL;
char *end;
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
if (isuser) {
e = directory_sid_from_user_name(NULL,
cp, &numericsid);
} else {
e = directory_sid_from_group_name(NULL,
cp, &numericsid);
}
if (e != NULL) {
directory_error_free(e);
return (ENOENT);
}
if (numericsid == NULL)
return (ENOENT);
cp = numericsid;
(void) strlcpy(domain, cp, domainlen);
cp = strrchr(domain, '-');
*cp = '\0';
cp++;
errno = 0;
*ridp = strtoull(cp, &end, 10);
free(numericsid);
if (errno != 0 || *end != '\0')
return (EINVAL);
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
/* It's a user/group ID (eg "12345"). */
uid_t id;
char *end;
id = strtoul(cp, &end, 10);
if (*end != '\0')
return (EINVAL);
if (id > MAXUID) {
#ifdef HAVE_IDMAP
/* It's an ephemeral ID. */
idmap_rid_t rid;
char *mapdomain;
if (idmap_id_to_numeric_domain_rid(id, isuser,
&mapdomain, &rid) != 0)
return (ENOENT);
(void) strlcpy(domain, mapdomain, domainlen);
*ridp = rid;
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
*ridp = id;
}
}
return (0);
}
static int
zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue, zfs_userquota_prop_t *typep)
{
int err;
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
err = userquota_propname_decode(propname,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
zc.zc_objset_type = *typep;
if (err)
return (err);
err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_USERSPACE_ONE, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
zfs_userquota_prop_t type;
return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
&type));
}
int
zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
zfs_userquota_prop_t type;
err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
&type);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else if (propvalue == 0 &&
(type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA)) {
(void) strlcpy(propbuf, "none", proplen);
} else {
zfs_nicenum(propvalue, propbuf, proplen);
}
return (0);
}
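/*
 * Illustrative sketch (hypothetical user name): the propname encodes both
 * the property type and the principal, as decoded by
 * userquota_propname_decode() above.
 *
 *	char used[32];
 *
 *	if (zfs_prop_get_userquota(zhp, "userused@alice", used,
 *	    sizeof (used), B_FALSE) == 0)
 *		(void) printf("alice has used %s\n", used);
 */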
int
zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
int err;
zfs_cmd_t zc = {"\0"};
const char *snapname;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
snapname = strchr(propname, '@') + 1;
if (strchr(snapname, '@')) {
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
} else {
/* snapname is the short name, append it to zhp's fsname */
char *cp;
(void) strlcpy(zc.zc_value, zhp->zfs_name,
sizeof (zc.zc_value));
cp = strchr(zc.zc_value, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(zc.zc_value, "@", sizeof (zc.zc_value));
(void) strlcat(zc.zc_value, snapname, sizeof (zc.zc_value));
}
err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_WRITTEN, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
err = zfs_prop_get_written_int(zhp, propname, &propvalue);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else {
zfs_nicenum(propvalue, propbuf, proplen);
}
return (0);
}
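/*
 * Illustrative sketch (hypothetical snapshot name): "written@<snap>"
 * reports the space written since that snapshot; a short snapshot name is
 * resolved against this dataset's own name.
 *
 *	char buf[32];
 *
 *	if (zfs_prop_get_written(zhp, "written@monday", buf,
 *	    sizeof (buf), B_FALSE) == 0)
 *		(void) printf("written since @monday: %s\n", buf);
 */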
/*
* Returns the name of the given zfs handle.
*/
const char *
zfs_get_name(const zfs_handle_t *zhp)
{
return (zhp->zfs_name);
}
/*
* Returns the type of the given zfs handle.
*/
zfs_type_t
zfs_get_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_type);
}
/*
* Is one dataset name a child dataset of another?
*
* Needs to handle these cases:
* Dataset 1 "a/foo" "a/foo" "a/foo" "a/foo"
* Dataset 2 "a/fo" "a/foobar" "a/bar/baz" "a/foo/bar"
* Descendant? No. No. No. Yes.
*/
static boolean_t
is_descendant(const char *ds1, const char *ds2)
{
size_t d1len = strlen(ds1);
/* ds2 can't be a descendant if it's smaller */
if (strlen(ds2) < d1len)
return (B_FALSE);
/* otherwise, compare strings and verify that there's a '/' char */
return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
}
/*
* Given a complete name, return just the portion that refers to the parent.
* Will return -1 if there is no parent (path is just the name of the
* pool).
*/
static int
parent_name(const char *path, char *buf, size_t buflen)
{
char *slashp;
(void) strlcpy(buf, path, buflen);
if ((slashp = strrchr(buf, '/')) == NULL)
return (-1);
*slashp = '\0';
return (0);
}
/*
* If accept_ancestor is false, then check to make sure that the given path has
* a parent, and that it exists. If accept_ancestor is true, then find the
 * closest existing ancestor for the given path. Return in prefixlen the
 * length of the already-existing prefix of the given path. We also fetch the
* 'zoned' property, which is used to validate property settings when creating
* new datasets.
*/
static int
check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
boolean_t accept_ancestor, int *prefixlen)
{
zfs_cmd_t zc = {"\0"};
char parent[ZFS_MAXNAMELEN];
char *slash;
zfs_handle_t *zhp;
char errbuf[1024];
uint64_t is_zoned;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);
/* get parent, and check to see if this is just a pool */
if (parent_name(path, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* check to see if the pool exists */
if ((slash = strchr(parent, '/')) == NULL)
slash = parent + strlen(parent);
(void) strncpy(zc.zc_name, parent, slash - parent);
zc.zc_name[slash - parent] = '\0';
if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* check to see if the parent dataset exists */
while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
if (errno == ENOENT && accept_ancestor) {
/*
* Go deeper to find an ancestor, give up on top level.
*/
if (parent_name(parent, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
} else if (errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent does not exist"));
return (zfs_error(hdl, EZFS_NOENT, errbuf));
} else
return (zfs_standard_error(hdl, errno, errbuf));
}
is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned != NULL)
*zoned = is_zoned;
/* we are in a non-global zone, but parent is in the global zone */
if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
(void) zfs_standard_error(hdl, EPERM, errbuf);
zfs_close(zhp);
return (-1);
}
/* make sure parent is a filesystem */
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent is not a filesystem"));
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (-1);
}
zfs_close(zhp);
if (prefixlen != NULL)
*prefixlen = strlen(parent);
return (0);
}
/*
 * Determines whether a dataset of the given type(s) exists.
*/
boolean_t
zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
{
zfs_handle_t *zhp;
if (!zfs_validate_name(hdl, path, types, B_FALSE))
return (B_FALSE);
/*
* Try to get stats for the dataset, which will tell us if it exists.
*/
if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
int ds_type = zhp->zfs_type;
zfs_close(zhp);
if (types & ds_type)
return (B_TRUE);
}
return (B_FALSE);
}
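/*
 * Usage sketch (illustrative only, not part of the libzfs API): a caller
 * might check for an existing dataset before creating one, avoiding the
 * ENOENT-vs-EEXIST surprise described in zfs_create() below.  The dataset
 * name "tank/example" and the helper itself are hypothetical.
 */
static int
example_create_if_missing(libzfs_handle_t *hdl)
{
	if (zfs_dataset_exists(hdl, "tank/example", ZFS_TYPE_DATASET))
		return (0);
	return (zfs_create(hdl, "tank/example", ZFS_TYPE_FILESYSTEM, NULL));
}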
/*
* Given a path to 'target', create all the ancestors between
* the prefixlen portion of the path, and the target itself.
* Fail if the initial prefixlen-ancestor does not already exist.
*/
int
create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
{
zfs_handle_t *h;
char *cp;
const char *opname;
/* make sure prefix exists */
cp = target + prefixlen;
if (*cp != '/') {
assert(strchr(cp, '/') == NULL);
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
} else {
*cp = '\0';
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
*cp = '/';
}
if (h == NULL)
return (-1);
zfs_close(h);
/*
* Attempt to create, mount, and share any ancestor filesystems,
* up to the prefixlen-long one.
*/
for (cp = target + prefixlen + 1;
(cp = strchr(cp, '/')); *cp = '/', cp++) {
*cp = '\0';
h = make_dataset_handle(hdl, target);
if (h) {
/* it already exists, nothing to do here */
zfs_close(h);
continue;
}
if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
NULL) != 0) {
opname = dgettext(TEXT_DOMAIN, "create");
goto ancestorerr;
}
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
if (h == NULL) {
opname = dgettext(TEXT_DOMAIN, "open");
goto ancestorerr;
}
if (zfs_mount(h, NULL, 0) != 0) {
opname = dgettext(TEXT_DOMAIN, "mount");
goto ancestorerr;
}
if (zfs_share(h) != 0) {
opname = dgettext(TEXT_DOMAIN, "share");
goto ancestorerr;
}
zfs_close(h);
}
return (0);
ancestorerr:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to %s ancestor '%s'"), opname, target);
return (-1);
}
/*
* Creates non-existing ancestors of the given path.
*/
int
zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
{
int prefix;
char *path_copy;
int rc = 0;
if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
return (-1);
if ((path_copy = strdup(path)) != NULL) {
rc = create_parents(hdl, path_copy, prefix);
free(path_copy);
}
if (path_copy == NULL || rc != 0)
return (-1);
return (0);
}
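/*
 * Usage sketch (illustrative only, not part of the libzfs API): creating a
 * deeply nested filesystem by first creating any missing ancestors, much as
 * 'zfs create -p' does.  The path "tank/a/b/c" and the helper itself are
 * hypothetical.
 */
static int
example_create_nested(libzfs_handle_t *hdl)
{
	const char *path = "tank/a/b/c";

	if (zfs_create_ancestors(hdl, path) != 0)
		return (-1);
	return (zfs_create(hdl, path, ZFS_TYPE_FILESYSTEM, NULL));
}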
/*
* Create a new filesystem or volume.
*/
int
zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
nvlist_t *props)
{
int ret;
uint64_t size = 0;
uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
char errbuf[1024];
uint64_t zoned;
dmu_objset_type_t ost;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/* validate the path, taking care to note the extended error message */
if (!zfs_validate_name(hdl, path, type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
return (-1);
/*
* The failure modes when creating a dataset of a different type over
 * one that already exists are a little strange. In particular, if you
* try to create a dataset on top of an existing dataset, the ioctl()
* will return ENOENT, not EEXIST. To prevent this from happening, we
* first try to see if the dataset exists.
*/
if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
}
if (type == ZFS_TYPE_VOLUME)
ost = DMU_OST_ZVOL;
else
ost = DMU_OST_ZFS;
/* open zpool handle for prop validation */
char pool_path[MAXNAMELEN];
(void) strlcpy(pool_path, path, sizeof (pool_path));
/* truncate pool_path at first slash */
char *p = strchr(pool_path, '/');
if (p != NULL)
*p = '\0';
zpool_handle_t *zpool_handle = zpool_open(hdl, pool_path);
if (props && (props = zfs_valid_proplist(hdl, type, props,
zoned, NULL, zpool_handle, errbuf)) == 0) {
zpool_close(zpool_handle);
return (-1);
}
zpool_close(zpool_handle);
if (type == ZFS_TYPE_VOLUME) {
/*
* If we are creating a volume, the size and block size must
 * satisfy a few constraints. First, the blocksize must be a
* valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the
* volsize must be a multiple of the block size, and cannot be
* zero.
*/
if (props == NULL || nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if ((ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&blocksize)) != 0) {
if (ret == ENOENT) {
blocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
} else {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume block size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
if (size == 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size cannot be zero"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (size % blocksize != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size must be a multiple of volume block "
"size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
/* create the dataset */
ret = lzc_create(path, ost, props);
nvlist_free(props);
/* check for failure */
if (ret != 0) {
char parent[ZFS_MAXNAMELEN];
(void) parent_name(path, parent, sizeof (parent));
switch (errno) {
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EINVAL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), parent);
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to set this "
"property or value"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
#ifdef _ILP32
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
if (type == ZFS_TYPE_VOLUME)
return (zfs_error(hdl, EZFS_VOLTOOBIG,
errbuf));
#endif
/* FALLTHROUGH */
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (0);
}
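/*
 * Usage sketch (illustrative only, not part of the libzfs API): creating a
 * 1 GiB volume.  As checked above, a volume requires a volsize that is a
 * non-zero multiple of its block size.  The dataset name "tank/vol0" and
 * the helper itself are hypothetical.
 */
static int
example_create_volume(libzfs_handle_t *hdl)
{
	nvlist_t *props = fnvlist_alloc();
	int err;

	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
	    1024ULL * 1024 * 1024);
	err = zfs_create(hdl, "tank/vol0", ZFS_TYPE_VOLUME, props);
	fnvlist_free(props);
	return (err);
}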
/*
* Destroys the given dataset. The caller must make sure that the filesystem
 * isn't mounted, and that there are no active dependents. If the filesystem
 * does not exist, this function does nothing.
*/
int
zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
{
zfs_cmd_t zc = {"\0"};
if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
int error = lzc_destroy_bookmarks(nv, NULL);
fnvlist_free(nv);
if (error != 0) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
return (0);
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (ZFS_IS_VOLUME(zhp)) {
zc.zc_objset_type = DMU_OST_ZVOL;
} else {
zc.zc_objset_type = DMU_OST_ZFS;
}
zc.zc_defer_destroy = defer;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY, &zc) != 0 &&
errno != ENOENT) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
remove_mountpoint(zhp);
return (0);
}
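/*
 * Usage sketch (illustrative only, not part of the libzfs API): destroying
 * an already-unmounted filesystem.  Passing B_FALSE requests an immediate
 * rather than deferred destroy.  The dataset name "tank/scratch" and the
 * helper itself are hypothetical.
 */
static int
example_destroy_fs(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp;
	int err;

	zhp = zfs_open(hdl, "tank/scratch", ZFS_TYPE_FILESYSTEM);
	if (zhp == NULL)
		return (-1);
	err = zfs_destroy(zhp, B_FALSE);
	zfs_close(zhp);
	return (err);
}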
struct destroydata {
nvlist_t *nvl;
const char *snapname;
};
static int
zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
{
struct destroydata *dd = arg;
char name[ZFS_MAXNAMELEN];
int rv = 0;
(void) snprintf(name, sizeof (name),
"%s@%s", zhp->zfs_name, dd->snapname);
if (lzc_exists(name))
verify(nvlist_add_boolean(dd->nvl, name) == 0);
rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd);
zfs_close(zhp);
return (rv);
}
/*
* Destroys all snapshots with the given name in zhp & descendants.
*/
int
zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
{
int ret;
struct destroydata dd = { 0 };
dd.snapname = snapname;
verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0);
(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);
if (nvlist_empty(dd.nvl)) {
ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
zhp->zfs_name, snapname);
} else {
ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer);
}
nvlist_free(dd.nvl);
return (ret);
}
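/*
 * Usage sketch (illustrative only, not part of the libzfs API): removing
 * the snapshot named "old" from a filesystem and from every descendant that
 * has it, similar in effect to 'zfs destroy -r tank/fs@old'.  The names and
 * the helper itself are hypothetical.
 */
static int
example_destroy_recursive_snap(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp;
	char snapname[] = "old";
	int err;

	zhp = zfs_open(hdl, "tank/fs", ZFS_TYPE_FILESYSTEM);
	if (zhp == NULL)
		return (-1);
	err = zfs_destroy_snaps(zhp, snapname, B_FALSE);
	zfs_close(zhp);
	return (err);
}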
/*
* Destroys all the snapshots named in the nvlist.
*/
int
zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer)
{
int ret;
nvlist_t *errlist = NULL;
nvpair_t *pair;
ret = lzc_destroy_snaps(snaps, defer, &errlist);
if (ret == 0) {
nvlist_free(errlist);
return (0);
}
if (nvlist_empty(errlist)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshots"));
ret = zfs_standard_error(hdl, ret, errbuf);
}
for (pair = nvlist_next_nvpair(errlist, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"),
nvpair_name(pair));
switch (fnvpair_value_int32(pair)) {
case EEXIST:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "snapshot is cloned"));
ret = zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
ret = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
nvlist_free(errlist);
return (ret);
}
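/*
 * Usage sketch (illustrative only, not part of the libzfs API): destroying
 * two snapshots in a single request.  The nvlist keys are full snapshot
 * names; the names used here and the helper itself are hypothetical.
 */
static int
example_destroy_two_snaps(libzfs_handle_t *hdl)
{
	nvlist_t *snaps = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(snaps, "tank/fs@old");
	fnvlist_add_boolean(snaps, "tank/fs@older");
	err = zfs_destroy_snaps_nvl(hdl, snaps, B_FALSE);
	fnvlist_free(snaps);
	return (err);
}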
/*
* Clones the given dataset. The target must be of the same type as the source.
*/
int
zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
{
char parent[ZFS_MAXNAMELEN];
int ret;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t zoned;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), target);
/* validate the target/clone name */
if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
/* do the clone */
if (props) {
zfs_type_t type;
if (ZFS_IS_VOLUME(zhp)) {
type = ZFS_TYPE_VOLUME;
} else {
type = ZFS_TYPE_FILESYSTEM;
}
if ((props = zfs_valid_proplist(hdl, type, props, zoned,
zhp, zhp->zpool_hdl, errbuf)) == NULL)
return (-1);
}
ret = lzc_clone(target, zhp->zfs_name, props);
nvlist_free(props);
if (ret != 0) {
switch (errno) {
case ENOENT:
/*
* The parent doesn't exist. We should have caught this
			 * above, but there may be a race condition that has since
* destroyed the parent.
*
* At this point, we don't know whether it's the source
* that doesn't exist anymore, or whether the target
* dataset doesn't exist.
*/
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
case EXDEV:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"source and target pools differ"));
return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
errbuf));
default:
return (zfs_standard_error(zhp->zfs_hdl, errno,
errbuf));
}
}
return (ret);
}
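/*
 * Usage sketch (illustrative only, not part of the libzfs API): cloning a
 * snapshot into a new filesystem within the same pool.  The names and the
 * helper itself are hypothetical.
 */
static int
example_clone_snapshot(libzfs_handle_t *hdl)
{
	zfs_handle_t *snap;
	int err;

	snap = zfs_open(hdl, "tank/fs@today", ZFS_TYPE_SNAPSHOT);
	if (snap == NULL)
		return (-1);
	err = zfs_clone(snap, "tank/fs_clone", NULL);
	zfs_close(snap);
	return (err);
}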
/*
* Promotes the given clone fs to be the clone parent.
*/
int
zfs_promote(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zfs_cmd_t zc = {"\0"};
char parent[MAXPATHLEN];
int ret;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot promote '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots can not be promoted"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
(void) strlcpy(parent, zhp->zfs_dmustats.dds_origin, sizeof (parent));
if (parent[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a cloned filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
(void) strlcpy(zc.zc_value, zhp->zfs_dmustats.dds_origin,
sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
ret = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (ret != 0) {
int save_errno = errno;
switch (save_errno) {
case EEXIST:
/* There is a conflicting snapshot name. */
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"conflicting snapshot '%s' from parent '%s'"),
zc.zc_string, parent);
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
default:
return (zfs_standard_error(hdl, save_errno, errbuf));
}
}
return (ret);
}
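/*
 * Usage sketch (illustrative only, not part of the libzfs API): promoting a
 * clone so that it no longer depends on its origin snapshot.  The clone
 * name "tank/fs_clone" and the helper itself are hypothetical.
 */
static int
example_promote_clone(libzfs_handle_t *hdl)
{
	zfs_handle_t *clone;
	int err;

	clone = zfs_open(hdl, "tank/fs_clone", ZFS_TYPE_FILESYSTEM);
	if (clone == NULL)
		return (-1);
	err = zfs_promote(clone);
	zfs_close(clone);
	return (err);
}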
typedef struct snapdata {
nvlist_t *sd_nvl;
const char *sd_snapname;
} snapdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snapdata_t *sd = arg;
char name[ZFS_MAXNAMELEN];
int rv = 0;
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) {
(void) snprintf(name, sizeof (name),
"%s@%s", zfs_get_name(zhp), sd->sd_snapname);
fnvlist_add_boolean(sd->sd_nvl, name);
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
}
zfs_close(zhp);
return (rv);
}
/*
* Creates snapshots. The keys in the snaps nvlist are the snapshots to be
* created.
*/
int
zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props)
{
int ret;
char errbuf[1024];
nvpair_t *elem;
nvlist_t *errors;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshots "));
elem = NULL;
while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) {
const char *snapname = nvpair_name(elem);
/* validate the target name */
if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT,
B_TRUE)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), snapname);
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
/*
* get pool handle for prop validation. assumes all snaps are in the
* same pool, as does lzc_snapshot (below).
*/
char pool[MAXNAMELEN];
elem = nvlist_next_nvpair(snaps, NULL);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
zpool_handle_t *zpool_hdl = zpool_open(hdl, pool);
if (props != NULL &&
(props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
props, B_FALSE, NULL, zpool_hdl, errbuf)) == NULL) {
zpool_close(zpool_hdl);
return (-1);
}
zpool_close(zpool_hdl);
ret = lzc_snapshot(snaps, props, &errors);
if (ret != 0) {
boolean_t printed = B_FALSE;
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), nvpair_name(elem));
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
printed = B_TRUE;
}
if (!printed) {
switch (ret) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple snapshots of same "
"fs not allowed"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
}
nvlist_free(props);
nvlist_free(errors);
return (ret);
}
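/*
 * Take a snapshot of the dataset named by 'path' (which must contain a '@').
 * If 'recursive' is set, matching snapshots of all descendant filesystems
 * are collected via zfs_snapshot_cb(); the snapshot(s) are then created
 * through zfs_snapshot_nvl().
 */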
int
zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
nvlist_t *props)
{
int ret;
snapdata_t sd = { 0 };
char fsname[ZFS_MAXNAMELEN];
char *cp;
zfs_handle_t *zhp;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot snapshot %s"), path);
if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
(void) strlcpy(fsname, path, sizeof (fsname));
cp = strchr(fsname, '@');
*cp = '\0';
sd.sd_snapname = cp + 1;
if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
return (-1);
}
verify(nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) == 0);
if (recursive) {
(void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd);
} else {
fnvlist_add_boolean(sd.sd_nvl, path);
}
ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
zfs_close(zhp);
return (ret);
}
/*
 * Destroy any more recent snapshots.  We invoke this callback on any dependents
 * of the snapshot first: rollback_destroy_dependent() unmounts and destroys
 * each clone of a too-recent snapshot before rollback_destroy() destroys the
 * snapshot itself.
 */
typedef struct rollback_data {
const char *cb_target; /* the snapshot */
uint64_t cb_create; /* creation time reference */
boolean_t cb_error;
boolean_t cb_force;
} rollback_data_t;
static int
rollback_destroy_dependent(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
prop_changelist_t *clp;
/* We must destroy this clone; first unmount it */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
cbp->cb_force ? MS_FORCE: 0);
if (clp == NULL || changelist_prefix(clp) != 0) {
cbp->cb_error = B_TRUE;
zfs_close(zhp);
return (0);
}
if (zfs_destroy(zhp, B_FALSE) != 0)
cbp->cb_error = B_TRUE;
else
changelist_remove(clp, zhp->zfs_name);
(void) changelist_postfix(clp);
changelist_free(clp);
zfs_close(zhp);
return (0);
}
static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE,
rollback_destroy_dependent, cbp);
cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
}
zfs_close(zhp);
return (0);
}
/*
* Given a dataset, rollback to a specific snapshot, discarding any
* data changes since then and making it the active dataset.
*
* Any snapshots and bookmarks more recent than the target are
* destroyed, along with their dependents (i.e. clones).
*/
int
zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
rollback_data_t cb = { 0 };
int err;
boolean_t restore_resv = 0;
uint64_t old_volsize = 0, new_volsize;
zfs_prop_t resv_prop = { 0 };
assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
zhp->zfs_type == ZFS_TYPE_VOLUME);
/*
* Destroy all recent snapshots and their dependents.
*/
cb.cb_force = force;
cb.cb_target = snap->zfs_name;
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
(void) zfs_iter_snapshots(zhp, B_FALSE, rollback_destroy, &cb);
(void) zfs_iter_bookmarks(zhp, rollback_destroy, &cb);
if (cb.cb_error)
return (-1);
/*
* Now that we have verified that the snapshot is the latest,
* rollback to the given snapshot.
*/
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
restore_resv =
(old_volsize == zfs_prop_get_int(zhp, resv_prop));
}
/*
* We rely on zfs_iter_children() to verify that there are no
* newer snapshots for the given dataset. Therefore, we can
* simply pass the name on to the ioctl() call. There is still
* an unlikely race condition where the user has taken a
* snapshot since we verified that this was the most recent.
*/
err = lzc_rollback(zhp->zfs_name, NULL, 0);
if (err != 0) {
(void) zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
zhp->zfs_name);
return (err);
}
/*
* For volumes, if the pre-rollback volsize matched the pre-
* rollback reservation and the volsize has changed then set
* the reservation property to the post-rollback volsize.
* Make a new handle since the rollback closed the dataset.
*/
if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
(zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
if (restore_resv) {
new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (old_volsize != new_volsize)
err = zfs_prop_set_int(zhp, resv_prop,
new_volsize);
}
zfs_close(zhp);
}
return (err);
}
/*
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target, boolean_t recursive,
boolean_t force_unmount)
{
int ret;
zfs_cmd_t zc = {"\0"};
char *delim;
prop_changelist_t *cl = NULL;
zfs_handle_t *zhrp = NULL;
char *parentname = NULL;
char parent[ZFS_MAXNAMELEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
/* if we have the same exact name, just return success */
if (strcmp(zhp->zfs_name, target) == 0)
return (0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), target);
/*
* Make sure the target name is valid
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
if ((strchr(target, '@') == NULL) ||
*target == '@') {
/*
* Snapshot target name is abbreviated,
* reconstruct full dataset name
*/
(void) strlcpy(parent, zhp->zfs_name,
sizeof (parent));
delim = strchr(parent, '@');
if (strchr(target, '@') == NULL)
*(++delim) = '\0';
else
*delim = '\0';
(void) strlcat(parent, target, sizeof (parent));
target = parent;
} else {
/*
* Make sure we're renaming within the same dataset.
*/
delim = strchr(target, '@');
if (strncmp(zhp->zfs_name, target, delim - target)
!= 0 || zhp->zfs_name[delim - target] != '@') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots must be part of same "
"dataset"));
return (zfs_error(hdl, EZFS_CROSSTARGET,
errbuf));
}
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents */
if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
return (-1);
/* make sure we're in the same pool */
verify((delim = strchr(target, '/')) != NULL);
if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
zhp->zfs_name[delim - target] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"datasets must be within same pool"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
/* new name cannot be a child of the current dataset name */
if (is_descendant(zhp->zfs_name, target)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"New dataset name cannot be a descendant of "
"current dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
if (getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
if (recursive) {
parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
if (parentname == NULL) {
ret = -1;
goto error;
}
delim = strchr(parentname, '@');
*delim = '\0';
zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET);
if (zhrp == NULL) {
ret = -1;
goto error;
}
} else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0,
force_unmount ? MS_FORCE : 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
ret = -1;
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
zc.zc_objset_type = DMU_OST_ZFS;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = recursive;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename '%s'"), zc.zc_name);
if (recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
if (cl != NULL)
(void) changelist_postfix(cl);
} else {
if (cl != NULL) {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
}
error:
if (parentname != NULL) {
free(parentname);
}
if (zhrp != NULL) {
zfs_close(zhrp);
}
if (cl != NULL) {
changelist_free(cl);
}
return (ret);
}
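/*
 * Return the cached nvlist of user properties for the given dataset handle.
 */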
nvlist_t *
zfs_get_user_props(zfs_handle_t *zhp)
{
return (zhp->zfs_user_props);
}
/*
* This function is used by 'zfs list' to determine the exact set of columns to
* display, and their maximum widths. This does two main things:
*
* - If this is a list of all properties, then expand the list to include
* all native properties, and set a flag so that for each dataset we look
* for new unique user properties and add them to the list.
*
* - For non fixed-width properties, keep track of the maximum width seen
* so that we can size the column appropriately. If the user has
* requested received property values, we also need to compute the width
* of the RECEIVED column.
*/
int
zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received,
boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zprop_list_t *entry;
zprop_list_t **last, **start;
nvlist_t *userprops, *propval;
nvpair_t *elem;
char *strval;
char buf[ZFS_MAXPROPLEN];
if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
return (-1);
userprops = zfs_get_user_props(zhp);
entry = *plp;
if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
/*
* Go through and add any user properties as necessary. We
* start by incrementing our list pointer to the first
* non-native property.
*/
start = plp;
while (*start != NULL) {
if ((*start)->pl_prop == ZPROP_INVAL)
break;
start = &(*start)->pl_next;
}
elem = NULL;
while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
/*
* See if we've already found this property in our list.
*/
for (last = start; *last != NULL;
last = &(*last)->pl_next) {
if (strcmp((*last)->pl_user_prop,
nvpair_name(elem)) == 0)
break;
}
if (*last == NULL) {
if ((entry = zfs_alloc(hdl,
sizeof (zprop_list_t))) == NULL ||
((entry->pl_user_prop = zfs_strdup(hdl,
nvpair_name(elem)))) == NULL) {
free(entry);
return (-1);
}
entry->pl_prop = ZPROP_INVAL;
entry->pl_width = strlen(nvpair_name(elem));
entry->pl_all = B_TRUE;
*last = entry;
}
}
}
/*
* Now go through and check the width of any non-fixed columns
*/
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, entry->pl_prop,
buf, sizeof (buf), NULL, NULL, 0, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (received && zfs_prop_get_recvd(zhp,
zfs_prop_to_name(entry->pl_prop),
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
} else {
if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop,
&propval) == 0) {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
if (strlen(strval) > entry->pl_width)
entry->pl_width = strlen(strval);
}
if (received && zfs_prop_get_recvd(zhp,
entry->pl_user_prop,
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
}
}
return (0);
}
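/*
 * Remove any cached native properties that are not marked in the given
 * 'props' table.  User properties and unknown DSL properties always remain.
 */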
void
zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
{
nvpair_t *curr;
nvpair_t *next;
/*
* Keep a reference to the props-table against which we prune the
* properties.
*/
zhp->zfs_props_table = props;
curr = nvlist_next_nvpair(zhp->zfs_props, NULL);
while (curr) {
zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
next = nvlist_next_nvpair(zhp->zfs_props, curr);
/*
* User properties will result in ZPROP_INVAL, and since we
* only know how to prune standard ZFS properties, we always
* leave these in the list. This can also happen if we
* encounter an unknown DSL property (when running older
* software, for example).
*/
if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE)
(void) nvlist_remove(zhp->zfs_props,
nvpair_name(curr), nvpair_type(curr));
curr = next;
}
}
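/*
 * Common helper for the SMB share ACL operations below: pack the request
 * into a zfs_cmd_t (plus an nvlist for renames) and issue ZFS_IOC_SMB_ACL.
 */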
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *nvlist = NULL;
int error;
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
zc.zc_cookie = (uint64_t)cmd;
if (cmd == ZFS_SMB_ACL_RENAME) {
if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (0);
}
}
switch (cmd) {
case ZFS_SMB_ACL_ADD:
case ZFS_SMB_ACL_REMOVE:
(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
break;
case ZFS_SMB_ACL_RENAME:
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
resource1) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
resource2) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) {
nvlist_free(nvlist);
return (-1);
}
break;
case ZFS_SMB_ACL_PURGE:
break;
default:
return (-1);
}
error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
nvlist_free(nvlist);
return (error);
}
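/*
 * Thin wrappers around zfs_smb_acl_mgmt() for each SMB ACL operation.
 */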
int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
resource, NULL));
}
int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
resource, NULL));
}
int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
NULL, NULL));
}
int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
char *oldname, char *newname)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
oldname, newname));
}
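/*
 * Iterate over the user/group space or quota records of the given type for
 * this dataset, calling 'func' once for each entry returned by the
 * ZFS_IOC_USERSPACE_MANY ioctl.
 */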
int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
zfs_userspace_cb_t func, void *arg)
{
zfs_cmd_t zc = {"\0"};
zfs_useracct_t buf[100];
libzfs_handle_t *hdl = zhp->zfs_hdl;
int ret;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_objset_type = type;
zc.zc_nvlist_dst = (uintptr_t)buf;
for (;;) {
zfs_useracct_t *zua = buf;
zc.zc_nvlist_dst_size = sizeof (buf);
if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
char errbuf[ZFS_MAXNAMELEN + 32];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot get used/quota for %s"), zc.zc_name);
return (zfs_standard_error_fmt(hdl, errno, errbuf));
}
if (zc.zc_nvlist_dst_size == 0)
break;
while (zc.zc_nvlist_dst_size > 0) {
if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
zua->zu_space)) != 0)
return (ret);
zua++;
zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
}
}
return (0);
}
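/*
 * Shared callback state used below to build the nvlists of snapshot holds
 * passed to lzc_hold() and lzc_release().
 */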
struct holdarg {
nvlist_t *nvl;
const char *snapname;
const char *tag;
boolean_t recursive;
int error;
};
static int
zfs_hold_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAXNAMELEN];
int rv = 0;
(void) snprintf(name, sizeof (name),
"%s@%s", zhp->zfs_name, ha->snapname);
if (lzc_exists(name))
fnvlist_add_string(ha->nvl, name, ha->tag);
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_hold_one, ha);
zfs_close(zhp);
return (rv);
}
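/*
 * Place a user hold 'tag' on the snapshot 'snapname' of this dataset and,
 * if 'recursive' is set, on the matching snapshot of every descendant
 * filesystem.  'cleanup_fd' is passed through to lzc_hold().
 */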
int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive, int cleanup_fd)
{
int ret;
struct holdarg ha;
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
(void) zfs_hold_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
char errbuf[1024];
fnvlist_free(ha.nvl);
ret = ENOENT;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s@%s'"),
zhp->zfs_name, snapname);
(void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
return (ret);
}
ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl);
fnvlist_free(ha.nvl);
return (ret);
}
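/*
 * Apply the holds described by the 'holds' nvlist (snapshot name -> tag)
 * via lzc_hold(), translating any per-snapshot errors into libzfs errors.
 */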
int
zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds)
{
int ret;
nvlist_t *errors;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
nvpair_t *elem;
errors = NULL;
ret = lzc_hold(holds, cleanup_fd, &errors);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot hold"));
switch (ret) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s'"), nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case E2BIG:
/*
* Temporary tags wind up having the ds object id
* prepended. So even if we passed the length check
* above, it's still possible for the tag to wind
* up being slightly too long.
*/
(void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case EEXIST:
(void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
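/*
 * Per-filesystem callback for zfs_release(): record the hold to release for
 * this filesystem's snapshot, noting ENOENT if the snapshot is missing and
 * ESRCH if the tag is not held on it.
 */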
static int
zfs_release_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAXNAMELEN];
int rv = 0;
nvlist_t *existing_holds;
(void) snprintf(name, sizeof (name),
"%s@%s", zhp->zfs_name, ha->snapname);
if (lzc_get_holds(name, &existing_holds) != 0) {
ha->error = ENOENT;
} else if (!nvlist_exists(existing_holds, ha->tag)) {
ha->error = ESRCH;
} else {
nvlist_t *torelease = fnvlist_alloc();
fnvlist_add_boolean(torelease, ha->tag);
fnvlist_add_nvlist(ha->nvl, name, torelease);
fnvlist_free(torelease);
}
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_release_one, ha);
zfs_close(zhp);
return (rv);
}
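/*
 * Release the user hold 'tag' from the snapshot 'snapname' of this dataset
 * and, if 'recursive' is set, from the matching snapshot of every descendant
 * filesystem.
 */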
int
zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive)
{
int ret;
struct holdarg ha;
nvlist_t *errors = NULL;
nvpair_t *elem;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
ha.error = 0;
(void) zfs_release_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
fnvlist_free(ha.nvl);
ret = ha.error;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s@%s'"),
zhp->zfs_name, snapname);
if (ret == ESRCH) {
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
} else {
(void) zfs_standard_error(hdl, ret, errbuf);
}
return (ret);
}
ret = lzc_release(ha.nvl, &errors);
fnvlist_free(ha.nvl);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot release"));
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zfs_standard_error_fmt(hdl, errno, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s'"),
nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case ESRCH:
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error_fmt(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
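/*
 * Retrieve the delegated ("zfs allow") permissions for this dataset as an
 * unpacked nvlist, retrying with a larger buffer if the first one is too
 * small.
 */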
int
zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
int nvsz = 2048;
void *nvbuf;
int err = 0;
char errbuf[1024];
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
tryagain:
nvbuf = malloc(nvsz);
if (nvbuf == NULL) {
err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
goto out;
}
zc.zc_nvlist_dst_size = nvsz;
zc.zc_nvlist_dst = (uintptr_t)nvbuf;
(void) strlcpy(zc.zc_name, zhp->zfs_name, ZFS_MAXNAMELEN);
if (ioctl(hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOMEM:
free(nvbuf);
nvsz = zc.zc_nvlist_dst_size;
goto tryagain;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error_fmt(hdl, errno, errbuf);
break;
}
} else {
/* success */
int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
if (rc) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(
TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
err = zfs_standard_error_fmt(hdl, rc, errbuf);
}
}
free(nvbuf);
out:
return (err);
}
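/*
 * Update the delegated permissions for this dataset: pack 'nvl' and hand it
 * to ZFS_IOC_SET_FSACL.  'un' indicates the permissions are being removed
 * ("zfs unallow") rather than granted.
 */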
int
zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *nvbuf;
char errbuf[1024];
size_t nvsz;
int err;
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
assert(err == 0);
nvbuf = malloc(nvsz);
err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
assert(err == 0);
zc.zc_nvlist_src_size = nvsz;
zc.zc_nvlist_src = (uintptr_t)nvbuf;
zc.zc_perm_action = un;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error_fmt(hdl, errno, errbuf);
break;
}
}
free(nvbuf);
return (err);
}
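/*
 * Retrieve the user holds on this snapshot via lzc_get_holds(), mapping any
 * failure to the appropriate libzfs error.
 */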
int
zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
{
int err;
char errbuf[1024];
err = lzc_get_holds(zhp->zfs_name, nvl);
if (err != 0) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
zhp->zfs_name);
switch (err) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error_fmt(hdl, errno, errbuf);
break;
}
}
return (err);
}
/*
* Convert the zvol's volume size to an appropriate reservation.
* Note: If this routine is updated, it is necessary to update the ZFS test
* suite's shell version in reservation.kshlib.
*/
uint64_t
zvol_volsize_to_reservation(uint64_t volsize, nvlist_t *props)
{
uint64_t numdb;
uint64_t nblocks, volblocksize;
int ncopies;
char *strval;
if (nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
ncopies = atoi(strval);
else
ncopies = 1;
if (nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize) != 0)
volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
nblocks = volsize/volblocksize;
/* start with metadnode L0-L6 */
numdb = 7;
/* calculate number of indirects */
while (nblocks > 1) {
nblocks += DNODES_PER_LEVEL - 1;
nblocks /= DNODES_PER_LEVEL;
numdb += nblocks;
}
numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
volsize *= ncopies;
/*
* this is exactly DN_MAX_INDBLKSHIFT when metadata isn't
* compressed, but in practice they compress down to about
* 1100 bytes
*/
numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
volsize += numdb;
return (volsize);
}