Illumos #1748: desire support for reguid in zfs

Reviewed by: George Wilson <gwilson@zfsmail.com>
Reviewed by: Igor Kozhukhov <ikozhukhov@gmail.com>
Reviewed by: Alexander Eremin <alexander.eremin@nexenta.com>
Reviewed by: Alexander Stetsenko <ams@nexenta.com>
Approved by: Richard Lowe <richlowe@richlowe.net>

References:
  https://www.illumos.org/issues/1748

This commit modifies the user-to-kernel-space ioctl ABI.  Extra
care should be taken when updating to ensure that the kernel
modules and the user space utilities are updated together.  If
only the user space component is updated, neither the 'zpool
events' command nor the 'zpool reguid' command will work until
the kernel modules are updated.
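
For reference, once the kernel modules and utilities are back in sync,
the new subcommand is invoked as follows (usage sketch only; the pool
name 'tank' and the follow-up 'zpool get guid' check are illustrative
and not part of this change):

    # zpool reguid tank
    # zpool get guid tank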

Ported by:     Martin Matuska <martin@matuska.org>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #665
Authored by: Garrett D'Amore, 2011-11-11 14:07:54 -08:00
Committed by: Brian Behlendorf
Parent: ba9b5428fd
Commit: 3541dc6d02
15 changed files with 225 additions and 15 deletions

cmd/zpool/zpool_main.c

@@ -69,6 +69,8 @@ static int zpool_do_online(int, char **);
 static int zpool_do_offline(int, char **);
 static int zpool_do_clear(int, char **);
+static int zpool_do_reguid(int, char **);
+
 static int zpool_do_attach(int, char **);
 static int zpool_do_detach(int, char **);
 static int zpool_do_replace(int, char **);
@@ -128,7 +130,8 @@ typedef enum {
        HELP_EVENTS,
        HELP_GET,
        HELP_SET,
-       HELP_SPLIT
+       HELP_SPLIT,
+       HELP_REGUID
 } zpool_help_t;
@@ -172,6 +175,7 @@ static zpool_command_t command_table[] = {
        { "import",     zpool_do_import,        HELP_IMPORT },
        { "export",     zpool_do_export,        HELP_EXPORT },
        { "upgrade",    zpool_do_upgrade,       HELP_UPGRADE },
+       { "reguid",     zpool_do_reguid,        HELP_REGUID },
        { NULL },
        { "history",    zpool_do_history,       HELP_HISTORY },
        { "events",     zpool_do_events,        HELP_EVENTS },
@@ -254,6 +258,8 @@ get_usage(zpool_help_t idx) {
                return (gettext("\tsplit [-n] [-R altroot] [-o mntopts]\n"
                    "\t    [-o property=value] <pool> <newpool> "
                    "[<device> ...]\n"));
+       case HELP_REGUID:
+               return (gettext("\treguid <pool>\n"));
        }

        abort();
@@ -3206,6 +3212,52 @@ zpool_do_clear(int argc, char **argv)
        return (ret);
 }
+
+/*
+ * zpool reguid <pool>
+ */
+int
+zpool_do_reguid(int argc, char **argv)
+{
+       int c;
+       char *poolname;
+       zpool_handle_t *zhp;
+       int ret = 0;
+
+       /* check options */
+       while ((c = getopt(argc, argv, "")) != -1) {
+               switch (c) {
+               case '?':
+                       (void) fprintf(stderr, gettext("invalid option '%c'\n"),
+                           optopt);
+                       usage(B_FALSE);
+               }
+       }
+
+       argc -= optind;
+       argv += optind;
+
+       /* get pool name and check number of arguments */
+       if (argc < 1) {
+               (void) fprintf(stderr, gettext("missing pool name\n"));
+               usage(B_FALSE);
+       }
+
+       if (argc > 1) {
+               (void) fprintf(stderr, gettext("too many arguments\n"));
+               usage(B_FALSE);
+       }
+
+       poolname = argv[0];
+       if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
+               return (1);
+
+       ret = zpool_reguid(zhp);
+
+       zpool_close(zhp);
+       return (ret);
+}
+
 typedef struct scrub_cbdata {
        int cb_type;
        int cb_argc;

cmd/ztest/ztest.c

@@ -21,6 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
  */

 /*
@@ -260,6 +261,7 @@ ztest_func_t ztest_vdev_LUN_growth;
 ztest_func_t ztest_vdev_add_remove;
 ztest_func_t ztest_vdev_aux_add_remove;
 ztest_func_t ztest_split_pool;
+ztest_func_t ztest_reguid;

 uint64_t zopt_always = 0ULL * NANOSEC;          /* all the time */
 uint64_t zopt_incessant = 1ULL * NANOSEC / 10;  /* every 1/10 second */
@@ -290,6 +292,7 @@ ztest_info_t ztest_info[] = {
        { ztest_fault_inject,           1,      &zopt_sometimes },
        { ztest_ddt_repair,             1,      &zopt_sometimes },
        { ztest_dmu_snapshot_hold,      1,      &zopt_sometimes },
+       { ztest_reguid,                 1,      &zopt_sometimes },
        { ztest_spa_rename,             1,      &zopt_rarely },
        { ztest_scrub,                  1,      &zopt_rarely },
        { ztest_dsl_dataset_promote_busy, 1,    &zopt_rarely },
@@ -326,6 +329,7 @@ typedef struct ztest_shared {
        uint64_t        zs_vdev_aux;
        uint64_t        zs_alloc;
        uint64_t        zs_space;
+       uint64_t        zs_guid;
        kmutex_t        zs_vdev_lock;
        krwlock_t       zs_name_lock;
        ztest_info_t    zs_info[ZTEST_FUNCS];
@@ -4804,7 +4808,7 @@ ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
        object = od[0].od_object;
        blocksize = od[0].od_blocksize;
-       pattern = spa_guid(spa) ^ dmu_objset_fsid_guid(os);
+       pattern = zs->zs_guid ^ dmu_objset_fsid_guid(os);

        ASSERT(object != 0);
@@ -4876,6 +4880,31 @@ ztest_scrub(ztest_ds_t *zd, uint64_t id)
        (void) spa_scan(spa, POOL_SCAN_SCRUB);
 }

+/*
+ * Change the guid for the pool.
+ */
+/* ARGSUSED */
+void
+ztest_reguid(ztest_ds_t *zd, uint64_t id)
+{
+       ztest_shared_t *zs = ztest_shared;
+       spa_t *spa = zs->zs_spa;
+       uint64_t orig, load;
+
+       orig = spa_guid(spa);
+       load = spa_load_guid(spa);
+
+       if (spa_change_guid(spa) != 0)
+               return;
+
+       if (zopt_verbose >= 3) {
+               (void) printf("Changed guid old %llu -> %llu\n",
+                   (u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
+       }
+
+       VERIFY3U(orig, !=, spa_guid(spa));
+       VERIFY3U(load, ==, spa_load_guid(spa));
+}
+
 /*
  * Rename the pool to a different name and then rename it back.
  */
@@ -5307,6 +5336,7 @@ ztest_run(ztest_shared_t *zs)
 {
        kt_did_t *tid;
        spa_t *spa;
+       objset_t *os;
        kthread_t *resume_thread;
        uint64_t object;
        int error;
@@ -5340,6 +5370,10 @@ ztest_run(ztest_shared_t *zs)
        spa->spa_debug = B_TRUE;
        zs->zs_spa = spa;

+       VERIFY3U(0, ==, dmu_objset_hold(zs->zs_pool, FTAG, &os));
+       zs->zs_guid = dmu_objset_fsid_guid(os);
+       dmu_objset_rele(os, FTAG);
+
        spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;

        /*

include/libzfs.h

@@ -21,7 +21,8 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */

 #ifndef _LIBZFS_H
@@ -250,6 +251,7 @@ typedef struct splitflags {
  */
 extern int zpool_scan(zpool_handle_t *, pool_scan_func_t);
 extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
+extern int zpool_reguid(zpool_handle_t *);

 extern int zpool_vdev_online(zpool_handle_t *, const char *, int,
     vdev_state_t *);

include/sys/fm/fs/zfs.h

@@ -39,6 +39,7 @@ extern "C" {
 #define FM_EREPORT_ZFS_CONFIG_SYNC             "config.sync"
 #define FM_EREPORT_ZFS_POOL                    "zpool"
 #define FM_EREPORT_ZFS_POOL_DESTROY            "zpool.destroy"
+#define FM_EREPORT_ZFS_POOL_REGUID             "zpool.reguid"
 #define FM_EREPORT_ZFS_DEVICE_UNKNOWN          "vdev.unknown"
 #define FM_EREPORT_ZFS_DEVICE_OPEN_FAILED      "vdev.open_failed"
 #define FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA     "vdev.corrupt_data"

include/sys/fs/zfs.h

@@ -22,6 +22,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
  */

 /* Portions Copyright 2010 Robert Milkowski */
@@ -786,6 +787,7 @@ typedef enum zfs_ioc {
        ZFS_IOC_DIFF,
        ZFS_IOC_TMP_SNAPSHOT,
        ZFS_IOC_OBJ_TO_STATS,
+       ZFS_IOC_POOL_REGUID,
        ZFS_IOC_EVENTS_NEXT,
        ZFS_IOC_EVENTS_CLEAR,
 } zfs_ioc_t;
@@ -855,6 +857,7 @@ typedef enum {
  *     ESC_ZFS_RESILVER_START
  *     ESC_ZFS_RESILVER_END
  *     ESC_ZFS_POOL_DESTROY
+ *     ESC_ZFS_POOL_REGUID
  *
  *     ZFS_EV_POOL_NAME        DATA_TYPE_STRING
  *     ZFS_EV_POOL_GUID        DATA_TYPE_UINT64

include/sys/spa.h

@@ -21,6 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
  */

 #ifndef _SYS_SPA_H
@@ -579,6 +580,7 @@ extern void spa_altroot(spa_t *, char *, size_t);
 extern int spa_sync_pass(spa_t *spa);
 extern char *spa_name(spa_t *spa);
 extern uint64_t spa_guid(spa_t *spa);
+extern uint64_t spa_load_guid(spa_t *spa);
 extern uint64_t spa_last_synced_txg(spa_t *spa);
 extern uint64_t spa_first_txg(spa_t *spa);
 extern uint64_t spa_syncing_txg(spa_t *spa);
@@ -612,6 +614,7 @@ extern uint64_t spa_get_random(uint64_t range);
 extern uint64_t spa_generate_guid(spa_t *spa);
 extern void sprintf_blkptr(char *buf, const blkptr_t *bp);
 extern void spa_freeze(spa_t *spa);
+extern int spa_change_guid(spa_t *spa);
 extern void spa_upgrade(spa_t *spa, uint64_t version);
 extern void spa_evict_all(void);
 extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,

include/sys/spa_impl.h

@@ -21,6 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
  */

 #ifndef _SYS_SPA_IMPL_H
@@ -136,7 +137,8 @@ struct spa {
        objset_t        *spa_meta_objset;       /* copy of dp->dp_meta_objset */
        txg_list_t      spa_vdev_txg_list;      /* per-txg dirty vdev list */
        vdev_t          *spa_root_vdev;         /* top-level vdev container */
-       uint64_t        spa_load_guid;          /* initial guid for spa_load */
+       uint64_t        spa_config_guid;        /* config pool guid */
+       uint64_t        spa_load_guid;          /* spa_load initialized guid */
        list_t          spa_config_dirty_list;  /* vdevs with dirty config */
        list_t          spa_state_dirty_list;   /* vdevs with dirty state */
        spa_aux_vdev_t  spa_spares;             /* hot spares */

include/sys/sysevent/eventdefs.h

@@ -21,6 +21,7 @@
 /*
  * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
  * Use is subject to license terms.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
  */

 #ifndef _SYS_SYSEVENT_EVENTDEFS_H

lib/libzfs/libzfs_pool.c

@@ -21,6 +21,8 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */

 #include <ctype.h>
@@ -2948,6 +2950,26 @@ zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
        return (zpool_standard_error(hdl, errno, msg));
 }

+/*
+ * Change the GUID for a pool.
+ */
+int
+zpool_reguid(zpool_handle_t *zhp)
+{
+       char msg[1024];
+       libzfs_handle_t *hdl = zhp->zpool_hdl;
+       zfs_cmd_t zc = { "\0", "\0", "\0", "\0", 0 };
+
+       (void) snprintf(msg, sizeof (msg),
+           dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
+
+       (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+       if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
+               return (0);
+
+       return (zpool_standard_error(hdl, errno, msg));
+}
+
 /*
  * Convert from a devid string to a path.
  */

man/man8/zpool.8

@@ -3,7 +3,8 @@
 .\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License. You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
 .\" See the License for the specific language governing permissions and limitations under the License. When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with the
 .\" fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner]
-.TH zpool 8 "8 Apr 2011" "ZFS pool 28, filesystem 5" "System Administration Commands"
+.\" Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+.TH zpool 8 "10 July 2012" "ZFS pool 28, filesystem 5" "System Administration Commands"
 .SH NAME
 zpool \- configures ZFS storage pools
 .SH SYNOPSIS
@@ -95,6 +96,11 @@ zpool \- configures ZFS storage pools
 \fBzpool online\fR \fIpool\fR \fIdevice\fR ...
 .fi

+.LP
+.nf
+\fBzpool reguid\fR \fIpool\fR
+.fi
+
 .LP
 .nf
 \fBzpool remove\fR \fIpool\fR \fIdevice\fR ...
@@ -1376,6 +1382,17 @@ Expand the device to use all available space. If the device is part of a mirror
 .ne 2
 .mk
 .na
+\fB\fBzpool reguid\fR \fIpool\fR
+.ad
+.sp .6
+.RS 4n
+Generates a new unique identifier for the pool. You must ensure that all devices in this pool are online and
+healthy before performing this action.
+.RE
+
+.sp
+.ne 2
+.na
 \fB\fBzpool remove\fR \fIpool\fR \fIdevice\fR ...\fR
 .ad
 .sp .6

module/zfs/arc.c

@@ -20,6 +20,8 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */

 /*
@@ -1284,7 +1286,7 @@ arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
        ASSERT(BUF_EMPTY(hdr));
        hdr->b_size = size;
        hdr->b_type = type;
-       hdr->b_spa = spa_guid(spa);
+       hdr->b_spa = spa_load_guid(spa);
        hdr->b_state = arc_anon;
        hdr->b_arc_access = 0;
        buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
@@ -2056,7 +2058,7 @@ arc_flush(spa_t *spa)
        uint64_t guid = 0;

        if (spa)
-               guid = spa_guid(spa);
+               guid = spa_load_guid(spa);

        while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
                (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
@@ -2887,7 +2889,7 @@ arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp,
        arc_buf_t *buf = NULL;
        kmutex_t *hash_lock;
        zio_t *rzio;
-       uint64_t guid = spa_guid(spa);
+       uint64_t guid = spa_load_guid(spa);

 top:
        hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
@@ -4522,7 +4524,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
        boolean_t have_lock, full;
        l2arc_write_callback_t *cb;
        zio_t *pio, *wzio;
-       uint64_t guid = spa_guid(spa);
+       uint64_t guid = spa_load_guid(spa);
        int try;

        ASSERT(dev->l2ad_vdev != NULL);

module/zfs/spa.c

@@ -21,9 +21,8 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- */
-/*
  * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */

 /*
@@ -571,6 +570,43 @@ spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
        }
 }

+/*
+ * Change the GUID for the pool.  This is done so that we can later
+ * re-import a pool built from a clone of our own vdevs.  We will modify
+ * the root vdev's guid, our own pool guid, and then mark all of our
+ * vdevs dirty.  Note that we must make sure that all our vdevs are
+ * online when we do this, or else any vdevs that weren't present
+ * would be orphaned from our pool.  We are also going to issue a
+ * sysevent to update any watchers.
+ */
+int
+spa_change_guid(spa_t *spa)
+{
+       uint64_t        oldguid, newguid;
+       uint64_t        txg;
+
+       if (!(spa_mode_global & FWRITE))
+               return (EROFS);
+
+       txg = spa_vdev_enter(spa);
+
+       if (spa->spa_root_vdev->vdev_state != VDEV_STATE_HEALTHY)
+               return (spa_vdev_exit(spa, NULL, txg, ENXIO));
+
+       oldguid = spa_guid(spa);
+       newguid = spa_generate_guid(NULL);
+       ASSERT3U(oldguid, !=, newguid);
+
+       spa->spa_root_vdev->vdev_guid = newguid;
+       spa->spa_root_vdev->vdev_guid_sum += (newguid - oldguid);
+
+       vdev_config_dirty(spa->spa_root_vdev);
+
+       spa_event_notify(spa, NULL, FM_EREPORT_ZFS_POOL_REGUID);
+
+       return (spa_vdev_exit(spa, NULL, txg, 0));
+}
+
 /*
  * ==========================================================================
  * SPA state manipulation (open/create/destroy/import/export)
@@ -1773,7 +1809,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
            spa_guid_exists(pool_guid, 0)) {
                error = EEXIST;
        } else {
-               spa->spa_load_guid = pool_guid;
+               spa->spa_config_guid = pool_guid;

                if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
                    &nvl) == 0) {

module/zfs/spa_misc.c

@@ -21,6 +21,7 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
  */

 #include <sys/zfs_context.h>
@@ -1290,13 +1291,24 @@ spa_guid(spa_t *spa)
        /*
         * If we fail to parse the config during spa_load(), we can go through
         * the error path (which posts an ereport) and end up here with no root
-        * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
+        * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
         * this case.
         */
        if (spa->spa_root_vdev != NULL)
                return (spa->spa_root_vdev->vdev_guid);
        else
-               return (spa->spa_load_guid);
+               return (spa->spa_config_guid);
+}
+
+uint64_t
+spa_load_guid(spa_t *spa)
+{
+       /*
+        * This is a GUID that exists solely as a reference for the
+        * purposes of the arc.  It is generated at load time, and
+        * is never written to persistent storage.
+        */
+       return (spa->spa_load_guid);
 }

 uint64_t

module/zfs/vdev.c

@@ -21,6 +21,8 @@
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
  */

 #include <sys/zfs_context.h>
@@ -291,6 +293,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
        if (spa->spa_root_vdev == NULL) {
                ASSERT(ops == &vdev_root_ops);
                spa->spa_root_vdev = vd;
+               spa->spa_load_guid = spa_generate_guid(NULL);
        }

        if (guid == 0 && ops != &vdev_hole_ops) {

module/zfs/zfs_ioctl.c

@@ -24,6 +24,10 @@
  * Portions Copyright 2012 Pawel Jakub Dawidek <pawel@dawidek.net>
  * Copyright (c) 2012, Joyent, Inc. All rights reserved.
  */
+/*
+ * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2011 by Delphix. All rights reserved.
+ */

 #include <sys/types.h>
 #include <sys/param.h>
@@ -1427,6 +1431,20 @@ zfs_ioc_pool_get_history(zfs_cmd_t *zc)
        return (error);
 }

+static int
+zfs_ioc_pool_reguid(zfs_cmd_t *zc)
+{
+       spa_t *spa;
+       int error;
+
+       error = spa_open(zc->zc_name, &spa, FTAG);
+       if (error == 0) {
+               error = spa_change_guid(spa);
+               spa_close(spa, FTAG);
+       }
+
+       return (error);
+}
+
 static int
 zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc)
 {
@@ -4698,10 +4716,12 @@ static zfs_ioc_vec_t zfs_ioc_vec[] = {
            B_FALSE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
        { zfs_ioc_obj_to_stats, zfs_secpolicy_diff, DATASET_NAME, B_FALSE,
            POOL_CHECK_SUSPENDED },
+       { zfs_ioc_pool_reguid, zfs_secpolicy_config, POOL_NAME, B_TRUE,
+           POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY },
        { zfs_ioc_events_next, zfs_secpolicy_config, NO_NAME, B_FALSE,
            POOL_CHECK_NONE },
        { zfs_ioc_events_clear, zfs_secpolicy_config, NO_NAME, B_FALSE,
-           POOL_CHECK_NONE },
+           POOL_CHECK_NONE }
 };

 int