spa_misc: add an API for spa_namespace_lock

This is useful for debugging, as it lets namespace lock operations be
traced directly. It will also support future work to reduce the use of
spa_namespace_lock, traditionally a source of difficult deadlocks.

Sponsored-by: Klara, Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
Closes #17906
Rob Norris 2025-11-11 09:23:39 +11:00 committed by Brian Behlendorf
parent e305c7d596
commit ac0bc4cc00
21 changed files with 260 additions and 204 deletions
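
For orientation before the per-file diffs: a minimal sketch of the conversion this commit applies at its call sites. The lookup code around the lock calls is illustrative only; spa_namespace_enter(), spa_namespace_exit(), and FTAG (the standard ZFS macro naming the calling function) are taken from the patch below.

	/* Before: callers took the global mutex directly. */
	mutex_enter(&spa_namespace_lock);
	spa_t *spa = spa_lookup(name);
	mutex_exit(&spa_namespace_lock);

	/* After: callers go through tagged wrappers, so each acquisition
	 * and release carries a caller tag that tracing can report. */
	spa_namespace_enter(FTAG);
	spa_t *spa = spa_lookup(name);
	spa_namespace_exit(FTAG);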


@@ -7899,11 +7899,11 @@ zdb_set_skip_mmp(char *target)
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
@@ -10022,13 +10022,13 @@ main(int argc, char **argv)
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (!error) {
error = spa_open_rewind(target, &spa,


@@ -1228,10 +1228,10 @@ ztest_kill(ztest_shared_t *zs)
* See comment above spa_write_cachefile().
*/
if (raidz_expand_pause_point != RAIDZ_EXPAND_PAUSE_NONE) {
if (mutex_tryenter(&spa_namespace_lock)) {
if (spa_namespace_tryenter(FTAG)) {
spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE,
B_FALSE);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
ztest_scratch_state->zs_raidz_scratch_verify_pause =
raidz_expand_pause_point;
@@ -1246,9 +1246,9 @@ ztest_kill(ztest_shared_t *zs)
return;
}
} else {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE, B_FALSE);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
(void) raise(SIGKILL);
@@ -3689,10 +3689,10 @@ ztest_split_pool(ztest_ds_t *zd, uint64_t id)
if (error == 0) {
(void) printf("successful split - results:\n");
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
show_pool_stats(spa);
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
++zs->zs_splits;
--zs->zs_mirrors;
}
@@ -3976,11 +3976,11 @@ raidz_scratch_verify(void)
kernel_init(SPA_MODE_READ);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa = spa_lookup(ztest_opts.zo_pool);
ASSERT(spa);
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
@@ -7427,11 +7427,11 @@ ztest_walk_pool_directory(const char *header)
if (ztest_opts.zo_verbose >= 6)
(void) puts(header);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(spa)) != NULL)
if (ztest_opts.zo_verbose >= 6)
(void) printf("\t%s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
static void
@@ -8546,11 +8546,11 @@ ztest_run(ztest_shared_t *zs)
/*
* Verify that we can loop over all pools.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
if (ztest_opts.zo_verbose > 3)
(void) printf("spa_next: found %s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
/*
* Verify that we can export the pool and reimport it under a


@@ -29,7 +29,7 @@
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, 2025, Klara, Inc.
* Copyright (c) 2019, Datto Inc.
*/
@@ -867,10 +867,14 @@ uint_t spa_acq_allocator(spa_t *spa);
void spa_rel_allocator(spa_t *spa, uint_t allocator);
void spa_select_allocator(zio_t *zio);
/* spa namespace global mutex */
extern kmutex_t spa_namespace_lock;
extern avl_tree_t spa_namespace_avl;
extern kcondvar_t spa_namespace_cv;
/* spa namespace global lock */
extern void spa_namespace_enter(const void *tag);
extern boolean_t spa_namespace_tryenter(const void *tag);
extern int spa_namespace_enter_interruptible(const void *tag);
extern void spa_namespace_exit(const void *tag);
extern boolean_t spa_namespace_held(void);
extern void spa_namespace_wait(void);
extern void spa_namespace_broadcast(void);
/*
* SPA configuration functions in spa_config.c
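
To make the intended use of the non-blocking and interruptible variants concrete, here is a short sketch based on the call sites converted later in this commit (ztest_kill() for the try variant, spa_all_configs() for the interruptible one); the surrounding control flow is illustrative:

	/* Non-blocking: back off instead of sleeping if the lock is busy. */
	if (spa_namespace_tryenter(FTAG)) {
		/* ... work with the namespace lock held ... */
		spa_namespace_exit(FTAG);
	}

	/* Interruptible: give up if a signal arrives while waiting. */
	int error = spa_namespace_enter_interruptible(FTAG);
	if (error != 0)
		return (SET_ERROR(EINTR));
	/* ... work with the namespace lock held ... */
	spa_namespace_exit(FTAG);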


@@ -903,7 +903,7 @@ spa_config_load(void)
* Iterate over all elements in the nvlist, creating a new spa_t for
* each one with the specified configuration.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
nvpair = NULL;
while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
@@ -915,7 +915,7 @@
continue;
(void) spa_add(nvpair_name(nvpair), child, NULL);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
nvlist_free(nvlist);


@@ -193,7 +193,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
*/
config = spa_generate_rootconf(name);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if (config != NULL) {
pname = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
VERIFY0(strcmp(name, pname));
@@ -204,7 +204,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
* e.g., after reboot -r.
*/
if (spa->spa_state == POOL_STATE_ACTIVE) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
fnvlist_free(config);
return (0);
}
@@ -226,7 +226,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
} else if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
fnvlist_free(config);
cmn_err(CE_NOTE, "Cannot find the pool label for '%s'",
name);
@@ -249,7 +249,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
VDEV_ALLOC_ROOTPOOL);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
fnvlist_free(config);
cmn_err(CE_NOTE, "Can not parse the config for pool '%s'",
name);
@@ -259,7 +259,7 @@ spa_import_rootpool(const char *name, bool checkpointrewind)
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_free(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
fnvlist_free(config);
return (0);


@@ -108,11 +108,11 @@ zfs_ioc_nextboot(const char *unused, nvlist_t *innvl, nvlist_t *outnvl)
"command", &command) != 0)
return (EINVAL);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa = spa_by_guid(pool_guid, vdev_guid);
if (spa != NULL)
strcpy(name, spa_name(spa));
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (spa == NULL)
return (ENOENT);


@@ -282,8 +282,8 @@ retry:
* Take spa_namespace_lock to prevent lock inversion when
* zvols from one pool are opened as vdevs in another.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
if (!spa_namespace_held()) {
if (!spa_namespace_tryenter(FTAG)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
@@ -295,7 +295,7 @@
}
err = zvol_first_open(zv, !(flag & FWRITE));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (err)
goto out_locked;
pp->mediasize = zv->zv_volsize;
@@ -962,8 +962,8 @@ retry:
* Take spa_namespace_lock to prevent lock inversion when
* zvols from one pool are opened as vdevs in another.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
if (!spa_namespace_held()) {
if (!spa_namespace_tryenter(FTAG)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
@@ -975,7 +975,7 @@
}
err = zvol_first_open(zv, !(flags & FWRITE));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (err)
goto out_locked;
}


@@ -809,8 +809,8 @@ retry:
* the kernel so the only option is to return the error for
* the caller to handle it.
*/
if (!mutex_owned(&spa_namespace_lock)) {
if (!mutex_tryenter(&spa_namespace_lock)) {
if (!spa_namespace_held()) {
if (!spa_namespace_tryenter(FTAG)) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
drop_suspend = B_FALSE;
@@ -834,7 +834,7 @@
error = -zvol_first_open(zv, !(blk_mode_is_open_write(flag)));
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
if (error == 0) {


@@ -8548,7 +8548,7 @@ l2arc_dev_get_next(void)
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
@@ -8591,7 +8591,7 @@ out:
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (next);
}
@@ -10231,7 +10231,7 @@ l2arc_stop(void)
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
@@ -10256,7 +10256,7 @@ l2arc_spa_rebuild_start(spa_t *spa)
void
l2arc_spa_rebuild_stop(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {


@@ -729,12 +729,12 @@ mmp_signal_all_threads(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(spa))) {
if (spa->spa_state == POOL_STATE_ACTIVE)
mmp_signal_thread(spa);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
ZFS_MODULE_PARAM_CALL(zfs_multihost, zfs_multihost_, interval,


@@ -1082,7 +1082,7 @@ spa_change_guid(spa_t *spa, const uint64_t *guidp)
int error;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if (guidp != NULL) {
guid = *guidp;
@@ -1117,7 +1117,7 @@ spa_change_guid(spa_t *spa, const uint64_t *guidp)
}
out:
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
@@ -2252,7 +2252,7 @@ spa_should_sync_time_logger_on_unload(spa_t *spa)
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
@@ -5325,7 +5325,7 @@ spa_ld_read_checkpoint_txg(spa_t *spa)
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_load_thread == curthread);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
@@ -5352,7 +5352,7 @@ spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
@@ -5428,7 +5428,7 @@ spa_ld_checkpoint_rewind(spa_t *spa)
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
@@ -5575,7 +5575,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
boolean_t update_config_cache = B_FALSE;
hrtime_t load_start = gethrtime();
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
@@ -5622,7 +5622,7 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
* Drop the namespace lock for the rest of the function.
*/
spa->spa_load_thread = curthread;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
@@ -5861,9 +5861,9 @@ spa_load_impl(spa_t *spa, spa_import_type_t type, const char **ereport)
spa_load_note(spa, "LOADED");
fail:
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa->spa_load_thread = NULL;
cv_broadcast(&spa_namespace_cv);
spa_namespace_broadcast();
return (error);
@@ -6025,14 +6025,14 @@ spa_open_common(const char *pool, spa_t **spapp, const void *tag,
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
if (!spa_namespace_held()) {
spa_namespace_enter(FTAG);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(ENOENT));
}
@@ -6069,7 +6069,7 @@ spa_open_common(const char *pool, spa_t **spapp, const void *tag,
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(ENOENT));
}
@@ -6089,7 +6089,7 @@ spa_open_common(const char *pool, spa_t **spapp, const void *tag,
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
*spapp = NULL;
return (error);
}
@@ -6113,7 +6113,7 @@ spa_open_common(const char *pool, spa_t **spapp, const void *tag,
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
if (firstopen)
@@ -6146,13 +6146,13 @@ spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (spa);
}
@@ -6160,9 +6160,9 @@ spa_inject_addref(char *name)
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
/*
@@ -6406,14 +6406,14 @@ spa_get_stats(const char *name, nvlist_t **config,
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
} else {
spa_altroot(spa, altroot, buflen);
}
@@ -6633,9 +6633,9 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(EEXIST));
}
@@ -6653,7 +6653,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (error);
}
@@ -6686,14 +6686,14 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (ENOTSUP);
}
@@ -6759,7 +6759,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (error);
}
@@ -6912,7 +6912,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
spa_import_os(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (0);
}
@@ -6937,9 +6937,9 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(EEXIST));
}
@@ -6966,7 +6966,7 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (0);
}
@@ -7025,7 +7025,7 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (error);
}
@@ -7093,7 +7093,7 @@ spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
zvol_create_minors(pool);
@@ -7125,7 +7125,7 @@ spa_tryimport(nvlist_t *tryconfig)
(void) snprintf(name, MAXPATHLEN, "%s-%llx-%s",
TRYIMPORT_NAME, (u_longlong_t)(uintptr_t)curthread, poolname);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa = spa_add(name, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
kmem_free(name, MAXPATHLEN);
@@ -7223,7 +7223,7 @@ spa_tryimport(nvlist_t *tryconfig)
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (config);
}
@@ -7251,15 +7251,15 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
@@ -7269,18 +7269,18 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
* and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa->spa_export_thread = curthread;
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
goto export_spa;
}
@@ -7304,7 +7304,7 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
goto fail;
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
/*
* At this point we no longer hold the spa_namespace_lock and
* there were no references on the spa. Future spa_lookups will
@@ -7323,7 +7323,7 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
goto fail;
}
@@ -7398,7 +7398,7 @@ export_spa:
/*
* Take the namespace lock for the actual spa_t removal
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE, B_FALSE);
@@ -7416,8 +7416,8 @@ export_spa:
/*
* Wake up any waiters in spa_lookup()
*/
cv_broadcast(&spa_namespace_cv);
mutex_exit(&spa_namespace_lock);
spa_namespace_broadcast();
spa_namespace_exit(FTAG);
return (0);
fail:
@@ -7428,8 +7428,8 @@ fail:
/*
* Wake up any waiters in spa_lookup()
*/
cv_broadcast(&spa_namespace_cv);
mutex_exit(&spa_namespace_lock);
spa_namespace_broadcast();
spa_namespace_exit(FTAG);
return (error);
}
@@ -7639,10 +7639,10 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroot, boolean_t check_ashift)
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (0);
}
@@ -7759,7 +7759,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
@@ -8143,7 +8143,7 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
* as spa_vdev_resilver_done() calls this function everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
@@ -8347,28 +8347,28 @@ spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (error);
}
@@ -8377,7 +8377,7 @@ static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
@@ -8461,7 +8461,7 @@ spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
@@ -8484,7 +8484,7 @@ spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
list_destroy(&vd_list);
@@ -8495,7 +8495,7 @@ static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
@@ -8582,7 +8582,7 @@ spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
@@ -8605,7 +8605,7 @@ spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
list_destroy(&vd_list);
@@ -8633,7 +8633,7 @@ spa_vdev_split_mirror(spa_t *spa, const char *newname, nvlist_t *config,
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
@@ -9307,7 +9307,7 @@ spa_async_thread(void *arg)
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
@@ -9325,7 +9325,7 @@ spa_async_thread(void *arg)
spa_embedded_log_class(spa));
new_space += metaslab_class_get_space(
spa_special_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
/*
* If the pool grew as a result of the config update,
@@ -9394,49 +9394,49 @@ spa_async_thread(void *arg)
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
/*
@@ -10588,18 +10588,18 @@ void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
taskq_t *
@@ -10746,7 +10746,7 @@ spa_evict_all(void)
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
@@ -10754,9 +10754,9 @@ spa_evict_all(void)
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
@@ -10765,7 +10765,7 @@ spa_evict_all(void)
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
vdev_t *


@@ -161,7 +161,7 @@ spa_write_cachefile(spa_t *target, boolean_t removing, boolean_t postsysevent,
boolean_t ccw_failure;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
@@ -287,7 +287,7 @@ spa_all_configs(uint64_t *generation, nvlist_t **pools)
if (*generation == spa_config_generation)
return (SET_ERROR(EEXIST));
int error = mutex_enter_interruptible(&spa_namespace_lock);
int error = spa_namespace_enter_interruptible(FTAG);
if (error)
return (SET_ERROR(EINTR));
@@ -302,7 +302,7 @@ spa_all_configs(uint64_t *generation, nvlist_t **pools)
}
}
*generation = spa_config_generation;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (0);
}
@@ -483,7 +483,7 @@ spa_config_update(spa_t *spa, int what)
uint64_t txg;
int c;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
txg = spa_last_synced_txg(spa) + 1;


@@ -28,7 +28,7 @@
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2023, 2024, Klara Inc.
* Copyright (c) 2023, 2024, 2025, Klara, Inc.
*/
#include <sys/zfs_context.h>
@@ -237,9 +237,10 @@
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*/
avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
kcondvar_t spa_namespace_cv;
static avl_tree_t spa_namespace_avl;
static kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
@@ -608,6 +609,58 @@ spa_config_held(spa_t *spa, int locks, krw_t rw)
* ==========================================================================
*/
void
spa_namespace_enter(const void *tag)
{
(void) tag;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
mutex_enter(&spa_namespace_lock);
}
boolean_t
spa_namespace_tryenter(const void *tag)
{
(void) tag;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
return (mutex_tryenter(&spa_namespace_lock));
}
int
spa_namespace_enter_interruptible(const void *tag)
{
(void) tag;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
return (mutex_enter_interruptible(&spa_namespace_lock));
}
void
spa_namespace_exit(const void *tag)
{
(void) tag;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
mutex_exit(&spa_namespace_lock);
}
boolean_t
spa_namespace_held(void)
{
return (MUTEX_HELD(&spa_namespace_lock));
}
void
spa_namespace_wait(void)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
cv_wait(&spa_namespace_cv, &spa_namespace_lock);
}
void
spa_namespace_broadcast(void)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
cv_broadcast(&spa_namespace_cv);
}
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
@@ -620,7 +673,7 @@ spa_lookup(const char *name)
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
retry:
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
@@ -645,7 +698,7 @@ retry:
spa->spa_load_thread != curthread) ||
(spa->spa_export_thread != NULL &&
spa->spa_export_thread != curthread)) {
cv_wait(&spa_namespace_cv, &spa_namespace_lock);
spa_namespace_wait();
goto retry;
}
@@ -697,7 +750,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
@@ -747,7 +800,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
spa_config_lock_init(spa);
spa_stats_init(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
avl_add(&spa_namespace_avl, spa);
/*
@@ -837,7 +890,7 @@ spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
ASSERT0(spa->spa_waiters);
@@ -916,7 +969,7 @@ spa_remove(spa_t *spa)
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
@@ -938,7 +991,7 @@ void
spa_open_ref(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock) ||
spa_namespace_held() ||
spa->spa_load_thread == curthread);
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
@@ -951,7 +1004,7 @@ void
spa_close(spa_t *spa, const void *tag)
{
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock) ||
spa_namespace_held() ||
spa->spa_load_thread == curthread ||
spa->spa_export_thread == curthread);
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
@@ -980,7 +1033,7 @@ spa_async_close(spa_t *spa, const void *tag)
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
@@ -1227,7 +1280,7 @@ uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
ASSERT0(spa->spa_export_thread);
@@ -1246,7 +1299,7 @@ uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
ASSERT0(spa->spa_export_thread);
@@ -1270,7 +1323,7 @@ spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
@@ -1285,7 +1338,7 @@ void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
const char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
int config_changed = B_FALSE;
@@ -1374,7 +1427,7 @@ spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
vdev_rebuild_restart(spa);
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
@@ -1452,9 +1505,9 @@ spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
return (error);
@@ -1501,7 +1554,7 @@ spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
@@ -1583,7 +1636,7 @@ spa_load_guid_exists(uint64_t guid)
{
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
for (spa_t *spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa_load_guid(spa) == guid)
@@ -2200,10 +2253,10 @@ spa_set_deadman_ziotime(hrtime_t ns)
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_ziotime = ns;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
}
@@ -2213,10 +2266,10 @@ spa_set_deadman_synctime(hrtime_t ns)
spa_t *spa = NULL;
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(spa)) != NULL)
spa->spa_deadman_synctime = ns;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
}
@@ -3048,10 +3101,10 @@ param_set_deadman_failmode_common(const char *val)
return (SET_ERROR(EINVAL));
if (spa_mode_global != SPA_MODE_UNINIT) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
}
return (0);
@@ -3135,7 +3188,6 @@ EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
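
The held/wait/broadcast helpers defined above replace direct use of the mutex and condvar in assertions and in the spa_lookup() retry loop. Condensed from the hunks above, the wait/wake pattern looks like this; pool_is_busy() is a hypothetical stand-in for the spa_load_thread/spa_export_thread checks:

	/* Waiter side, as in spa_lookup(): */
	ASSERT(spa_namespace_held());
	while (pool_is_busy(spa))
		spa_namespace_wait();	/* cv_wait(): drops and retakes the lock */

	/* Waker side, as in spa_export_common(): wake up any waiters
	 * in spa_lookup() after the pool's state changes. */
	spa_namespace_broadcast();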


@@ -685,7 +685,7 @@ vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
(void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
while ((vd = list_remove_head(vd_list)) != NULL) {
@@ -728,7 +728,7 @@ vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
if (vd_list == NULL) {
vdev_initialize_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
vd->vdev_spa->spa_export_thread == curthread);
list_insert_tail(vd_list, vd);
}
@@ -761,7 +761,7 @@ vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
spa_t *spa = vd->vdev_spa;
list_t vd_list;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
list_create(&vd_list, sizeof (vdev_t),
@@ -781,7 +781,7 @@ vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
void
vdev_initialize_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
vd->vdev_spa->spa_load_thread == curthread);
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));


@@ -1079,7 +1079,7 @@ vdev_rebuild_restart_impl(vdev_t *vd)
void
vdev_rebuild_restart(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_load_thread == curthread);
vdev_rebuild_restart_impl(spa->spa_root_vdev);
@@ -1094,7 +1094,7 @@ vdev_rebuild_stop_wait(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
if (vd == spa->spa_root_vdev) {


@@ -309,12 +309,12 @@ spa_vdev_noalloc(spa_t *spa, uint64_t guid)
uint64_t txg;
int error = 0;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
ASSERT(!spa_namespace_held());
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
@@ -342,12 +342,12 @@ spa_vdev_alloc(spa_t *spa, uint64_t guid)
uint64_t txg;
int error = 0;
ASSERT(!MUTEX_HELD(&spa_namespace_lock));
ASSERT(!spa_namespace_held());
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
@@ -2085,7 +2085,7 @@ vdev_remove_make_hole_and_free(vdev_t *vd)
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
vdev_free(vd);
@@ -2113,7 +2113,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
ASSERT(vd->vdev_islog);
ASSERT(vd == vd->vdev_top);
ASSERT0P(vd->vdev_log_mg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
/*
* Stop allocating from this vdev.
@@ -2140,7 +2140,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
* spa_namespace_lock held. Once this completes the device
* should no longer have any blocks allocated on it.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (vd->vdev_stat.vs_alloc != 0)
error = spa_reset_logs(spa);
@@ -2189,7 +2189,7 @@ spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
sysevent_t *ev = spa_event_create(spa, vd, NULL,
ESC_ZFS_VDEV_REMOVE_DEV);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/* The top ZAP should have been destroyed by vdev_remove_empty. */
@@ -2433,7 +2433,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
uint64_t txg = 0;
uint_t nspares, nl2cache;
int error = 0, error_log;
boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
boolean_t locked = spa_namespace_held();
sysevent_t *ev = NULL;
const char *vd_type = NULL;
char *vd_path = NULL;
@@ -2443,7 +2443,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
if (!locked)
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;


@@ -1045,7 +1045,7 @@ vdev_trim_stop_wait(spa_t *spa, list_t *vd_list)
(void) spa;
vdev_t *vd;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
while ((vd = list_remove_head(vd_list)) != NULL) {
@@ -1085,7 +1085,7 @@ vdev_trim_stop(vdev_t *vd, vdev_trim_state_t tgt_state, list_t *vd_list)
if (vd_list == NULL) {
vdev_trim_stop_wait_impl(vd);
} else {
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
vd->vdev_spa->spa_export_thread == curthread);
list_insert_tail(vd_list, vd);
}
@@ -1122,7 +1122,7 @@ vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
list_t vd_list;
vdev_t *vd_l2cache;
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_export_thread == curthread);
list_create(&vd_list, sizeof (vdev_t),
@@ -1156,7 +1156,7 @@ vdev_trim_stop_all(vdev_t *vd, vdev_trim_state_t tgt_state)
void
vdev_trim_restart(vdev_t *vd)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
vd->vdev_spa->spa_load_thread == curthread);
ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
@@ -1582,7 +1582,7 @@ vdev_autotrim_stop_all(spa_t *spa)
void
vdev_autotrim_restart(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
ASSERT(spa_namespace_held() ||
spa->spa_load_thread == curthread);
if (spa->spa_autotrim)
vdev_autotrim(spa);
@@ -1689,7 +1689,7 @@ vdev_trim_l2arc_thread(void *arg)
void
vdev_trim_l2arc(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_namespace_held());
/*
* Locate the spa's l2arc devices and kick off TRIM threads.


@@ -1580,10 +1580,10 @@ zfs_ereport_zvol_post(const char *subclass, const char *name,
nvlist_t *aux;
char *r;
boolean_t locked = mutex_owned(&spa_namespace_lock);
if (!locked) mutex_enter(&spa_namespace_lock);
boolean_t locked = spa_namespace_held();
if (!locked) spa_namespace_enter(FTAG);
spa_t *spa = spa_lookup(name);
if (!locked) mutex_exit(&spa_namespace_lock);
if (!locked) spa_namespace_exit(FTAG);
if (spa == NULL)
return;


@@ -3122,12 +3122,12 @@ zfs_ioc_pool_set_props(zfs_cmd_t *zc)
if (pair != NULL && strcmp(nvpair_name(pair),
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE)) == 0 &&
nvlist_next_nvpair(props, pair) == NULL) {
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if ((spa = spa_lookup(zc->zc_name)) != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (spa != NULL) {
nvlist_free(props);
return (0);
@@ -3176,14 +3176,14 @@ zfs_ioc_pool_get_props(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
* get (such as altroot and cachefile), so attempt to get them
* anyway.
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
if ((spa = spa_lookup(pool)) != NULL) {
error = spa_prop_get(spa, outnvl);
if (error == 0 && props != NULL)
error = spa_prop_get_nvlist(spa, props, n_props,
outnvl);
}
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
} else {
error = spa_prop_get(spa, outnvl);
if (error == 0 && props != NULL)
@@ -6121,10 +6121,10 @@ zfs_ioc_clear(zfs_cmd_t *zc)
/*
* On zpool clear we also fix up missing slogs
*/
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
spa = spa_lookup(zc->zc_name);
if (spa == NULL) {
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (SET_ERROR(EIO));
}
if (spa_get_log_state(spa) == SPA_LOG_MISSING) {
@@ -6132,7 +6132,7 @@ zfs_ioc_clear(zfs_cmd_t *zc)
spa_set_log_state(spa, SPA_LOG_CLEAR);
}
spa->spa_last_open_failed = 0;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (zc->zc_cookie & ZPOOL_NO_REWIND) {
error = spa_open(zc->zc_name, &spa, FTAG);


@@ -1008,9 +1008,9 @@ zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
if (zio_pool_handler_exists(name, record->zi_cmd))
return (SET_ERROR(EEXIST));
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
boolean_t has_spa = spa_lookup(name) != NULL;
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
if (record->zi_cmd == ZINJECT_DELAY_IMPORT && has_spa)
return (SET_ERROR(EEXIST));
@@ -1095,7 +1095,7 @@ zio_inject_list_next(int *id, char *name, size_t buflen,
inject_handler_t *handler;
int ret;
mutex_enter(&spa_namespace_lock);
spa_namespace_enter(FTAG);
rw_enter(&inject_lock, RW_READER);
for (handler = list_head(&inject_handlers); handler != NULL;
@@ -1117,7 +1117,7 @@
}
rw_exit(&inject_lock);
mutex_exit(&spa_namespace_lock);
spa_namespace_exit(FTAG);
return (ret);
}


@@ -1233,7 +1233,7 @@ zvol_first_open(zvol_state_t *zv, boolean_t readonly)
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(mutex_owned(&spa_namespace_lock));
ASSERT(spa_namespace_held());
boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
@@ -1303,7 +1303,7 @@ zvol_create_snap_minor_cb(const char *dsname, void *arg)
list_t *minors_list = j->list;
const char *name = j->name;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
ASSERT0(spa_namespace_held());
/* skip the designated dataset */
if (name && strcmp(dsname, name) == 0)
@@ -1403,7 +1403,7 @@ zvol_create_minors_cb(const char *dsname, void *arg)
int error;
list_t *minors_list = arg;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
ASSERT0(spa_namespace_held());
error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
if (error)