Simplified the scope of the namespace lock

If we wait to drop the namespace lock until after we have checked for
no spa references, then we know that any spa consumer will have to
call spa_lookup() and wait on spa_namespace_cv until we finish.  This
narrows the external checks to spa_lookup(), and we no longer need to
worry about the spa_vdev_enter() case.

Sponsored-by: Klara Inc.
Sponsored-by: Wasabi Technology, Inc.
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: George Wilson <gwilson@delphix.com>
Signed-off-by: Don Brady <don.brady@klarasystems.com>
Closes #16153
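
For context, the handshake the commit message describes can be modeled in a
few lines of user-space C. This is a toy sketch using POSIX threads, not the
OpenZFS code: spa_export_in_progress stands in for spa->spa_export_thread,
and the struct carries only the field the handshake needs.

	#include <pthread.h>

	/* Toy model of the namespace-lock handshake, not OpenZFS code. */
	typedef struct spa {
		int spa_export_in_progress;	/* models spa->spa_export_thread */
	} spa_t;

	static pthread_mutex_t spa_namespace_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t spa_namespace_cv = PTHREAD_COND_INITIALIZER;

	/*
	 * Consumer side, as in spa_lookup(): sleep on the cv while an
	 * export is in flight and re-check after every wakeup, since the
	 * cv is shared and a broadcast may be for a different pool
	 * transitioning through import/export.
	 */
	static void
	spa_lookup_wait_model(spa_t *spa)
	{
		pthread_mutex_lock(&spa_namespace_lock);
		while (spa->spa_export_in_progress)
			pthread_cond_wait(&spa_namespace_cv, &spa_namespace_lock);
		/* ... the lookup itself would proceed here, lock still held ... */
		pthread_mutex_unlock(&spa_namespace_lock);
	}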
Author:    Don Brady
Committer: Brian Behlendorf
Date:      2024-05-05 14:57:33 +00:00
Commit:    89acef992b (parent 975a13259b)

2 changed files with 22 additions and 31 deletions

diff --git a/module/zfs/spa.c b/module/zfs/spa.c

@@ -6994,15 +6994,11 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
 	mutex_enter(&spa_namespace_lock);
 	spa->spa_export_thread = curthread;
 	spa_close(spa, FTAG);
-	mutex_exit(&spa_namespace_lock);
-
-	/*
-	 * At this point we no longer hold the spa_namespace_lock and
-	 * the spa_export_thread indicates that an export is in progress.
-	 */
-
-	if (spa->spa_state == POOL_STATE_UNINITIALIZED)
+	if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
+		mutex_exit(&spa_namespace_lock);
 		goto export_spa;
+	}
 
 	/*
 	 * The pool will be in core if it's openable, in which case we can
@@ -7024,6 +7020,14 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
 		goto fail;
 	}
 
+	mutex_exit(&spa_namespace_lock);
+
+	/*
+	 * At this point we no longer hold the spa_namespace_lock and
+	 * there were no references on the spa. Future spa_lookups will
+	 * notice the spa->spa_export_thread and wait until we signal
+	 * that we are finished.
+	 */
 	if (spa->spa_sync_on) {
 		vdev_t *rvd = spa->spa_root_vdev;
 		/*
@@ -7035,6 +7039,7 @@ spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
 	if (!force && new_state == POOL_STATE_EXPORTED &&
 	    spa_has_active_shared_spare(spa)) {
 		error = SET_ERROR(EXDEV);
+		mutex_enter(&spa_namespace_lock);
 		goto fail;
 	}
@@ -7102,6 +7107,9 @@ export_spa:
 	if (new_state == POOL_STATE_EXPORTED)
 		zio_handle_export_delay(spa, gethrtime() - export_start);
 
+	/*
+	 * Take the namespace lock for the actual spa_t removal
+	 */
+	mutex_enter(&spa_namespace_lock);
 	if (new_state != POOL_STATE_UNINITIALIZED) {
 		if (!hardforce)
@@ -7118,20 +7126,20 @@
 	}
 
 	/*
-	 * Wake up any waiters on spa_namespace_lock
-	 * They need to re-attempt a spa_lookup()
+	 * Wake up any waiters in spa_lookup()
 	 */
 	cv_broadcast(&spa_namespace_cv);
 	mutex_exit(&spa_namespace_lock);
 	return (0);
 
 fail:
-	mutex_enter(&spa_namespace_lock);
 	spa->spa_is_exporting = B_FALSE;
 	spa->spa_export_thread = NULL;
-	spa_async_resume(spa);
 
-	/* Wake up any waiters on spa_namespace_lock */
+	spa_async_resume(spa);
+
+	/*
+	 * Wake up any waiters in spa_lookup()
+	 */
 	cv_broadcast(&spa_namespace_cv);
 	mutex_exit(&spa_namespace_lock);
 	return (error);

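The exporter side of the same toy model mirrors the reshaped
spa_export_common() above: set the in-progress marker and check for
references with the lock held, fail fast with EBUSY if the pool is still
referenced, otherwise drop the lock for the heavy work and retake it only for
the final removal and broadcast. spa_refcount_held() is a hypothetical stub,
and the sketch reuses the spa_t, mutex, and cv declared in the earlier model.

	#include <errno.h>

	/* Hypothetical stand-in for the real spa reference check. */
	static int
	spa_refcount_held(spa_t *spa)
	{
		(void) spa;
		return (0);
	}

	static int
	spa_export_model(spa_t *spa)
	{
		pthread_mutex_lock(&spa_namespace_lock);
		spa->spa_export_in_progress = 1;	/* new lookups now wait */

		if (spa_refcount_held(spa)) {
			/* Fail fast: a consumer still holds a reference. */
			spa->spa_export_in_progress = 0;
			pthread_cond_broadcast(&spa_namespace_cv);
			pthread_mutex_unlock(&spa_namespace_lock);
			return (EBUSY);
		}

		/* No references left: safe to do the heavy work unlocked. */
		pthread_mutex_unlock(&spa_namespace_lock);
		/* ... unload datasets, sync out, etc. ... */

		/* Retake the lock only for the removal, then wake waiters. */
		pthread_mutex_lock(&spa_namespace_lock);
		spa->spa_export_in_progress = 0;
		pthread_cond_broadcast(&spa_namespace_cv);
		pthread_mutex_unlock(&spa_namespace_lock);
		return (0);
	}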
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c

@@ -1240,20 +1240,7 @@ spa_vdev_enter(spa_t *spa)
 	mutex_enter(&spa->spa_vdev_top_lock);
 	mutex_enter(&spa_namespace_lock);
 
-	/*
-	 * We have a reference on the spa and a spa export could be
-	 * starting but no longer holding the spa_namespace_lock. So
-	 * check if there is an export and if so wait. It will fail
-	 * fast (EBUSY) since we are still holding a spa reference.
-	 *
-	 * Note that we can be woken by a different spa transitioning
-	 * through an import/export, so we must wait for our condition
-	 * to change before proceeding.
-	 */
-	while (spa->spa_export_thread != NULL &&
-	    spa->spa_export_thread != curthread) {
-		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
-	}
+	ASSERT0(spa->spa_export_thread);
 
 	vdev_autotrim_stop_all(spa);
@@ -1272,11 +1259,7 @@ spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
 	mutex_enter(&spa->spa_vdev_top_lock);
 	mutex_enter(&spa_namespace_lock);
 
-	/* See comment in spa_vdev_enter() */
-	while (spa->spa_export_thread != NULL &&
-	    spa->spa_export_thread != curthread) {
-		cv_wait(&spa_namespace_cv, &spa_namespace_lock);
-	}
+	ASSERT0(spa->spa_export_thread);
 
 	vdev_autotrim_stop_all(spa);
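
The wait loops in spa_vdev_enter() and spa_vdev_detach_enter() can be reduced
to assertions because both paths run with a reference held on the spa, and
after this change the export path keeps the namespace lock until it has
verified that no such references exist. An in-flight export and a referencing
vdev operation can therefore no longer overlap, which is exactly the
spa_vdev_enter() case the commit message says no longer needs special
handling.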