Mirror of https://git.proxmox.com/git/mirror_zfs.git, synced 2024-11-17 01:51:00 +03:00
Add mutex_enter_interruptible() for interruptible sleeping IOCTLs
Many long-running ZFS ioctls lock the spa_namespace_lock, forcing concurrent ioctls to sleep for the mutex. Previously, the only option was to call mutex_enter(), which sleeps uninterruptibly. This is a usability problem for sysadmins: for example, if an admin runs `zpool status` while a slow `zpool import` is ongoing, the admin's shell is stuck in uninterruptible sleep for a long time.

This patch resolves the issue by introducing mutex_enter_interruptible(), which sleeps interruptibly while waiting to acquire a lock. It is implemented for both Linux and FreeBSD.

The ZFS_IOC_POOL_CONFIGS ioctl, used by `zpool status`, is changed to use the new macro so that the command can be interrupted if it is issued during a concurrent `zpool import` (or other long-running operation).

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Thomas Bertschinger <bertschinger@lanl.gov>
Closes #15360
commit 97a0b5be50
parent 6a629f3234
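The calling pattern the new macro enables is simple: try to take the mutex, and if the sleep is interrupted, bail out with EINTR instead of blocking the process. The sketch below is illustrative only and is not code from this commit; do_locked_ioctl() and example_lock are hypothetical names, but the error handling mirrors what spa_all_configs() and zfs_ioc_pool_configs() do in the diff that follows.

/*
 * Illustrative sketch (hypothetical names): acquire a kmutex_t
 * interruptibly and propagate EINTR to the caller rather than
 * sleeping uninterruptibly.  mutex_enter_interruptible() returns
 * 0 on success and nonzero if the wait was interrupted by a signal.
 */
static int
do_locked_ioctl(kmutex_t *example_lock)
{
	int error = mutex_enter_interruptible(example_lock);
	if (error)
		return (SET_ERROR(EINTR));	/* user's shell stays responsive */

	/* ... work that requires the lock ... */

	mutex_exit(example_lock);
	return (0);
}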
@@ -64,6 +64,7 @@ typedef enum {
 } while (0)
 #define	mutex_destroy(lock)		sx_destroy(lock)
 #define	mutex_enter(lock)		sx_xlock(lock)
+#define	mutex_enter_interruptible(lock)	sx_xlock_sig(lock)
 #define	mutex_enter_nested(lock, type)	sx_xlock(lock)
 #define	mutex_tryenter(lock)		sx_try_xlock(lock)
 #define	mutex_exit(lock)		sx_xunlock(lock)
@@ -128,7 +128,6 @@ spl_mutex_lockdep_on_maybe(kmutex_t *mp)	\
 
 #define	NESTED_SINGLE 1
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define	mutex_enter_nested(mp, subclass)	\
 {	\
 	ASSERT3P(mutex_owner(mp), !=, current);	\
@@ -137,16 +136,22 @@ spl_mutex_lockdep_on_maybe(kmutex_t *mp)	\
 	spl_mutex_lockdep_on_maybe(mp);	\
 	spl_mutex_set_owner(mp);	\
 }
-#else /* CONFIG_DEBUG_LOCK_ALLOC */
-#define	mutex_enter_nested(mp, subclass)	\
-{	\
+
+#define	mutex_enter_interruptible(mp)	\
+/* CSTYLED */	\
+({	\
+	int _rc_;	\
+	\
 	ASSERT3P(mutex_owner(mp), !=, current);	\
 	spl_mutex_lockdep_off_maybe(mp);	\
-	mutex_lock(MUTEX(mp));	\
+	_rc_ = mutex_lock_interruptible(MUTEX(mp));	\
 	spl_mutex_lockdep_on_maybe(mp);	\
-	spl_mutex_set_owner(mp);	\
-}
-#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+	if (!_rc_) {	\
+		spl_mutex_set_owner(mp);	\
+	}	\
+	\
+	_rc_;	\
+})
 
 #define	mutex_enter(mp) mutex_enter_nested((mp), 0)
 
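On Linux, the macro above is written as a GCC statement expression (the ({ ... }) form, hence the /* CSTYLED */ annotation) so that it evaluates to the return value of mutex_lock_interruptible(): 0 when the lock is acquired, or -EINTR if a signal arrives while sleeping, in which case the lock owner is deliberately not recorded. Written out as a function, the logic is roughly the following simplified sketch; it is not part of the commit, and the lockdep on/off helpers and the ownership ASSERT from the real macro are omitted.

/*
 * Simplified sketch of what the statement-expression macro does.
 * Not part of the commit; lockdep handling and ASSERTs omitted.
 */
static inline int
mutex_enter_interruptible_sketch(kmutex_t *mp)
{
	int rc = mutex_lock_interruptible(MUTEX(mp));

	if (rc == 0)
		spl_mutex_set_owner(mp);	/* record ownership only if acquired */

	return (rc);	/* 0 on success, -EINTR if interrupted */
}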
@@ -837,7 +837,7 @@ extern kmutex_t spa_namespace_lock;
 
 extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t, boolean_t);
 extern void spa_config_load(void);
-extern nvlist_t *spa_all_configs(uint64_t *);
+extern int spa_all_configs(uint64_t *generation, nvlist_t **pools);
 extern void spa_config_set(spa_t *spa, nvlist_t *config);
 extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
     int getstats);
@@ -274,11 +274,13 @@ typedef struct kmutex {
 extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
 extern void mutex_destroy(kmutex_t *mp);
 extern void mutex_enter(kmutex_t *mp);
+extern int mutex_enter_check_return(kmutex_t *mp);
 extern void mutex_exit(kmutex_t *mp);
 extern int mutex_tryenter(kmutex_t *mp);
 
 #define	NESTED_SINGLE 1
 #define	mutex_enter_nested(mp, class)	mutex_enter(mp)
+#define	mutex_enter_interruptible(mp)	mutex_enter_check_return(mp)
 /*
  * RW locks
  */
@@ -205,6 +205,15 @@ mutex_enter(kmutex_t *mp)
 	mp->m_owner = pthread_self();
 }
 
+int
+mutex_enter_check_return(kmutex_t *mp)
+{
+	int error = pthread_mutex_lock(&mp->m_lock);
+	if (error == 0)
+		mp->m_owner = pthread_self();
+	return (error);
+}
+
 int
 mutex_tryenter(kmutex_t *mp)
 {
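For the userland build (libzpool, used by tools such as ztest and zdb), there is no interruptible kernel sleep to fall back on: mutex_enter_check_return() above simply surfaces pthread_mutex_lock()'s return value, and the zfs_context.h change maps mutex_enter_interruptible() onto it so the same callers compile in both environments. Below is a small self-contained illustration of that fallback, using hypothetical names (my_kmutex_t, my_mutex_enter_check_return) rather than the real libzpool types.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for libzpool's kmutex_t (hypothetical). */
typedef struct {
	pthread_mutex_t m_lock;
	pthread_t m_owner;
} my_kmutex_t;

/* Mirrors the new mutex_enter_check_return(): report the lock error. */
static int
my_mutex_enter_check_return(my_kmutex_t *mp)
{
	int error = pthread_mutex_lock(&mp->m_lock);
	if (error == 0)
		mp->m_owner = pthread_self();
	return (error);
}

int
main(void)
{
	my_kmutex_t m;

	(void) pthread_mutex_init(&m.m_lock, NULL);
	if (my_mutex_enter_check_return(&m) == 0) {
		(void) printf("lock acquired\n");
		(void) pthread_mutex_unlock(&m.m_lock);
	}
	(void) pthread_mutex_destroy(&m.m_lock);
	return (0);
}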
@@ -367,23 +367,24 @@ spa_write_cachefile(spa_t *target, boolean_t removing, boolean_t postsysevent,
  * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
  * information for all pool visible within the zone.
  */
-nvlist_t *
-spa_all_configs(uint64_t *generation)
+int
+spa_all_configs(uint64_t *generation, nvlist_t **pools)
 {
-	nvlist_t *pools;
 	spa_t *spa = NULL;
 
 	if (*generation == spa_config_generation)
-		return (NULL);
+		return (SET_ERROR(EEXIST));
 
-	pools = fnvlist_alloc();
+	int error = mutex_enter_interruptible(&spa_namespace_lock);
+	if (error)
+		return (SET_ERROR(EINTR));
 
-	mutex_enter(&spa_namespace_lock);
+	*pools = fnvlist_alloc();
 	while ((spa = spa_next(spa)) != NULL) {
 		if (INGLOBALZONE(curproc) ||
 		    zone_dataset_visible(spa_name(spa), NULL)) {
 			mutex_enter(&spa->spa_props_lock);
-			fnvlist_add_nvlist(pools, spa_name(spa),
+			fnvlist_add_nvlist(*pools, spa_name(spa),
 			    spa->spa_config);
 			mutex_exit(&spa->spa_props_lock);
 		}
@@ -391,7 +392,7 @@ spa_all_configs(uint64_t *generation)
 	*generation = spa_config_generation;
 	mutex_exit(&spa_namespace_lock);
 
-	return (pools);
+	return (0);
 }
 
 void
@@ -1582,8 +1582,9 @@ zfs_ioc_pool_configs(zfs_cmd_t *zc)
 	nvlist_t *configs;
 	int error;
 
-	if ((configs = spa_all_configs(&zc->zc_cookie)) == NULL)
-		return (SET_ERROR(EEXIST));
+	error = spa_all_configs(&zc->zc_cookie, &configs);
+	if (error)
+		return (error);
 
 	error = put_nvlist(zc, configs);
 