Remove adaptive mutex implementation

Since the Linux 2.6.29 kernel, all mutexes have been adaptive mutexes.
There is no longer any point in keeping this code, so it is being
removed to simplify the code base.
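
For context, with the custom spin loop gone the fallback mutex_enter()
wrapper (the !HAVE_MUTEX_OWNER case) reduces to a plain mutex_lock()
plus SPL's own owner bookkeeping. A minimal sketch, reconstructed from
the lines that survive in the hunks below; MUTEX(), ASSERT3P(), and
spl_mutex_set_owner() are existing SPL helpers:

#define mutex_enter(mp)                                            \
({                                                                 \
        /* Re-entering a lock we already hold is a bug. */         \
        ASSERT3P(mutex_owner(mp), !=, current);                    \
        /* Kernel mutexes have spun adaptively since 2.6.29. */    \
        mutex_lock(MUTEX(mp));                                     \
        /* Record the owner so mutex_owner() keeps working. */     \
        spl_mutex_set_owner(mp);                                   \
})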

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Brian Behlendorf 2014-09-29 16:48:35 -04:00
parent 56cfabd3e8
commit a80d69caf0
4 changed files with 9 additions and 124 deletions


@@ -28,7 +28,6 @@ AC_DEFUN([SPL_AC_CONFIG_KERNEL], [
SPL_AC_TYPE_UINTPTR_T
SPL_AC_2ARGS_REGISTER_SYSCTL
SPL_AC_SHRINKER_CALLBACK
SPL_AC_TASK_CURR
SPL_AC_CTL_UNNUMBERED
SPL_AC_CTL_NAME
SPL_AC_VMALLOC_INFO
@@ -997,23 +996,6 @@ AC_DEFUN([SPL_AC_SHRINKER_CALLBACK],[
EXTRA_KCFLAGS="$tmp_flags"
])
dnl #
dnl # A custom SPL patch may export this symbol; it is not required
dnl #
AC_DEFUN([SPL_AC_TASK_CURR],
[AC_MSG_CHECKING([whether task_curr() is available])
SPL_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/sched.h>
], [
task_curr(NULL);
], [task_curr], [kernel/sched.c], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_TASK_CURR, 1, [task_curr() is available])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.19 API change,
dnl # Use CTL_UNNUMBERED when binary sysctl is not required
@@ -1220,7 +1202,9 @@ AC_DEFUN([SPL_AC_INODE_I_MUTEX], [
dnl #
dnl # 2.6.29 API change,
dnl # Adaptive mutexes introduced.
dnl # Adaptive mutexes were introduced which track the mutex owner. The
dnl # mutex wrappers leverage this functionality to avoid tracking the
dnl # owner multiple times.
dnl #
AC_DEFUN([SPL_AC_MUTEX_OWNER], [
AC_MSG_CHECKING([whether struct mutex has owner])
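
The owner tracking referred to above works roughly as follows: when
configure finds an owner field embedded in struct mutex, the SPL
wrapper can read the owner the kernel already records instead of
maintaining its own m_owner field. A hypothetical sketch, assuming the
owner is stored as a struct task_struct * (some kernel versions use a
struct thread_info * instead, which is exactly why a configure check
is needed); the real mutex_owner() body is not part of this hunk:

#include <linux/mutex.h>
#include <linux/sched.h>

typedef struct task_struct kthread_t;   /* thread type used by SPL */

typedef struct {
        struct mutex m;   /* owner tracked by the kernel (HAVE_MUTEX_OWNER) */
} kmutex_t;

static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
        /* Reuse the owner field the adaptive mutex already maintains. */
        return (ACCESS_ONCE(mp->m.owner));
}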


@@ -35,7 +35,8 @@ typedef enum {
MUTEX_ADAPTIVE = 2
} kmutex_type_t;
#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP) && \
!defined(CONFIG_DEBUG_MUTEXES)
/*
* We define a 1-field struct rather than a straight typedef to enforce type
@@ -82,7 +83,7 @@ mutex_owner(kmutex_t *mp)
({ \
ASSERT3P(mutex_owner(mp), !=, current); \
mutex_lock(&(mp)->m); \
})
})
#define mutex_exit(mp) mutex_unlock(&(mp)->m)
#else /* HAVE_MUTEX_OWNER */
@@ -92,13 +93,6 @@ typedef struct {
kthread_t *m_owner;
} kmutex_t;
#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner) 0
# define spl_mutex_spin_max() 0
#endif /* HAVE_TASK_CURR */
#define MUTEX(mp) (&((mp)->m_mutex))
static inline void
@@ -150,38 +144,10 @@ spl_mutex_clear_owner(kmutex_t *mp)
_rc_; \
})
/*
* Adaptive mutexes assume that the lock may be held by a task running
* on a different CPU. The expectation is that the task will drop the
* lock before leaving the head of the run queue, so the ideal thing
* to do is spin until we acquire the lock and avoid a context switch.
* However, it is also possible that the task holding the lock yields
* the processor without dropping the lock. In this case we know it is
* going to be a while, so we stop spinning and go to sleep waiting for
* the lock to become available. This should strike the optimum balance
* between spinning and sleeping while waiting for a lock.
*/
#define mutex_enter(mp) \
({ \
kthread_t *_owner_; \
int _rc_, _count_; \
\
_rc_ = 0; \
_count_ = 0; \
_owner_ = mutex_owner(mp); \
ASSERT3P(_owner_, !=, current); \
\
while (_owner_ && task_curr(_owner_) && \
_count_ <= spl_mutex_spin_max()) { \
if ((_rc_ = mutex_trylock(MUTEX(mp)))) \
break; \
\
_count_++; \
} \
\
if (!_rc_) \
ASSERT3P(mutex_owner(mp), !=, current); \
mutex_lock(MUTEX(mp)); \
\
spl_mutex_set_owner(mp); \
})


@@ -102,30 +102,6 @@ The system hostid file
Default value: \fB/etc/hostid\fR.
.RE
.sp
.ne 2
.na
\fBmutex_spin_max\fR (int)
.ad
.RS 12n
Spin a maximum of N times to acquire lock
.sp
.ne 2
.na
\fBPossible values:\fR
.sp
.RS 12n
\fB0\fR Never spin when trying to acquire lock
.sp
\fB-1\fR Spin until acquired or holder yields without dropping lock
.sp
\fB1-MAX_INT\fR Spin for N attempts before sleeping for lock
.RE
.sp
.ne -4
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na


@@ -32,46 +32,5 @@
#define DEBUG_SUBSYSTEM S_MUTEX
/*
* While a standard mutex implementation has been available in the kernel
* for quite some time, it was not until the 2.6.29 and later kernels that
* adaptive mutexes were embraced and integrated with the scheduler. This
* brought a significant performance improvement, but just as importantly
* it added a lock owner to the generic mutex outside CONFIG_DEBUG_MUTEXES
* builds. This is critical for correctly supporting the mutex_owner()
* Solaris primitive. When the owner is available we use a pure Linux
* mutex implementation. When the owner is not available we still use
* Linux mutexes as a base but also reserve space for an owner field right
* after the mutex structure.
*
* In the case where HAVE_MUTEX_OWNER is not defined your code may
* still be able to leverage adaptive mutexes. As long as the task_curr()
* symbol is exported, this code will provide a poor man's adaptive mutex
* implementation. However, this is not required, and if the symbol is
* unavailable we provide a standard mutex.
*/
#if !defined(HAVE_MUTEX_OWNER) || !defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
#ifdef HAVE_TASK_CURR
/*
* mutex_spin_max = { 0, -1, 1-MAX_INT }
* 0: Never spin when trying to acquire lock
* -1: Spin until acquired or holder yields without dropping lock
* 1-MAX_INT: Spin for N attempts before sleeping for lock
*/
int mutex_spin_max = 0;
module_param(mutex_spin_max, int, 0644);
MODULE_PARM_DESC(mutex_spin_max, "Spin a maximum of N times to acquire lock");
int
spl_mutex_spin_max(void)
{
return mutex_spin_max;
}
EXPORT_SYMBOL(spl_mutex_spin_max);
#endif /* HAVE_TASK_CURR */
#endif /* !HAVE_MUTEX_OWNER */
int spl_mutex_init(void) { return 0; }
void spl_mutex_fini(void) { }