mirror of
https://git.proxmox.com/git/mirror_zfs.git
synced 2024-11-18 02:20:59 +03:00
bcd68186d8
configurable number of threads like the Solaris version and almost all of the options are supported. Unfortunately, it appears to have made absolutely no difference to our performance numbers. I need to keep looking for where we are bottle necking. git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@93 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
186 lines
3.5 KiB
C
186 lines
3.5 KiB
C
#ifndef _SPL_MUTEX_H
|
|
#define _SPL_MUTEX_H
|
|
|
|
#ifdef __cplusplus
|
|
extern "C" {
|
|
#endif
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/hardirq.h>
|
|
#include <sys/types.h>
|
|
|
|
/* See the "Big Theory Statement" in solaris mutex.c.
|
|
*
|
|
* Spin mutexes apparently aren't needed by zfs so we assert
|
|
* if ibc is non-zero.
|
|
*
|
|
 * Our implementation of adaptive mutexes isn't really adaptive.
|
|
* They go to sleep every time.
|
|
*/
|
|
|
|
#define MUTEX_DEFAULT 0
|
|
#define MUTEX_HELD(x) (mutex_owned(x))
|
|
|
|
#define KM_MAGIC 0x42424242
|
|
#define KM_POISON 0x84
|
|
|
|
/*
 * Kernel mutex built on a binary semaphore.  The spinlock protects the
 * owner field so MUTEX_HELD()/mutex_owner() see a consistent value.
 */
typedef struct {
	int km_magic;                  /* KM_MAGIC while initialized; poisoned on destroy */
	char *km_name;                 /* optional kmalloc'd copy of the mutex name, may be NULL */
	struct task_struct *km_owner;  /* task currently holding the mutex, NULL when free */
	struct semaphore km_sem;       /* binary semaphore providing the sleep/wake behavior */
	spinlock_t km_lock;            /* guards km_owner updates and the debug checks */
} kmutex_t;
|
|
|
|
#undef mutex_init
|
|
static __inline__ void
|
|
mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
|
|
{
|
|
ENTRY;
|
|
ASSERT(mp);
|
|
ASSERT(ibc == NULL); /* XXX - Spin mutexes not needed */
|
|
ASSERT(type == MUTEX_DEFAULT); /* XXX - Only default type supported */
|
|
|
|
mp->km_magic = KM_MAGIC;
|
|
spin_lock_init(&mp->km_lock);
|
|
sema_init(&mp->km_sem, 1);
|
|
mp->km_owner = NULL;
|
|
mp->km_name = NULL;
|
|
|
|
if (name) {
|
|
mp->km_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
|
|
if (mp->km_name)
|
|
strcpy(mp->km_name, name);
|
|
}
|
|
EXIT;
|
|
}
|
|
|
|
#undef mutex_destroy
|
|
static __inline__ void
|
|
mutex_destroy(kmutex_t *mp)
|
|
{
|
|
ENTRY;
|
|
ASSERT(mp);
|
|
ASSERT(mp->km_magic == KM_MAGIC);
|
|
spin_lock(&mp->km_lock);
|
|
|
|
if (mp->km_name)
|
|
kfree(mp->km_name);
|
|
|
|
memset(mp, KM_POISON, sizeof(*mp));
|
|
spin_unlock(&mp->km_lock);
|
|
EXIT;
|
|
}
|
|
|
|
/*
 * Acquire the mutex, sleeping if it is already held.
 *
 * The spinlock is taken only to perform the debug checks consistently and
 * is ALWAYS dropped before down() -- sleeping while holding a spinlock
 * would deadlock.  Callers in atomic context (where scheduling is illegal)
 * are caught and the system is brought down via SBUG(); tasks that are
 * exiting are exempted because exit_state is set during teardown.
 */
static __inline__ void
mutex_enter(kmutex_t *mp)
{
	ENTRY;
	ASSERT(mp);
	ASSERT(mp->km_magic == KM_MAGIC);
	spin_lock(&mp->km_lock);

	/* Sleeping in atomic context is a caller bug -- log and panic. */
	if (unlikely(in_atomic() && !current->exit_state)) {
		spin_unlock(&mp->km_lock);
		__CDEBUG_LIMIT(S_MUTEX, D_ERROR,
		    "May schedule while atomic: %s/0x%08x/%d\n",
		    current->comm, preempt_count(), current->pid);
		SBUG();
	}

	/* Must not hold the spinlock across a sleeping down(). */
	spin_unlock(&mp->km_lock);

	down(&mp->km_sem);

	/* We own the semaphore now; record ownership under the lock. */
	spin_lock(&mp->km_lock);
	ASSERT(mp->km_owner == NULL);
	mp->km_owner = current;
	spin_unlock(&mp->km_lock);
	EXIT;
}
|
|
|
|
/*
 * Try to acquire the mutex without blocking.
 * Return 1 if we acquired the mutex, else zero.
 *
 * Although down_trylock() never sleeps, the same atomic-context check as
 * mutex_enter() is applied so misuse is caught uniformly.  Note that
 * down_trylock() uses the inverse convention: it returns 0 on success.
 */
static __inline__ int
mutex_tryenter(kmutex_t *mp)
{
	int rc;
	ENTRY;

	ASSERT(mp);
	ASSERT(mp->km_magic == KM_MAGIC);
	spin_lock(&mp->km_lock);

	/* Calling from atomic context is a caller bug -- log and panic. */
	if (unlikely(in_atomic() && !current->exit_state)) {
		spin_unlock(&mp->km_lock);
		__CDEBUG_LIMIT(S_MUTEX, D_ERROR,
		    "May schedule while atomic: %s/0x%08x/%d\n",
		    current->comm, preempt_count(), current->pid);
		SBUG();
	}

	spin_unlock(&mp->km_lock);
	rc = down_trylock(&mp->km_sem); /* returns 0 if acquired */
	if (rc == 0) {
		/* Acquired: record ownership under the lock. */
		spin_lock(&mp->km_lock);
		ASSERT(mp->km_owner == NULL);
		mp->km_owner = current;
		spin_unlock(&mp->km_lock);
		RETURN(1);
	}

	RETURN(0);
}
|
|
|
|
/*
 * Release the mutex.  Must be called by the owning task.
 *
 * The owner is cleared under the spinlock BEFORE up() wakes a waiter, so
 * the waiter's ASSERT(km_owner == NULL) in mutex_enter() cannot race with
 * a stale owner value.
 */
static __inline__ void
mutex_exit(kmutex_t *mp)
{
	ENTRY;
	ASSERT(mp);
	ASSERT(mp->km_magic == KM_MAGIC);
	spin_lock(&mp->km_lock);

	ASSERT(mp->km_owner == current); /* only the owner may release */
	mp->km_owner = NULL;
	spin_unlock(&mp->km_lock);
	up(&mp->km_sem);                 /* wake one waiter, if any */
	EXIT;
}
|
|
|
|
/* Return 1 if mutex is held by current process, else zero. */
|
|
static __inline__ int
|
|
mutex_owned(kmutex_t *mp)
|
|
{
|
|
int rc;
|
|
ENTRY;
|
|
|
|
ASSERT(mp);
|
|
ASSERT(mp->km_magic == KM_MAGIC);
|
|
spin_lock(&mp->km_lock);
|
|
rc = (mp->km_owner == current);
|
|
spin_unlock(&mp->km_lock);
|
|
|
|
RETURN(rc);
|
|
}
|
|
|
|
/* Return owner if mutex is owned, else NULL. */
|
|
static __inline__ kthread_t *
|
|
mutex_owner(kmutex_t *mp)
|
|
{
|
|
kthread_t *thr;
|
|
ENTRY;
|
|
|
|
ASSERT(mp);
|
|
ASSERT(mp->km_magic == KM_MAGIC);
|
|
spin_lock(&mp->km_lock);
|
|
thr = mp->km_owner;
|
|
spin_unlock(&mp->km_lock);
|
|
|
|
RETURN(thr);
|
|
}
|
|
|
|
#ifdef __cplusplus
|
|
}
|
|
#endif
|
|
|
|
#endif /* _SPL_MUTEX_H */
|