From d61e12af5af3ca6ba92a1b208e33c4e6cb99c9a3 Mon Sep 17 00:00:00 2001
From: behlendo
Date: Tue, 15 Apr 2008 20:53:36 +0000
Subject: [PATCH] - Add some spinlocks to cover all the private data in the
 mutex.  I don't think this should fix anything, but it's a good idea
 regardless.

- Drop the lock before calling the constructor/destructor for the slab;
  otherwise we can't sleep in a constructor/destructor, and for long-running
  functions we may NMI.
- Do something braindead, but safe, for the console debug logs for now.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@73 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
---
 include/sys/mutex.h       | 58 +++++++++++++++++++++++++++++++--------
 modules/spl/spl-generic.c | 25 +++++++++--------
 modules/spl/spl-kmem.c    | 24 ++++++++++------
 3 files changed, 76 insertions(+), 31 deletions(-)

diff --git a/include/sys/mutex.h b/include/sys/mutex.h
index 068fde1ab..ae8b81a7f 100644
--- a/include/sys/mutex.h
+++ b/include/sys/mutex.h
@@ -29,16 +29,19 @@ typedef struct {
        char *km_name;
        struct task_struct *km_owner;
        struct semaphore km_sem;
+       spinlock_t km_lock;
 } kmutex_t;

 #undef mutex_init
 static __inline__ void
 mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 {
+       BUG_ON(mp == NULL);
        BUG_ON(ibc != NULL);            /* XXX - Spin mutexes not needed? */
        BUG_ON(type != MUTEX_DEFAULT);  /* XXX - Only default type supported? */

        mp->km_magic = KM_MAGIC;
+       spin_lock_init(&mp->km_lock);
        sema_init(&mp->km_sem, 1);
        mp->km_owner = NULL;
        mp->km_name = NULL;
@@ -54,49 +57,65 @@ mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 static __inline__ void
 mutex_destroy(kmutex_t *mp)
 {
+       BUG_ON(mp == NULL);
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_magic != KM_MAGIC);

        if (mp->km_name)
                kfree(mp->km_name);

        memset(mp, KM_POISON, sizeof(*mp));
+       spin_unlock(&mp->km_lock);
 }

 static __inline__ void
 mutex_enter(kmutex_t *mp)
 {
+       BUG_ON(mp == NULL);
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_magic != KM_MAGIC);

        if (unlikely(in_atomic() && !current->exit_state)) {
                printk("May schedule while atomic: %s/0x%08x/%d\n",
                       current->comm, preempt_count(), current->pid);
+               spin_unlock(&mp->km_lock);
                BUG();
        }

-       down(&mp->km_sem);  /* Will check in_atomic() for us */
+       spin_unlock(&mp->km_lock);
+
+       down(&mp->km_sem);
+
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_owner != NULL);
        mp->km_owner = current;
+       spin_unlock(&mp->km_lock);
 }

-/* Return 1 if we acquired the mutex, else zero.
- */
+/* Return 1 if we acquired the mutex, else zero. */
 static __inline__ int
 mutex_tryenter(kmutex_t *mp)
 {
-       int result;
+       int rc;

+       BUG_ON(mp == NULL);
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_magic != KM_MAGIC);

        if (unlikely(in_atomic() && !current->exit_state)) {
                printk("May schedule while atomic: %s/0x%08x/%d\n",
                       current->comm, preempt_count(), current->pid);
+               spin_unlock(&mp->km_lock);
                BUG();
        }

-       result = down_trylock(&mp->km_sem);     /* returns 0 if acquired */
-       if (result == 0) {
+       spin_unlock(&mp->km_lock);
+       rc = down_trylock(&mp->km_sem);         /* returns 0 if acquired */
+       if (rc == 0) {
+               spin_lock(&mp->km_lock);
                BUG_ON(mp->km_owner != NULL);
                mp->km_owner = current;
+               spin_unlock(&mp->km_lock);
                return 1;
        }
        return 0;
@@ -105,28 +124,43 @@ mutex_tryenter(kmutex_t *mp)
 static __inline__ void
 mutex_exit(kmutex_t *mp)
 {
+       BUG_ON(mp == NULL);
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_magic != KM_MAGIC);
        BUG_ON(mp->km_owner != current);
        mp->km_owner = NULL;
+       spin_unlock(&mp->km_lock);
        up(&mp->km_sem);
 }

-/* Return 1 if mutex is held by current process, else zero.
- */
+/* Return 1 if mutex is held by current process, else zero. */
 static __inline__ int
 mutex_owned(kmutex_t *mp)
 {
+       int rc;
+
+       BUG_ON(mp == NULL);
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_magic != KM_MAGIC);
-       return (mp->km_owner == current);
+       rc = (mp->km_owner == current);
+       spin_unlock(&mp->km_lock);
+
+       return rc;
 }

-/* Return owner if mutex is owned, else NULL.
- */
+/* Return owner if mutex is owned, else NULL. */
 static __inline__ kthread_t *
 mutex_owner(kmutex_t *mp)
 {
+       kthread_t *thr;
+
+       BUG_ON(mp == NULL);
+       spin_lock(&mp->km_lock);
        BUG_ON(mp->km_magic != KM_MAGIC);
-       return mp->km_owner;
+       thr = mp->km_owner;
+       spin_unlock(&mp->km_lock);
+
+       return thr;
 }

 #ifdef __cplusplus
diff --git a/modules/spl/spl-generic.c b/modules/spl/spl-generic.c
index 3faa4b6b3..8cd217cf1 100644
--- a/modules/spl/spl-generic.c
+++ b/modules/spl/spl-generic.c
@@ -10,7 +10,8 @@
 /*
  * Generic support
  */
-static char spl_debug_buffer[MAXMSGLEN];
+static char spl_debug_buffer1[1024];
+static char spl_debug_buffer2[1024];
 static spinlock_t spl_debug_lock = SPIN_LOCK_UNLOCKED;

 unsigned long spl_debug_mask = 0;
@@ -83,12 +84,11 @@ EXPORT_SYMBOL(ddi_strtoul);
 void
 __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
 {
-       char *sfp, *start, *ptr;
+       char *sfp;
        struct timeval tv;
        unsigned long flags;
        va_list ap;

-       start = ptr = spl_debug_buffer;
        sfp = strrchr(file, '/');
        do_gettimeofday(&tv);

@@ -98,18 +98,21 @@ __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
         * reason why we really, really, need an internal debug log.
         */
        spin_lock_irqsave(&spl_debug_lock, flags);
-       ptr += snprintf(ptr, MAXMSGLEN - 1,
-                       "spl: %lu.%06lu:%d:%u:%s:%d:%s(): ",
-                       tv.tv_sec, tv.tv_usec, current->pid,
-                       smp_processor_id(),
-                       sfp == NULL ? file : sfp + 1,
-                       line, func);
+       memset(spl_debug_buffer1, 0, 1024);
+       memset(spl_debug_buffer2, 0, 1024);
+
+       snprintf(spl_debug_buffer1, 1023,
+                "spl: %lu.%06lu:%d:%u:%s:%d:%s(): ",
+                tv.tv_sec, tv.tv_usec, current->pid,
+                smp_processor_id(),
+                sfp == NULL ? file : sfp + 1,
+                line, func);

        va_start(ap, fmt);
-       ptr += vsnprintf(ptr, MAXMSGLEN - (ptr - start) - 1, fmt, ap);
+       vsnprintf(spl_debug_buffer2, 1023, fmt, ap);
        va_end(ap);

-       printk("%s", start);
+       printk("%s%s", spl_debug_buffer1, spl_debug_buffer2);
        spin_unlock_irqrestore(&spl_debug_lock, flags);
 }
 EXPORT_SYMBOL(__dprintf);
diff --git a/modules/spl/spl-kmem.c b/modules/spl/spl-kmem.c
index 1b9eaafe6..7c88eda5f 100644
--- a/modules/spl/spl-kmem.c
+++ b/modules/spl/spl-kmem.c
@@ -50,7 +50,6 @@ typedef struct kmem_cache_cb {

 static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
-//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
 static LIST_HEAD(kmem_cache_cb_list);
 static struct shrinker *kmem_cache_shrinker;

@@ -110,17 +109,22 @@ static void
 kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 {
        kmem_cache_cb_t *kcc;
+       kmem_constructor_t constructor;
+       void *private;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(!kcc);
-
-       if (kcc->kcc_constructor)
-               kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
+       constructor = kcc->kcc_constructor;
+       private = kcc->kcc_private;

        spin_unlock(&kmem_cache_cb_lock);
+
+       if (constructor)
+               constructor(ptr, private, (int)flags);
+
        /* Linux constructor has no return code, silently eat it */
 }

@@ -128,18 +132,22 @@ static void
 kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 {
        kmem_cache_cb_t *kcc;
+       kmem_destructor_t destructor;
+       void *private;

        spin_lock(&kmem_cache_cb_lock);

        /* Callback list must be in sync with linux slab caches */
        kcc = kmem_cache_find_cache_cb(cache);
        BUG_ON(!kcc);
-
-       /* Solaris destructor takes no flags, silently eat them */
-       if (kcc->kcc_destructor)
-               kcc->kcc_destructor(ptr, kcc->kcc_private);
+       destructor = kcc->kcc_destructor;
+       private = kcc->kcc_private;

        spin_unlock(&kmem_cache_cb_lock);
+
+       /* Solaris destructor takes no flags, silently eat them */
+       if (destructor)
+               destructor(ptr, private);
 }

 /* XXX - Arguments are ignored */
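
A note on the constructor/destructor hunks above: the pattern is to snapshot the
callback pointer and its private argument while kmem_cache_cb_lock is held, drop
the lock, and only then invoke the callback, so the callback itself is free to
sleep.  The following is a minimal user-space sketch of that pattern using
pthreads; it is not the SPL code itself, and the names (cb_registry, cb_register,
cb_invoke, slow_ctor) are hypothetical, chosen only for illustration.

/* cb_invoke.c - illustrative only; build with: cc cb_invoke.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

typedef void (*cb_fn_t)(void *priv);

struct cb_registry {
        pthread_spinlock_t lock;
        cb_fn_t fn;
        void *priv;
};

static void cb_register(struct cb_registry *reg, cb_fn_t fn, void *priv)
{
        pthread_spin_lock(&reg->lock);
        reg->fn = fn;
        reg->priv = priv;
        pthread_spin_unlock(&reg->lock);
}

static void cb_invoke(struct cb_registry *reg)
{
        cb_fn_t fn;
        void *priv;

        /* Copy the private data out while the spinlock is held... */
        pthread_spin_lock(&reg->lock);
        fn = reg->fn;
        priv = reg->priv;
        pthread_spin_unlock(&reg->lock);

        /* ...and call the callback only after the lock is dropped, so the
         * callback may block (the user-space analogue of sleeping in-kernel). */
        if (fn)
                fn(priv);
}

static void slow_ctor(void *priv)
{
        sleep(1);               /* blocking here is fine: no lock is held */
        printf("constructed %s\n", (const char *)priv);
}

int main(void)
{
        struct cb_registry reg = { .fn = NULL, .priv = NULL };

        pthread_spin_init(&reg.lock, PTHREAD_PROCESS_PRIVATE);
        cb_register(&reg, slow_ctor, "example object");
        cb_invoke(&reg);
        pthread_spin_destroy(&reg.lock);
        return 0;
}

This mirrors what the patch does in kmem_cache_generic_constructor() and
kmem_cache_generic_destructor(): the kcc_constructor/kcc_destructor and
kcc_private fields are read under the spinlock, and the Solaris-style callback
runs only after spin_unlock(), so it may sleep or run for a long time without
risking an NMI watchdog.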