Disable the extra KMEM and MUTEX debugging by default to aid performance.

They can easily be re-enabled when new stability issues are uncovered.



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@105 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-05-09 22:53:20 +00:00
parent 5c2bb9b2c3
commit c6dc93d6a8
5 changed files with 31 additions and 17 deletions
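
Re-enabling is a one-line change per header, since each facility is gated by a single define. A minimal sketch, assuming the defines live in the two headers touched below (file names are not shown in this view): restore the define and drop the matching #undef added by this commit.

	/* Sketch only: flip these back to re-enable the debugging. */
	#define DEBUG_KMEM	/* and remove the "#undef DEBUG_KMEM" line */
	#define DEBUG_MUTEX	/* and remove the "#undef DEBUG_MUTEX" line */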


@@ -5,7 +5,8 @@
 extern "C" {
 #endif
 
-#define DEBUG_KMEM
+//#define DEBUG_KMEM
+#undef DEBUG_KMEM
 #undef DEBUG_KMEM_UNIMPLEMENTED
 
 #include <linux/module.h>
@@ -247,30 +248,25 @@ __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
 	vfree(ptr); \
 })
 
-#else
+#else /* DEBUG_KMEM */
 
 #define kmem_alloc(size, flags)		kmalloc((size), (flags))
 #define kmem_zalloc(size, flags)	kzalloc((size), (flags))
-#define kmem_free(ptr, size)		\
-({					\
-	ASSERT((ptr) || (size > 0));	\
-	kfree(ptr);			\
-})
+#define kmem_free(ptr, size)		kfree(ptr)
 
 #define vmem_alloc(size, flags)		__vmalloc((size), ((flags) | \
 					__GFP_HIGHMEM), PAGE_KERNEL)
-#define vmem_zalloc(size, flags)	__vmalloc((size), ((flags) | \
-					__GFP_HIGHMEM | __GFP_ZERO) \
-					PAGE_KERNEL)
-#define vmem_free(ptr, size)		\
+#define vmem_zalloc(size, flags)	\
 ({					\
-	ASSERT((ptr) || (size > 0));	\
-	vfree(ptr);			\
+	void *_ptr_ = __vmalloc((size),((flags)|__GFP_HIGHMEM),PAGE_KERNEL); \
+	if (_ptr_)			\
+		memset(_ptr_, 0, (size)); \
+	_ptr_;				\
 })
+#define vmem_free(ptr, size)		vfree(ptr)
 
 #endif /* DEBUG_KMEM */
 
 #ifdef DEBUG_KMEM_UNIMPLEMENTED
 static __inline__ void *
 kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
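
A note on the new vmem_zalloc() above: the old expansion was broken (it was missing the comma and bitwise-or around __GFP_ZERO and PAGE_KERNEL), and the replacement drops __GFP_ZERO in favor of an explicit memset() inside a GNU C statement expression, which lets the macro run several statements and still evaluate to the pointer. A user-space analogue of the pattern, with illustrative names that are not part of the SPL API:

	#include <stdlib.h>
	#include <string.h>

	/* GNU C statement expression: the ({ ... }) block evaluates to its
	 * last expression, here the pointer, so the macro behaves like a
	 * function call that allocates and zeroes. */
	#define zalloc_sketch(size)			\
	({						\
		void *_ptr_ = malloc(size);		\
		if (_ptr_)				\
			memset(_ptr_, 0, (size));	\
		_ptr_;					\
	})

	int main(void)
	{
		int *p = zalloc_sketch(16 * sizeof(int));	/* zeroed or NULL */
		free(p);					/* free(NULL) is a no-op */
		return 0;
	}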


@@ -10,7 +10,8 @@ extern "C" {
 #include <sys/types.h>
 #include <sys/kmem.h>
 
-#define DEBUG_MUTEX
+//#define DEBUG_MUTEX
+#undef DEBUG_MUTEX
 
 #define MUTEX_DEFAULT	0
 #define MUTEX_SPIN	1


@@ -378,7 +378,9 @@ __kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags)
 restart:
 	rc = kmem_cache_alloc(cache, flags);
 	if ((rc == NULL) && (flags & KM_SLEEP)) {
+#ifdef DEBUG_KMEM
 		atomic64_inc(&kmem_cache_alloc_failed);
+#endif /* DEBUG_KMEM */
 		GOTO(restart, rc);
 	}
@@ -428,7 +430,9 @@ kmem_init(void)
 	RETURN(0);
 }
 
-static char *sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
+#ifdef DEBUG_KMEM
+static char *
+sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
 {
 	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
 	int i, flag = 1;
@@ -468,6 +472,7 @@ static char *sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
 	return str;
 }
+#endif /* DEBUG_KMEM */
 
 void
 kmem_fini(void)


@@ -88,6 +88,12 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 		return;
 	}
 
+	/* XXX - This appears to be a much more contended lock than I
+	 * would have expected.  To run with this debugging enabled and
+	 * get reasonable performance we may need to be more clever and
+	 * do something like hash the mutex ptr on to one of several
+	 * lists to ease this single point of contention.
+	 */
 	spin_lock(&mutex_stats_lock);
 	list_add_tail(&mp->km_list, &mutex_stats_list);
 	spin_unlock(&mutex_stats_lock);
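
The XXX comment in this hunk sketches the fix it has in mind: hash the mutex pointer onto one of several lists, each with its own lock, so insertions no longer serialize on mutex_stats_lock. A minimal sketch of that idea, with assumed names and bucket count; this is not code from the commit:

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/hash.h>
	#include <sys/mutex.h>		/* kmutex_t, as used above */

	#define MUTEX_STATS_HASH_BITS	4	/* assumed: 16 buckets */
	#define MUTEX_STATS_BUCKETS	(1 << MUTEX_STATS_HASH_BITS)

	static spinlock_t mutex_stats_locks[MUTEX_STATS_BUCKETS];
	static struct list_head mutex_stats_lists[MUTEX_STATS_BUCKETS];

	static void mutex_stats_add(kmutex_t *mp)
	{
		/* hash_ptr() spreads pointers over 2^bits buckets, so
		 * unrelated mutexes rarely contend on the same lock. */
		unsigned int b = hash_ptr(mp, MUTEX_STATS_HASH_BITS);

		spin_lock(&mutex_stats_locks[b]);
		list_add_tail(&mp->km_list, &mutex_stats_lists[b]);
		spin_unlock(&mutex_stats_locks[b]);
	}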


@@ -325,6 +325,7 @@ proc_console_backoff(struct ctl_table *table, int write, struct file *filp,
 	RETURN(rc);
 }
 
+#ifdef DEBUG_KMEM
 static int
 proc_doatomic64(struct ctl_table *table, int write, struct file *filp,
 		void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -349,6 +350,7 @@ proc_doatomic64(struct ctl_table *table, int write, struct file *filp,
 	RETURN(rc);
 }
+#endif /* DEBUG_KMEM */
 
 static int
 proc_dohostid(struct ctl_table *table, int write, struct file *filp,
@@ -829,7 +831,9 @@ proc_init(void)
 	RETURN(rc);
 out2:
+#ifdef DEBUG_MUTEX
 	remove_proc_entry("stats_per", proc_sys_spl_mutex);
+#endif /* DEBUG_MUTEX */
 out:
 	unregister_sysctl_table(spl_header);
 #endif /* CONFIG_SYSCTL */
@@ -843,7 +847,9 @@ proc_fini(void)
 #ifdef CONFIG_SYSCTL
 	ASSERT(spl_header != NULL);
+#ifdef DEBUG_MUTEX
 	remove_proc_entry("stats_per", proc_sys_spl_mutex);
+#endif /* DEBUG_MUTEX */
 	unregister_sysctl_table(spl_header);
 #endif
 	EXIT;