Fix KMEM_DEBUG support (enable by default)

Add vmem_alloc/vmem_free support (and test case)
Add missing time functions



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@46 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-03-14 19:04:41 +00:00
parent af828292e5
commit 79b31f3601
7 changed files with 146 additions and 28 deletions
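
For context, the vmem_alloc()/vmem_free() pair added below is a vmalloc()-backed counterpart to the existing kmem_alloc()/kmem_free() shim: the caller passes KM_SLEEP and must remember the allocation size for the free. A minimal usage sketch follows; example_vmem_usage() is a hypothetical consumer, not part of this commit.

#include <linux/errno.h>
#include <sys/kmem.h>           /* SPL shim header changed below */

/* Hypothetical consumer: allocate a large, virtually contiguous buffer.
 * With DEBUG_KMEM enabled the shim also tracks the bytes in
 * vmem_alloc_used/vmem_alloc_max and poisons the buffer on free. */
static int
example_vmem_usage(void)
{
        void *buf;
        size_t len = 16 * PAGE_SIZE;

        buf = vmem_alloc(len, KM_SLEEP);
        if (buf == NULL)
                return -ENOMEM;

        /* ... use buf ... */

        vmem_free(buf, len);    /* size must match the allocation */
        return 0;
}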

FIXME

@@ -10,3 +10,5 @@ sys/acl.h - All borrowed from libsolcompat
 sys/acl_impl.h - All borrowed from libsolcompat
 * Implement solaris style atomic interfaces
+* Fully implement vnode support for ZPL layer to integrate with VFS.


@@ -5,11 +5,12 @@
 extern "C" {
 #endif
-#undef DEBUG_KMEM
+#define DEBUG_KMEM
 #undef DEBUG_KMEM_UNIMPLEMENTED
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
 /*
@@ -23,12 +24,11 @@ extern "C" {
 #define KM_FLAGS __GFP_BITS_MASK
 #ifdef DEBUG_KMEM
-/* Shim layer memory accounting */
 extern atomic_t kmem_alloc_used;
 extern unsigned int kmem_alloc_max;
-#endif
+extern atomic_t vmem_alloc_used;
+extern unsigned int vmem_alloc_max;
-#ifdef DEBUG_KMEM
 #define __kmem_alloc(size, flags, allocator) \
 ({      void *_ptr_; \
         \
@@ -58,13 +58,40 @@ extern unsigned int kmem_alloc_max;
 #define kmem_free(ptr, size) \
 ({ \
-        BUG_ON(!ptr || size < 0); \
+        BUG_ON(!(ptr) || (size) < 0); \
         atomic_sub((size), &kmem_alloc_used); \
         memset(ptr, 0x5a, (size)); /* Poison */ \
         kfree(ptr); \
+        (ptr) = (void *)0xdeadbeef; \
 })
+#define __vmem_alloc(size, flags) \
+({      void *_ptr_; \
+        \
+        BUG_ON(flags != KM_SLEEP); \
+        \
+        _ptr_ = (void *)vmalloc((size)); \
+        if (_ptr_ == NULL) { \
+                printk("Warning: vmem_alloc(%d, 0x%x) failed at %s:%d " \
+                       "(%d/%d)\n", (int)(size), (int)(flags), \
+                       __FILE__, __LINE__, \
+                       atomic_read(&vmem_alloc_used), vmem_alloc_max); \
+                atomic_add((size), &vmem_alloc_used); \
+                if (unlikely(atomic_read(&vmem_alloc_used) > vmem_alloc_max)) \
+                        vmem_alloc_max = atomic_read(&vmem_alloc_used); \
+        } \
+        \
+        _ptr_; \
+})
+#define vmem_alloc(size, flags) __vmem_alloc(size, flags)
+#define vmem_free(ptr, size) \
+({ \
+        BUG_ON(!(ptr) || (size) < 0); \
+        atomic_sub((size), &vmem_alloc_used); \
+        memset(ptr, 0x5a, (size)); /* Poison */ \
+        vfree(ptr); \
+})
 #else
@@ -72,10 +99,17 @@ extern unsigned int kmem_alloc_max;
 #define kmem_zalloc(size, flags) kzalloc(size, flags)
 #define kmem_free(ptr, size) \
 ({ \
-        BUG_ON(!ptr || size < 0); \
+        BUG_ON(!(ptr) || (size) < 0); \
         kfree(ptr); \
 })
+#define vmem_alloc(size, flags) vmalloc(size)
+#define vmem_free(ptr, size) \
+({ \
+        BUG_ON(!(ptr) || (size) < 0); \
+        vfree(ptr); \
+})
 #endif /* DEBUG_KMEM */
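
With DEBUG_KMEM now defined by default, the shim keeps running byte counts of outstanding kmem and vmem allocations in the counters declared above and exported by the kmem shim later in this diff. A minimal sketch of how a consumer might dump them, e.g. from a module exit path, to spot leaks; example_kmem_accounting() is a hypothetical helper, not part of this commit.

#include <linux/module.h>
#include <sys/kmem.h>

static void
example_kmem_accounting(void)
{
        /* atomic_t counters track bytes currently allocated; *_max are high-water marks */
        printk("kmem %d bytes in use (max %u); vmem %d bytes in use (max %u)\n",
               atomic_read(&kmem_alloc_used), kmem_alloc_max,
               atomic_read(&vmem_alloc_used), vmem_alloc_max);
}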


@@ -14,11 +14,6 @@ extern "C" {
 #include <linux/time.h>
 #include <sys/types.h>
-extern unsigned long long monotonic_clock(void);
-extern void __gethrestime(timestruc_t *);
-#define gethrestime(ts) __gethrestime(ts)
 #define TIME32_MAX INT32_MAX
 #define TIME32_MIN INT32_MIN
@@ -27,12 +22,37 @@ extern void __gethrestime(timestruc_t *);
 #define MICROSEC 1000000
 #define NANOSEC 1000000000
+/* Already defined in include/linux/time.h */
+#undef CLOCK_THREAD_CPUTIME_ID
+#undef CLOCK_REALTIME
+#undef CLOCK_MONOTONIC
+#undef CLOCK_PROCESS_CPUTIME_ID
+typedef enum clock_type {
+        __CLOCK_REALTIME0 = 0,          /* obsolete; same as CLOCK_REALTIME */
+        CLOCK_VIRTUAL = 1,              /* thread's user-level CPU clock */
+        CLOCK_THREAD_CPUTIME_ID = 2,    /* thread's user+system CPU clock */
+        CLOCK_REALTIME = 3,             /* wall clock */
+        CLOCK_MONOTONIC = 4,            /* high resolution monotonic clock */
+        CLOCK_PROCESS_CPUTIME_ID = 5,   /* process's user+system CPU clock */
+        CLOCK_HIGHRES = CLOCK_MONOTONIC,        /* alternate name */
+        CLOCK_PROF = CLOCK_THREAD_CPUTIME_ID,   /* alternate name */
+} clock_type_t;
 #define hz \
 ({ \
         BUG_ON(HZ < 100 || HZ > MICROSEC); \
         HZ; \
 })
+extern void __gethrestime(timestruc_t *);
+extern int __clock_gettime(clock_type_t, timespec_t *);
+extern hrtime_t __gethrtime(void);
+#define gethrestime(ts) __gethrestime(ts)
+#define clock_gettime(fl, tp) __clock_gettime(fl, tp)
+#define gethrtime() __gethrtime()
 static __inline__ time_t
 gethrestime_sec(void)
 {
@@ -42,19 +62,6 @@ gethrestime_sec(void)
         return now.tv_sec;
 }
-static __inline__ hrtime_t
-gethrtime(void) {
-        /* BUG_ON(cur_timer == timer_none); */
-        /* Solaris expects a long long here but monotonic_clock() returns an
-         * unsigned long long. Note that monotonic_clock() returns the number
-         * of nanoseconds passed since kernel initialization. Even for a signed
-         * long long this will not "go negative" for ~292 years.
-         */
-        return monotonic_clock();
-}
 #ifdef __cplusplus
 }
 #endif


@@ -23,6 +23,7 @@ typedef struct task_struct kthread_t;
 typedef struct vmem { } vmem_t;
 typedef short pri_t;
 typedef struct timespec timestruc_t; /* definition per SVr4 */
+typedef struct timespec timespec_t;
 typedef longlong_t hrtime_t;
 typedef unsigned short ushort_t;
 typedef u_longlong_t len_t;


@@ -7,6 +7,13 @@
 /* Shim layer memory accounting */
 atomic_t kmem_alloc_used;
 unsigned int kmem_alloc_max;
+atomic_t vmem_alloc_used;
+unsigned int vmem_alloc_max;
+EXPORT_SYMBOL(kmem_alloc_used);
+EXPORT_SYMBOL(kmem_alloc_max);
+EXPORT_SYMBOL(vmem_alloc_used);
+EXPORT_SYMBOL(vmem_alloc_max);
 #endif
 /*


@@ -7,5 +7,31 @@ __gethrestime(timestruc_t *ts)
 {
         getnstimeofday((struct timespec *)ts);
 }
 EXPORT_SYMBOL(__gethrestime);
+int
+__clock_gettime(clock_type_t type, timespec_t *tp)
+{
+        /* Only support CLOCK_REALTIME+__CLOCK_REALTIME0 for now */
+        BUG_ON(!((type == CLOCK_REALTIME) || (type == __CLOCK_REALTIME0)));
+        getnstimeofday(tp);
+        return 0;
+}
+EXPORT_SYMBOL(__clock_gettime);
+/* This function may not be as fast as using monotonic_clock() but it
+ * should be much more portable, if performance becomes an issue we can
+ * look at using monotonic_clock() for x86_64 and x86 arches.
+ */
+hrtime_t
+__gethrtime(void) {
+        timespec_t tv;
+        hrtime_t rc;
+        do_posix_clock_monotonic_gettime(&tv);
+        rc = (NSEC_PER_SEC * (hrtime_t)tv.tv_sec) + (hrtime_t)tv.tv_nsec;
+        return rc;
+}
+EXPORT_SYMBOL(__gethrtime);
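
The time shim now provides gethrestime(), gethrtime(), and clock_gettime() as thin wrappers over the kernel clock sources wired up above. A minimal usage sketch follows; example_time_usage() is a hypothetical caller, not part of this commit.

#include <sys/time.h>           /* SPL shim header changed above */

static void
example_time_usage(void)
{
        timestruc_t now;
        hrtime_t start, elapsed;

        start = gethrtime();            /* monotonic time in nanoseconds */
        /* ... do some work ... */
        elapsed = gethrtime() - start;

        gethrestime(&now);              /* wall-clock time, struct timespec */
        (void) elapsed;
        (void) now;
}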


@@ -20,7 +20,13 @@
 #define SPLAT_KMEM_TEST4_NAME "slab_reap"
 #define SPLAT_KMEM_TEST4_DESC "Slab reaping test"
+#define SPLAT_KMEM_TEST5_ID 0x0105
+#define SPLAT_KMEM_TEST5_NAME "vmem_alloc"
+#define SPLAT_KMEM_TEST5_DESC "Memory allocation test (vmem_alloc)"
 #define SPLAT_KMEM_ALLOC_COUNT 10
+#define SPLAT_VMEM_ALLOC_COUNT 10
 /* XXX - This test may fail under tight memory conditions */
 static int
 splat_kmem_test1(struct file *file, void *arg)
@@ -29,7 +35,7 @@ splat_kmem_test1(struct file *file, void *arg)
         int size = PAGE_SIZE;
         int i, count, rc = 0;
-        while ((!rc) && (size < (PAGE_SIZE * 16))) {
+        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                 count = 0;
                 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
@@ -61,7 +67,7 @@ splat_kmem_test2(struct file *file, void *arg)
         int size = PAGE_SIZE;
         int i, j, count, rc = 0;
-        while ((!rc) && (size < (PAGE_SIZE * 16))) {
+        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                 count = 0;
                 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
@@ -317,6 +323,38 @@ splat_kmem_test4(struct file *file, void *arg)
         return rc;
 }
+static int
+splat_kmem_test5(struct file *file, void *arg)
+{
+        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
+        int size = PAGE_SIZE;
+        int i, count, rc = 0;
+        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
+                count = 0;
+                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+                        ptr[i] = vmem_alloc(size, KM_SLEEP);
+                        if (ptr[i])
+                                count++;
+                }
+                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
+                        if (ptr[i])
+                                vmem_free(ptr[i], size);
+                splat_vprint(file, SPLAT_KMEM_TEST5_NAME,
+                             "%d byte allocations, %d/%d successful\n",
+                             size, count, SPLAT_VMEM_ALLOC_COUNT);
+                if (count != SPLAT_VMEM_ALLOC_COUNT)
+                        rc = -ENOMEM;
+                size *= 2;
+        }
+        return rc;
+}
 splat_subsystem_t *
 splat_kmem_init(void)
 {
@@ -342,6 +380,8 @@ splat_kmem_init(void)
                         SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
         SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                         SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
+        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
+                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
         return sub;
 }
@@ -350,6 +390,7 @@ void
 splat_kmem_fini(splat_subsystem_t *sub)
 {
         ASSERT(sub);
+        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
         SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
         SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
         SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);