mirror_zfs/modules/splat/splat-kmem.c
behlendo 4afaaefa05 Implement per-cpu local caches.  This seems to have bought me another
factor of 10x improvement on SMP systems due to reduced lock contention.
This may put me in the ballpark of what is needed.  We could still
improve things further on NUMA systems by creating an additional L3
cache per memory node instead of the current global pool, but with luck
that won't be needed.  I should also take another look at the locking
now that everything is working; there's a good chance it can be
tightened up for another modest improvement.  A minimal sketch of the
per-cpu idea follows the table below.

   kmem_lock: time (sec)        slabs           objs            hash
   kmem_lock:                   tot/max/calc    tot/max/calc    size/depth
   kmem_lock:  0.000999926      6/6/1           192/192/32      32768/0
   kmem_lock:  0.000999926      4/4/2           128/128/64      32768/0
   kmem_lock:  0.000999926      4/4/4           128/128/128     32768/0
   kmem_lock:  0.000999926      4/4/8           128/128/256     32768/0
   kmem_lock:  0.000999926      4/4/16          128/128/512     32768/0
   kmem_lock:  0.000999926      4/4/32          128/128/1024    32768/0
   kmem_lock:  0.000999926      4/4/64          128/128/2048    32768/0
   kmem_lock:  0.000999926      8/8/128         256/256/4096    32768/0
   kmem_lock:  0.003999704      24/23/256       768/736/8192    32768/1
   kmem_lock:  0.012999038      44/41/512       1408/1312/16384 32768/1
   kmem_lock:  0.051996153      96/93/1024      3072/2976/32768 32768/2
   kmem_lock:  0.181986536      187/184/2048    5984/5888/65536 32768/3
   kmem_lock:  0.655951469      342/339/4096    10944/10848/131072 32768/4
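
For reference, the gist of the per-cpu scheme as a minimal sketch.  The
names cpu_magazine_t, cache_alloc_fastpath, cache_free_fastpath, and
MAG_SIZE below are hypothetical, for illustration only; they are not the
actual structures in this commit.  Each CPU owns a small magazine of
cached objects, so the common alloc/free paths touch no shared lock;
only magazine refill and drain take the global cache lock.

    #define MAG_SIZE 64

    typedef struct cpu_magazine {
            int   cm_rounds;            /* objects currently in the magazine */
            void  *cm_objs[MAG_SIZE];   /* locally cached objects */
    } cpu_magazine_t;

    /* Lock-free fast path; the magazine is cpu-private. */
    static void *
    cache_alloc_fastpath(cpu_magazine_t *cm)
    {
            if (cm->cm_rounds > 0)
                    return cm->cm_objs[--cm->cm_rounds];

            return NULL;  /* empty: caller refills under the global lock */
    }

    /* Lock-free fast path; stash the freed object locally if room. */
    static int
    cache_free_fastpath(cpu_magazine_t *cm, void *obj)
    {
            if (cm->cm_rounds < MAG_SIZE) {
                    cm->cm_objs[cm->cm_rounds++] = obj;
                    return 1;
            }

            return 0;  /* full: caller drains under the global lock */
    }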



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@136 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
2008-06-25 20:57:45 +00:00

/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"
#define SPLAT_SUBSYSTEM_KMEM 0x0100
#define SPLAT_KMEM_NAME "kmem"
#define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests"
#define SPLAT_KMEM_TEST1_ID 0x0101
#define SPLAT_KMEM_TEST1_NAME "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
#define SPLAT_KMEM_TEST2_ID 0x0102
#define SPLAT_KMEM_TEST2_NAME "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
#define SPLAT_KMEM_TEST3_ID 0x0103
#define SPLAT_KMEM_TEST3_NAME "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC "Memory allocation test (vmem_alloc)"
#define SPLAT_KMEM_TEST4_ID 0x0104
#define SPLAT_KMEM_TEST4_NAME "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC "Memory allocation test (vmem_zalloc)"
#define SPLAT_KMEM_TEST5_ID 0x0105
#define SPLAT_KMEM_TEST5_NAME "kmem_cache1"
#define SPLAT_KMEM_TEST5_DESC "Slab ctor/dtor test (small)"
#define SPLAT_KMEM_TEST6_ID 0x0106
#define SPLAT_KMEM_TEST6_NAME "kmem_cache2"
#define SPLAT_KMEM_TEST6_DESC "Slab ctor/dtor test (large)"
#define SPLAT_KMEM_TEST7_ID 0x0107
#define SPLAT_KMEM_TEST7_NAME "kmem_reap"
#define SPLAT_KMEM_TEST7_DESC "Slab reaping test"
#define SPLAT_KMEM_TEST8_ID 0x0108
#define SPLAT_KMEM_TEST8_NAME "kmem_lock"
#define SPLAT_KMEM_TEST8_DESC "Slab locking test"
#define SPLAT_KMEM_ALLOC_COUNT 10
#define SPLAT_VMEM_ALLOC_COUNT 10
/* Not exported from the kernel, but we need it for timespec_sub.  Be very
 * careful: we are using the kernel prototype, so it must not change.
 */
void
set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}
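
/*
 * Worked example (illustrative): set_normalized_timespec(&ts, 1, 1500000000)
 * yields ts.tv_sec == 2 and ts.tv_nsec == 500000000.  timespec_sub(), used
 * by the kmem_lock test below, relies on this normalization whenever the
 * raw nanosecond difference goes negative.
 */
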
/* XXX - This test may fail under tight memory conditions */
static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which may have failed */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which may have failed */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC           0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME           "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT            128
#define SPLAT_KMEM_OBJ_RECLAIM          16

typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT];
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_waitq;
        int kcp_size;
        int kcp_count;
        int kcp_threads;
        int kcp_alloc;
        int kcp_rc;
} kmem_cache_priv_t;
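
/*
 * Note: kcd_buf[0] is the zero-length array idiom, so the usable payload
 * of an object in a cache of size S is S - sizeof(kmem_cache_data_t);
 * that difference is exactly what the ctor/dtor memsets below pattern.
 * For example, the large cache in test 6 is created with
 * S = sizeof(kmem_cache_data_t) * 1024, leaving a payload of
 * sizeof(kmem_cache_data_t) * 1023 bytes.
 */
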
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd) {
                if (kcp) {
                        kcd->kcd_magic = kcp->kcp_magic;
                        kcp->kcp_count++;
                        /* kcp supplies the object size, so only pattern
                         * the payload when private data was provided */
                        memset(kcd->kcd_buf, 0xaa,
                               kcp->kcp_size - (sizeof *kcd));
                }

                kcd->kcd_flag = 1;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd) {
                if (kcp) {
                        kcd->kcd_magic = 0;
                        kcp->kcp_count--;
                        /* As in the constructor, kcp supplies the size */
                        memset(kcd->kcd_buf, 0xbb,
                               kcp->kcp_size - (sizeof *kcd));
                }

                kcd->kcd_flag = 0;
        }

        return;
}

static int
splat_kmem_cache_size_test(struct file *file, void *arg,
                           char *name, int size, int flags)
{
        kmem_cache_t *cache = NULL;
        kmem_cache_data_t *kcd = NULL;
        kmem_cache_priv_t kcp;
        int rc = 0, max;

        kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp.kcp_file = file;
        kcp.kcp_size = size;
        kcp.kcp_count = 0;
        kcp.kcp_rc = 0;

        cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, &kcp, NULL, flags);
        if (!cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        kcd = kmem_cache_alloc(cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (!kcd->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcd->kcd_magic != kcp.kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp.kcp_count;
        kmem_cache_free(cache, kcd);

        /* Destroy the entire cache, which forces the destructors to run
         * so we can verify one was called for every object */
        kmem_cache_destroy(cache);
        if (kcp.kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcd)
                kmem_cache_free(cache, kcd);

        kmem_cache_destroy(cache);

        return rc;
}

static int
splat_kmem_test5(struct file *file, void *arg)
{
        return splat_kmem_cache_size_test(file, arg, SPLAT_KMEM_TEST5_NAME,
                                          sizeof(kmem_cache_data_t) * 1, 0);
}

static int
splat_kmem_test6(struct file *file, void *arg)
{
        return splat_kmem_cache_size_test(file, arg, SPLAT_KMEM_TEST6_NAME,
                                          sizeof(kmem_cache_data_t) * 1024, 0);
}

static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        int i, count;

        count = min(SPLAT_KMEM_OBJ_RECLAIM, kcp->kcp_count);
        splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST7_NAME,
                     "Reaping %d objects from '%s'\n", count,
                     SPLAT_KMEM_CACHE_NAME);

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if (--count == 0)
                                break;
                }
        }

        return;
}

static int
splat_kmem_test7(struct file *file, void *arg)
{
        kmem_cache_t *cache;
        kmem_cache_priv_t kcp;
        int i, rc = 0;

        kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp.kcp_file = file;
        kcp.kcp_size = 256;
        kcp.kcp_count = 0;
        kcp.kcp_rc = 0;

        cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  &kcp, NULL, 0);
        if (!cache) {
                splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        kcp.kcp_cache = cache;

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                /* Not every allocation needs to succeed */
                kcp.kcp_kcd[i] = kmem_cache_alloc(cache, KM_SLEEP);
                if (!kcp.kcp_kcd[i]) {
                        splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        ASSERT(kcp.kcp_count > 0);

        /* Request that the slab cache free any objects it can.  For a few
         * reasons this may not immediately result in more free memory even
         * if objects are freed.  First off, due to fragmentation we may not
         * be able to reclaim any slabs.  Secondly, even if we do fully clear
         * some slabs we will not want to reclaim all of them immediately
         * because we may contend with cache allocs and thrash.  What we want
         * to see is the slab size decrease more gradually as it becomes
         * clear the slabs will not be needed.  This should be achievable in
         * less than a minute; if it takes longer than that something has
         * gone wrong.
         */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(cache);
                splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u\n",
                             SPLAT_KMEM_CACHE_NAME, kcp.kcp_count,
                             (unsigned)cache->skc_slab_alloc,
                             (unsigned)cache->skc_slab_total,
                             (unsigned)cache->skc_obj_alloc,
                             (unsigned)cache->skc_obj_total);
                if (cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for the failure case where time expired) */
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp.kcp_kcd[i])
                        kmem_cache_free(cache, kcp.kcp_kcd[i]);

        kmem_cache_destroy(cache);

        return rc;
}

static void
splat_kmem_test8_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        int count = kcp->kcp_alloc, rc = 0, i;
        void **objs;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        objs = vmem_zalloc(count * sizeof(void *), KM_SLEEP);
        if (!objs) {
                splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to alloc objp array for cache '%s'\n",
                             kcp->kcp_cache->skc_name);
                rc = -ENOMEM;
                goto out;
        }

        for (i = 0; i < count; i++) {
                objs[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                if (!objs[i]) {
                        splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from cache '%s'\n",
                                     kcp->kcp_cache->skc_name);
                        rc = -ENOMEM;
                        break;
                }
        }

        for (i = 0; i < count; i++)
                if (objs[i])
                        kmem_cache_free(kcp->kcp_cache, objs[i]);

        vmem_free(objs, count * sizeof(void *));
out:
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_threads--;
        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;
        spin_unlock(&kcp->kcp_lock);

        wake_up(&kcp->kcp_waitq);

        thread_exit();
}

static int
splat_kmem_test8_count(kmem_cache_priv_t *kcp, int threads)
{
        int ret;

        spin_lock(&kcp->kcp_lock);
        ret = (kcp->kcp_threads == threads);
        spin_unlock(&kcp->kcp_lock);

        return ret;
}

/* This test will always pass and is simply here so I can easily
* eyeball the slab cache locking overhead to ensure it is reasonable.
*/
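/*
 * Reading the output (my interpretation of the code below): for both the
 * slab and object columns, "tot" is the cache's current total, "max" the
 * high-water mark, and "calc" the worst case if all 32 threads held their
 * allocations simultaneously (kcp_alloc * 32 objects, divided by
 * SPL_KMEM_CACHE_OBJ_PER_SLAB for slabs).  A "tot" well below "calc"
 * means the threads never all peaked at once.
 */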
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[16];
        int alloc, i;

        kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp.kcp_file = file;

        splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%s",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%s",
                     " \ttot/max/calc\ttot/max/calc\tsize/depth\n");

        for (alloc = 1; alloc <= 4096; alloc *= 2) {
                kcp.kcp_size = 256;
                kcp.kcp_count = 0;
                kcp.kcp_threads = 0;
                kcp.kcp_alloc = alloc;
                kcp.kcp_rc = 0;
                spin_lock_init(&kcp.kcp_lock);
                init_waitqueue_head(&kcp.kcp_waitq);

                sprintf(cache_name, "%s-%d", SPLAT_KMEM_CACHE_NAME, alloc);
                kcp.kcp_cache = kmem_cache_create(cache_name, kcp.kcp_size, 0,
                                          splat_kmem_cache_test_constructor,
                                          splat_kmem_cache_test_destructor,
                                          NULL, &kcp, NULL, 0);
                if (!kcp.kcp_cache) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to create '%s' cache\n",
                                     SPLAT_KMEM_CACHE_NAME);
                        return -ENOMEM;
                }

                start = current_kernel_time();

                for (i = 0; i < 32; i++) {
                        thr = thread_create(NULL, 0, splat_kmem_test8_thread,
                                            &kcp, 0, &p0, TS_RUN, minclsyspri);
                        ASSERT(thr != NULL);
                        kcp.kcp_threads++;
                }

                /* Sleep until the last worker thread has decremented
                 * kcp.kcp_threads to 0 */
                wait_event(kcp.kcp_waitq, splat_kmem_test8_count(&kcp, 0));
                stop = current_kernel_time();

                delta = timespec_sub(stop, start);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%2ld.%09ld\t"
                             "%lu/%lu/%lu\t%lu/%lu/%lu\t%lu/%lu\n",
                             delta.tv_sec, delta.tv_nsec,
                             (unsigned long)kcp.kcp_cache->skc_slab_total,
                             (unsigned long)kcp.kcp_cache->skc_slab_max,
                             (unsigned long)(kcp.kcp_alloc * 32 /
                                             SPL_KMEM_CACHE_OBJ_PER_SLAB),
                             (unsigned long)kcp.kcp_cache->skc_obj_total,
                             (unsigned long)kcp.kcp_cache->skc_obj_max,
                             (unsigned long)(kcp.kcp_alloc * 32),
                             (unsigned long)kcp.kcp_cache->skc_hash_size,
                             (unsigned long)kcp.kcp_cache->skc_hash_depth);

                kmem_cache_destroy(kcp.kcp_cache);

                if (kcp.kcp_rc)
                        break;
        }

        return kcp.kcp_rc;
}

splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);

        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void)
{
        return SPLAT_SUBSYSTEM_KMEM;
}