1) Ensure mutex_init() never fails in the case of ENOMEM by retrying
   forever.  I don't think I've ever seen this happen, but it was clear
   after code inspection that if it did we would immediately crash.
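
   The retry lives in the reworked mutex_init() macro (see the mutex
   header hunk below); a minimal sketch of the pattern, assuming only
   that __spl_mutex_init() now returns 0 on success and -ENOMEM on
   allocation failure:

       /* Spin until the allocating initializer succeeds.  ENOMEM is
        * transient, so retrying forever beats handing the caller a
        * half-initialized mutex that will ASSERT on first use. */
       #define mutex_init(mp, name, type, ibc)                        \
       ({                                                             \
               while (__spl_mutex_init(mp, (name) ? (name) : #mp,     \
                                       type, ibc));                   \
       })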

2) Enable full debugging in check.sh for sanity tests.  Might as well
   gather as much debug information as we can in the case of a failure.

3) Reworked the list of kmem caches tracked by the SPL into a hash
   keyed on the address of the kmem_cache_t.  This should speed up
   the constructor/destructor/shrinker lookup now needed for newer
   kernels, which removed destructor support.
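
   A minimal sketch of the pointer-keyed lookup, using the
   kmem_cache_table[] and KMEM_CACHE_HASH_BITS definitions added in
   the kmem hunks below; kmem_cache_hash_bucket() is a hypothetical
   helper for illustration:

       #include <linux/hash.h>
       #include <linux/list.h>

       #define KMEM_CACHE_HASH_BITS    10
       #define KMEM_CACHE_TABLE_SIZE   (1 << KMEM_CACHE_HASH_BITS)

       static struct hlist_head kmem_cache_table[KMEM_CACHE_TABLE_SIZE];

       /* kmem_cache_t is opaque, so the cache address is the only
        * stable unique key; hash_ptr() folds it into one of the
        * 1024 hlist buckets. */
       static inline struct hlist_head *
       kmem_cache_hash_bucket(kmem_cache_t *cache)
       {
               return &kmem_cache_table[hash_ptr(cache,
                                        KMEM_CACHE_HASH_BITS)];
       }

   Lookups then walk a single bucket with hlist_for_each_entry_rcu()
   instead of scanning the whole cache list, which matters because the
   lookup now happens in the constructor path of every allocation.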

4) Updated kmem_cache_create() to handle the case where CONFIG_SLUB
   is defined.  The slub would occasionally merge slab caches, which
   resulted in non-unique keys for our hash lookup in 3).  To fix this
   we detect if the slub is enabled and then set the needed flag
   to prevent this merging from ever occurring.
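
   The guard amounts to two lines ahead of the kmem_cache_create()
   call, shown in context in the kmem hunk below:

       #ifdef HAVE_SLUB
       /* Any of SLAB_RED_ZONE, SLAB_POISON, SLAB_STORE_USER,
        * SLAB_TRACE, or SLAB_DESTROY_BY_RCU disqualifies a cache
        * from slub merging, keeping its address unique for use
        * as our hash key. */
       flags |= SLAB_STORE_USER;
       #endif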

5) Newer kernels removed the proc_dir_entry pointer from items
   registered by sysctl.  This means we can no longer be sneaky and
   manually insert things into the sysctl tree simply by walking
   the proc tree.  So I'm forced to create a separate tree for
   all the things I can't easily support via the sysctl interface.
   I don't like it, but it will do for now.
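
   A minimal sketch of the standalone tree, matching the proc_mkdir()
   calls in the proc hunks below:

       #include <linux/proc_fs.h>

       /* Create /proc/spl and hang the subsystem directories off it
        * directly, rather than locating /proc/sys/spl entries by
        * walking the proc_dir_entry lists. */
       proc_spl       = proc_mkdir("spl",   NULL);     /* /proc/spl       */
       proc_spl_mutex = proc_mkdir("mutex", proc_spl); /* /proc/spl/mutex */
       proc_spl_kmem  = proc_mkdir("kmem",  proc_spl); /* /proc/spl/kmem  */
       proc_spl_kstat = proc_mkdir("kstat", proc_spl); /* /proc/spl/kstat */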



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@124 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
Author: behlendo
Date:   2008-06-04 06:00:46 +00:00
Parent: 691d2bd733
Commit: c30df9c863

10 changed files with 163 additions and 114 deletions


@@ -280,6 +280,17 @@ AC_DEFUN([SPL_CHECK_SYMBOL_EXPORT],
fi
])
dnl #
dnl # 2.6.x API change
dnl # Slab can now be implemented in terms of the Slub which provides
dnl # slightly different semantics in terms of merged caches.
dnl #
AC_DEFUN([SPL_AC_SLUB], [
SPL_LINUX_CONFIG([SLUB],
[AC_DEFINE(HAVE_SLUB, 1, [slub support configured])],
[])
])
dnl #
dnl # 2.6.x API change
dnl # check if uintptr_t typedef is defined


@@ -43,6 +43,7 @@ SPL_AC_DEBUG_KMEM
SPL_AC_DEBUG_MUTEX
SPL_AC_DEBUG_KSTAT
SPL_AC_DEBUG_CALLB
SPL_AC_SLUB
SPL_AC_TYPE_UINTPTR_T
SPL_AC_TYPE_KMEM_CACHE_T
SPL_AC_KMEM_CACHE_DESTROY_INT


@@ -291,7 +291,7 @@ do { \
if (unlikely(!(cond))) { \
spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
__FILE__, __FUNCTION__, __LINE__, \
"ASSERTION(" #cond ") failed:" fmt, \
"ASSERTION(" #cond ") failed: " fmt, \
## a); \
SBUG(); \
} \


@@ -78,7 +78,7 @@ extern struct list_head mutex_stats_list;
int spl_mutex_init(void);
void spl_mutex_fini(void);
extern void __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc);
extern int __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc);
extern void __spl_mutex_destroy(kmutex_t *mp);
extern int __mutex_tryenter(kmutex_t *mp);
extern void __mutex_enter(kmutex_t *mp);
@@ -91,10 +91,11 @@ extern kthread_t *__spl_mutex_owner(kmutex_t *mp);
#define mutex_init(mp, name, type, ibc) \
({ \
/* May never fail or all subsequent mutex_* calls will ASSERT */\
if ((name) == NULL) \
__spl_mutex_init(mp, #mp, type, ibc); \
while(__spl_mutex_init(mp, #mp, type, ibc)); \
else \
__spl_mutex_init(mp, name, type, ibc); \
while(__spl_mutex_init(mp, name, type, ibc)); \
})
#define mutex_destroy(mp) __spl_mutex_destroy(mp)
#define mutex_tryenter(mp) __mutex_tryenter(mp)


@@ -49,7 +49,7 @@
#endif /* CONFIG_SYSCTL */
#ifdef DEBUG_KSTAT
extern struct proc_dir_entry *proc_sys_spl_kstat;
extern struct proc_dir_entry *proc_spl_kstat;
struct proc_dir_entry *proc_dir_entry_find(struct proc_dir_entry *root,
const char *str);
int proc_dir_entries(struct proc_dir_entry *root);


@@ -92,6 +92,7 @@ EXPORT_SYMBOL(kmem_set_warning);
typedef struct kmem_cache_cb {
int kcc_magic;
struct hlist_node kcc_hlist;
struct list_head kcc_list;
kmem_cache_t * kcc_cache;
kmem_constructor_t kcc_constructor;
@@ -102,8 +103,13 @@ typedef struct kmem_cache_cb {
atomic_t kcc_ref;
} kmem_cache_cb_t;
static struct rw_semaphore kmem_cache_cb_sem;
static struct list_head kmem_cache_cb_list;
#define KMEM_CACHE_HASH_BITS 10
#define KMEM_CACHE_TABLE_SIZE (1 << KMEM_CACHE_HASH_BITS)
struct hlist_head kmem_cache_table[KMEM_CACHE_TABLE_SIZE];
struct list_head kmem_cache_list;
static struct rw_semaphore kmem_cache_sem;
#ifdef HAVE_SET_SHRINKER
static struct shrinker *kmem_cache_shrinker;
#else
@@ -114,20 +120,23 @@ static struct shrinker kmem_cache_shrinker = {
};
#endif
/* Function must be called while holding the kmem_cache_cb_sem
/* Function must be called while holding the kmem_cache_sem
* Because kmem_cache_t is an opaque datatype we're forced to
* match pointers to identify specific cache entries.
*/
static kmem_cache_cb_t *
kmem_cache_find_cache_cb(kmem_cache_t *cache)
{
struct hlist_head *head;
struct hlist_node *node;
kmem_cache_cb_t *kcc;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
ASSERT(rwsem_is_locked(&kmem_cache_cb_sem));
ASSERT(rwsem_is_locked(&kmem_cache_sem));
#endif
list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
if (cache == kcc->kcc_cache)
head = &kmem_cache_table[hash_ptr(cache, KMEM_CACHE_HASH_BITS)];
hlist_for_each_entry_rcu(kcc, node, head, kcc_hlist)
if (kcc->kcc_cache == cache)
return kcc;
return NULL;
@@ -152,9 +161,11 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
kcc->kcc_private = priv;
kcc->kcc_vmp = vmp;
atomic_set(&kcc->kcc_ref, 0);
down_write(&kmem_cache_cb_sem);
list_add(&kcc->kcc_list, &kmem_cache_cb_list);
up_write(&kmem_cache_cb_sem);
down_write(&kmem_cache_sem);
hlist_add_head_rcu(&kcc->kcc_hlist, &kmem_cache_table[
hash_ptr(cache, KMEM_CACHE_HASH_BITS)]);
list_add_tail(&kcc->kcc_list, &kmem_cache_list);
up_write(&kmem_cache_sem);
}
return kcc;
@@ -163,12 +174,13 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
static void
kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
{
down_write(&kmem_cache_cb_sem);
down_write(&kmem_cache_sem);
ASSERT(atomic_read(&kcc->kcc_ref) == 0);
list_del(&kcc->kcc_list);
up_write(&kmem_cache_cb_sem);
hlist_del_init(&kcc->kcc_hlist);
list_del_init(&kcc->kcc_list);
up_write(&kmem_cache_sem);
if (kcc){
if (kcc) {
memset(kcc, KCC_POISON, sizeof(*kcc));
kfree(kcc);
}
@@ -208,7 +220,7 @@ kmem_cache_generic_constructor(kmem_cache_t *cache, void *ptr)
/* We can be called with interrupts disabled so it is critical that
* this function and the registered constructor never sleep.
*/
while (!down_read_trylock(&kmem_cache_cb_sem));
while (!down_read_trylock(&kmem_cache_sem));
/* Callback list must be in sync with linux slab caches */
kcc = kmem_cache_find_cache_cb(cache);
@@ -219,7 +231,7 @@ kmem_cache_generic_constructor(kmem_cache_t *cache, void *ptr)
constructor = kcc->kcc_constructor;
private = kcc->kcc_private;
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
if (constructor)
constructor(ptr, private, (int)flags);
@@ -242,7 +254,7 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
/* We can be called with interrupts disabled so it is critical that
* this function and the registered constructor never sleep.
*/
while (!down_read_trylock(&kmem_cache_cb_sem));
while (!down_read_trylock(&kmem_cache_sem));
/* Callback list must be in sync with linux slab caches */
kcc = kmem_cache_find_cache_cb(cache);
@@ -253,7 +265,7 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
destructor = kcc->kcc_destructor;
private = kcc->kcc_private;
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
/* Solaris destructor takes no flags, silently eat them */
if (destructor)
@@ -276,9 +288,9 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
* function in the shim layer for all slab caches. And we always
* attempt to shrink all caches when this generic shrinker is called.
*/
down_read(&kmem_cache_cb_sem);
down_read(&kmem_cache_sem);
list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
list_for_each_entry(kcc, &kmem_cache_list, kcc_list) {
ASSERT(kcc);
ASSERT(kcc->kcc_magic == KCC_MAGIC);
@@ -312,7 +324,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
* was registered with the generic shrinker. This should fake out
* the linux VM when it attempts to shrink caches.
*/
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
return total;
}
@@ -349,6 +361,25 @@ __kmem_cache_create(char *name, size_t size, size_t align,
strcpy(cache_name, name);
/* When your slab is implemented in terms of the slub it
* is possible that similarly sized slab caches will be merged.
* For our implementation we must make sure this never
* happens because we require a unique cache address to
* use as a hash key when looking up the constructor,
* destructor, and shrinker registered for each unique
* type of slab cache. Passing any of the following flags
* will prevent the slub merging.
*
* SLAB_RED_ZONE
* SLAB_POISON
* SLAB_STORE_USER
* SLAB_TRACE
* SLAB_DESTROY_BY_RCU
*/
#ifdef HAVE_SLUB
flags |= SLAB_STORE_USER;
#endif
#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
cache = kmem_cache_create(cache_name, size, align, flags,
kmem_cache_generic_constructor,
@@ -360,22 +391,21 @@ __kmem_cache_create(char *name, size_t size, size_t align,
RETURN(NULL);
/* Register shared shrinker function on initial cache create */
down_read(&kmem_cache_cb_sem);
if (list_empty(&kmem_cache_cb_list)) {
down_read(&kmem_cache_sem);
if (list_empty(&kmem_cache_list)) {
#ifdef HAVE_SET_SHRINKER
kmem_cache_shrinker =
set_shrinker(KMC_DEFAULT_SEEKS,
kmem_cache_generic_shrinker);
kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
kmem_cache_generic_shrinker);
if (kmem_cache_shrinker == NULL) {
kmem_cache_destroy(cache);
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
RETURN(NULL);
}
#else
register_shrinker(&kmem_cache_shrinker);
#endif
}
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
reclaim, priv, vmp);
@@ -405,14 +435,14 @@ __kmem_cache_destroy(kmem_cache_t *cache)
int rc;
ENTRY;
down_read(&kmem_cache_cb_sem);
down_read(&kmem_cache_sem);
kcc = kmem_cache_find_cache_cb(cache);
if (kcc == NULL) {
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
RETURN(-EINVAL);
}
atomic_inc(&kcc->kcc_ref);
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
name = (char *)kmem_cache_name(cache);
@@ -428,15 +458,15 @@ __kmem_cache_destroy(kmem_cache_t *cache)
kfree(name);
/* Unregister generic shrinker on removal of all caches */
down_read(&kmem_cache_cb_sem);
if (list_empty(&kmem_cache_cb_list))
down_read(&kmem_cache_sem);
if (list_empty(&kmem_cache_list))
#ifdef HAVE_SET_SHRINKER
remove_shrinker(kmem_cache_shrinker);
#else
unregister_shrinker(&kmem_cache_shrinker);
#endif
up_read(&kmem_cache_cb_sem);
up_read(&kmem_cache_sem);
RETURN(rc);
}
EXPORT_SYMBOL(__kmem_cache_destroy);
@@ -463,18 +493,18 @@ restart:
GOTO(restart, obj);
}
/* When destructor support is removed we must be careful not to
* use the provided constructor which will end up being called
* more often than the destructor which we only call on free. Thus
* we manually call the proper constructor when there is no destructor.
*/
/* When destructor support is removed we must be careful not to
* use the provided constructor which will end up being called
* more often than the destructor which we only call on free. Thus
* we manually call the proper constructor when there is no destructor.
*/
#ifndef HAVE_KMEM_CACHE_CREATE_DTOR
#ifdef HAVE_3ARG_KMEM_CACHE_CREATE_CTOR
kmem_cache_generic_constructor(obj, cache, flags);
#else
kmem_cache_generic_constructor(cache, obj);
#endif
#endif
#endif /* HAVE_KMEM_CACHE_CREATE_DTOR */
#endif /* HAVE_3ARG_KMEM_CACHE_CREATE_CTOR */
RETURN(obj);
}
@@ -504,30 +534,32 @@ EXPORT_SYMBOL(__kmem_reap);
int
kmem_init(void)
{
int i;
ENTRY;
init_rwsem(&kmem_cache_cb_sem);
INIT_LIST_HEAD(&kmem_cache_cb_list);
init_rwsem(&kmem_cache_sem);
INIT_LIST_HEAD(&kmem_cache_list);
for (i = 0; i < KMEM_CACHE_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kmem_cache_table[i]);
#ifdef DEBUG_KMEM
{
int i;
atomic64_set(&kmem_alloc_used, 0);
atomic64_set(&vmem_alloc_used, 0);
atomic64_set(&kmem_alloc_used, 0);
atomic64_set(&vmem_alloc_used, 0);
spin_lock_init(&kmem_lock);
INIT_LIST_HEAD(&kmem_list);
spin_lock_init(&kmem_lock);
INIT_LIST_HEAD(&kmem_list);
for (i = 0; i < KMEM_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
for (i = 0; i < KMEM_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
spin_lock_init(&vmem_lock);
INIT_LIST_HEAD(&vmem_list);
spin_lock_init(&vmem_lock);
INIT_LIST_HEAD(&vmem_list);
for (i = 0; i < VMEM_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&vmem_table[i]);
for (i = 0; i < VMEM_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&vmem_table[i]);
atomic64_set(&kmem_cache_alloc_failed, 0);
}
atomic64_set(&kmem_cache_alloc_failed, 0);
#endif
RETURN(0);
}


@@ -416,9 +416,9 @@ __kstat_install(kstat_t *ksp)
list_add_tail(&ksp->ks_list, &kstat_list);
spin_unlock(&kstat_lock);
de_module = proc_dir_entry_find(proc_sys_spl_kstat, ksp->ks_module);
de_module = proc_dir_entry_find(proc_spl_kstat, ksp->ks_module);
if (de_module == NULL) {
de_module = proc_mkdir(ksp->ks_module, proc_sys_spl_kstat);
de_module = proc_mkdir(ksp->ks_module, proc_spl_kstat);
if (de_module == NULL)
GOTO(out, rc = -EUNATCH);
}


@@ -59,7 +59,7 @@ spinlock_t mutex_stats_lock;
struct list_head mutex_stats_list;
#endif
void
int
__spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
{
int flags = KM_SLEEP;
@@ -69,8 +69,6 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
ASSERT(ibc == NULL);
ASSERT(mp->km_magic != KM_MAGIC); /* Never double init */
mp->km_magic = KM_MAGIC;
mp->km_owner = NULL;
mp->km_name = NULL;
mp->km_name_size = strlen(name) + 1;
@@ -95,12 +93,12 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
/* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
mp->km_sem = kmem_alloc(sizeof(struct semaphore), flags);
if (mp->km_sem == NULL)
return;
return -ENOMEM;
mp->km_name = kmem_alloc(mp->km_name_size, flags);
if (mp->km_name == NULL) {
kmem_free(mp->km_sem, sizeof(struct semaphore));
return;
return -ENOMEM;
}
sema_init(mp->km_sem, 1);
@@ -111,7 +109,7 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
if (mp->km_stats == NULL) {
kmem_free(mp->km_name, mp->km_name_size);
kmem_free(mp->km_sem, sizeof(struct semaphore));
return;
return -ENOMEM;
}
/* XXX - This appears to be a much more contended lock than I
@@ -124,6 +122,10 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
list_add_tail(&mp->km_list, &mutex_stats_list);
spin_unlock(&mutex_stats_lock);
#endif
mp->km_magic = KM_MAGIC;
mp->km_owner = NULL;
return 0;
}
EXPORT_SYMBOL(__spl_mutex_init);


@@ -39,21 +39,21 @@ static unsigned long table_max = ~0;
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *spl_header = NULL;
#endif /* CONFIG_SYSCTL */
#if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
static struct proc_dir_entry *proc_sys = NULL;
static struct proc_dir_entry *proc_sys_spl = NULL;
#endif
static struct proc_dir_entry *proc_spl = NULL;
#ifdef DEBUG_MUTEX
static struct proc_dir_entry *proc_sys_spl_mutex = NULL;
static struct proc_dir_entry *proc_sys_spl_mutex_stats = NULL;
#endif
static struct proc_dir_entry *proc_spl_mutex = NULL;
static struct proc_dir_entry *proc_spl_mutex_stats = NULL;
#endif /* DEBUG_MUTEX */
#ifdef DEBUG_KMEM
static struct proc_dir_entry *proc_sys_spl_kmem = NULL;
#endif
static struct proc_dir_entry *proc_spl_kmem = NULL;
#endif /* DEBUG_KMEM */
#ifdef DEBUG_KSTAT
struct proc_dir_entry *proc_sys_spl_kstat = NULL;
#endif
#endif
struct proc_dir_entry *proc_spl_kstat = NULL;
#endif /* DEBUG_KSTAT */
#endif /* DEBUG_MUTEX || DEBUG_KMEM || DEBUG_KSTAT */
#ifdef HAVE_CTL_UNNUMBERED
@@ -877,54 +877,50 @@ proc_init(void)
spl_header = spl_register_sysctl_table(spl_root, 0);
if (spl_header == NULL)
RETURN(-EUNATCH);
#endif /* CONFIG_SYSCTL */
#if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
proc_sys = proc_dir_entry_find(&proc_root, "sys");
if (proc_sys == NULL)
proc_spl = proc_mkdir("spl", NULL);
if (proc_spl == NULL)
GOTO(out, rc = -EUNATCH);
proc_sys_spl = proc_dir_entry_find(proc_sys, "spl");
if (proc_sys_spl == NULL)
GOTO(out, rc = -EUNATCH);
#endif
#ifdef DEBUG_MUTEX
proc_sys_spl_mutex = proc_dir_entry_find(proc_sys_spl, "mutex");
if (proc_sys_spl_mutex == NULL)
proc_spl_mutex = proc_mkdir("mutex", proc_spl);
if (proc_spl_mutex == NULL)
GOTO(out, rc = -EUNATCH);
proc_sys_spl_mutex_stats = create_proc_entry("stats_per", 0444,
proc_sys_spl_mutex);
if (proc_sys_spl_mutex_stats == NULL)
proc_spl_mutex_stats = create_proc_entry("stats_per", 0444,
proc_spl_mutex);
if (proc_spl_mutex_stats == NULL)
GOTO(out, rc = -EUNATCH);
proc_sys_spl_mutex_stats->proc_fops = &proc_mutex_operations;
proc_spl_mutex_stats->proc_fops = &proc_mutex_operations;
#endif /* DEBUG_MUTEX */
#ifdef DEBUG_KMEM
proc_sys_spl_kmem = proc_dir_entry_find(proc_sys_spl, "kmem");
if (proc_sys_spl_kmem == NULL)
GOTO(out2, rc = -EUNATCH);
proc_spl_kmem = proc_mkdir("kmem", proc_spl);
if (proc_spl_kmem == NULL)
GOTO(out, rc = -EUNATCH);
#endif /* DEBUG_KMEM */
#ifdef DEBUG_KSTAT
proc_sys_spl_kstat = proc_dir_entry_find(proc_sys_spl, "kstat");
if (proc_sys_spl_kstat == NULL)
GOTO(out2, rc = -EUNATCH);
proc_spl_kstat = proc_mkdir("kstat", proc_spl);
if (proc_spl_kstat == NULL)
GOTO(out, rc = -EUNATCH);
#endif /* DEBUG_KSTAT */
RETURN(rc);
#if defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
out2:
#endif
#ifdef DEBUG_MUTEX
remove_proc_entry("stats_per", proc_sys_spl_mutex);
#endif /* DEBUG_MUTEX */
#if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
out:
#endif
spl_unregister_sysctl_table(spl_header);
if (rc) {
remove_proc_entry("kstat", proc_spl);
remove_proc_entry("kmem", proc_spl);
remove_proc_entry("stats_per", proc_spl_mutex);
remove_proc_entry("mutex", proc_spl);
#ifdef CONFIG_SYSCTL
spl_unregister_sysctl_table(spl_header);
#endif /* CONFIG_SYSCTL */
}
#endif /* DEBUG_MUTEX || DEBUG_KMEM || DEBUG_KSTAT */
RETURN(rc);
}
@@ -933,12 +929,17 @@ proc_fini(void)
{
ENTRY;
#if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
remove_proc_entry("kstat", proc_spl);
remove_proc_entry("kmem", proc_spl);
remove_proc_entry("stats_per", proc_spl_mutex);
remove_proc_entry("mutex", proc_spl);
#endif /* DEBUG_MUTEX || DEBUG_KMEM || DEBUG_KSTAT */
#ifdef CONFIG_SYSCTL
ASSERT(spl_header != NULL);
#ifdef DEBUG_MUTEX
remove_proc_entry("stats_per", proc_sys_spl_mutex);
#endif /* DEBUG_MUTEX */
spl_unregister_sysctl_table(spl_header);
#endif
#endif /* CONFIG_SYSCTL */
EXIT;
}


@@ -37,8 +37,9 @@ if [ ! -f ${spl_module} ] || [ ! -f ${splat_module} ]; then
die "Source tree must be built, run 'make'"
fi
spl_module_params="spl_debug_mask=-1 spl_debug_subsys=-1"
echo "Loading ${spl_module}"
/sbin/insmod ${spl_module} || die "Failed to load ${spl_module}"
/sbin/insmod ${spl_module} ${spl_module_params} || die "Failed to load ${spl_module}"
echo "Loading ${splat_module}"
/sbin/insmod ${splat_module} || die "Unable to load ${splat_module}"