diff --git a/config/spl-build.m4 b/config/spl-build.m4 index 51f7ea376..c52ea5db4 100644 --- a/config/spl-build.m4 +++ b/config/spl-build.m4 @@ -68,6 +68,7 @@ AC_DEFUN([SPL_AC_CONFIG_KERNEL], [ SPL_AC_4ARGS_VFS_RENAME SPL_AC_CRED_STRUCT SPL_AC_GROUPS_SEARCH + SPL_AC_PUT_TASK_STRUCT ]) AC_DEFUN([SPL_AC_MODULE_SYMVERS], [ @@ -1263,7 +1264,7 @@ AC_DEFUN([SPL_AC_CRED_STRUCT], [ ]) dnl # -dnl # Custom SPL patch may export this symbol +dnl # Custom SPL patch may export this symbol. dnl # AC_DEFUN([SPL_AC_GROUPS_SEARCH], [ SPL_CHECK_SYMBOL_EXPORT( @@ -1273,3 +1274,16 @@ AC_DEFUN([SPL_AC_GROUPS_SEARCH], [ [groups_search() is available])], []) ]) + +dnl # +dnl # 2.6.x API change, +dnl # __put_task_struct() was exported in RHEL5 but unavailable elsewhere. +dnl # +AC_DEFUN([SPL_AC_PUT_TASK_STRUCT], [ + SPL_CHECK_SYMBOL_EXPORT( + [__put_task_struct], + [], + [AC_DEFINE(HAVE_PUT_TASK_STRUCT, 1, + [__put_task_struct() is available])], + []) +]) diff --git a/configure b/configure index 3ed6af191..6b9ea364e 100755 --- a/configure +++ b/configure @@ -21916,6 +21916,47 @@ _ACEOF fi + + echo "$as_me:$LINENO: checking whether symbol __put_task_struct is exported" >&5 +echo $ECHO_N "checking whether symbol __put_task_struct is exported... $ECHO_C" >&6 + grep -q -E '[[:space:]]__put_task_struct[[:space:]]' \ + $LINUX_OBJ/Module*.symvers 2>/dev/null + rc=$? + if test $rc -ne 0; then + export=0 + for file in ; do + grep -q -E "EXPORT_SYMBOL.*(__put_task_struct)" \ + "$LINUX_OBJ/$file" 2>/dev/null + rc=$? + if test $rc -eq 0; then + export=1 + break; + fi + done + if test $export -eq 0; then + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + + else + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +cat >>confdefs.h <<\_ACEOF +#define HAVE_PUT_TASK_STRUCT 1 +_ACEOF + + fi + else + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +cat >>confdefs.h <<\_ACEOF +#define HAVE_PUT_TASK_STRUCT 1 +_ACEOF + + fi + + ;; user) ;; all) @@ -24884,6 +24925,47 @@ _ACEOF + echo "$as_me:$LINENO: checking whether symbol __put_task_struct is exported" >&5 +echo $ECHO_N "checking whether symbol __put_task_struct is exported... $ECHO_C" >&6 + grep -q -E '[[:space:]]__put_task_struct[[:space:]]' \ + $LINUX_OBJ/Module*.symvers 2>/dev/null + rc=$? + if test $rc -ne 0; then + export=0 + for file in ; do + grep -q -E "EXPORT_SYMBOL.*(__put_task_struct)" \ + "$LINUX_OBJ/$file" 2>/dev/null + rc=$? + if test $rc -eq 0; then + export=1 + break; + fi + done + if test $export -eq 0; then + echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6 + + else + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +cat >>confdefs.h <<\_ACEOF +#define HAVE_PUT_TASK_STRUCT 1 +_ACEOF + + fi + else + echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6 + +cat >>confdefs.h <<\_ACEOF +#define HAVE_PUT_TASK_STRUCT 1 +_ACEOF + + fi + + + ;; *) echo "$as_me:$LINENO: result: Error!" >&5 diff --git a/include/sys/rwlock.h b/include/sys/rwlock.h index 2e8624191..c6d66ceaa 100644 --- a/include/sys/rwlock.h +++ b/include/sys/rwlock.h @@ -1,7 +1,7 @@ /* * This file is part of the SPL: Solaris Porting Layer. * - * Copyright (c) 2008 Lawrence Livermore National Security, LLC. + * Copyright (c) 2009 Lawrence Livermore National Security, LLC. 
* Produced at Lawrence Livermore National Laboratory * Written by: * Brian Behlendorf , @@ -30,68 +30,89 @@ #include #include #include -#include #include -#include - -#ifdef __cplusplus -extern "C" { -#endif typedef enum { - RW_DRIVER = 2, /* driver (DDI) rwlock */ - RW_DEFAULT = 4 /* kernel default rwlock */ + RW_DRIVER = 2, + RW_DEFAULT = 4 } krw_type_t; typedef enum { - RW_WRITER, - RW_READER + RW_NONE = 0, + RW_WRITER = 1, + RW_READER = 2 } krw_t; +typedef struct rw_semaphore krwlock_t; -#define RW_MAGIC 0x3423645a -#define RW_POISON 0xa6 - -typedef struct { - int32_t rw_magic; - int32_t rw_name_size; - char *rw_name; - struct rw_semaphore rw_sem; - struct task_struct *rw_owner; /* holder of the write lock */ -} krwlock_t; - -extern void __rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg); -extern void __rw_destroy(krwlock_t *rwlp); -extern int __rw_tryenter(krwlock_t *rwlp, krw_t rw); -extern void __rw_enter(krwlock_t *rwlp, krw_t rw); -extern void __rw_exit(krwlock_t *rwlp); -extern void __rw_downgrade(krwlock_t *rwlp); -extern int __rw_tryupgrade(krwlock_t *rwlp); -extern kthread_t *__rw_owner(krwlock_t *rwlp); -extern int __rw_read_held(krwlock_t *rwlp); -extern int __rw_write_held(krwlock_t *rwlp); -extern int __rw_lock_held(krwlock_t *rwlp); - -#define rw_init(rwlp, name, type, arg) \ -({ \ - if ((name) == NULL) \ - __rw_init(rwlp, #rwlp, type, arg); \ - else \ - __rw_init(rwlp, name, type, arg); \ -}) -#define rw_destroy(rwlp) __rw_destroy(rwlp) -#define rw_tryenter(rwlp, rw) __rw_tryenter(rwlp, rw) -#define rw_enter(rwlp, rw) __rw_enter(rwlp, rw) -#define rw_exit(rwlp) __rw_exit(rwlp) -#define rw_downgrade(rwlp) __rw_downgrade(rwlp) -#define rw_tryupgrade(rwlp) __rw_tryupgrade(rwlp) -#define rw_owner(rwlp) __rw_owner(rwlp) -#define RW_READ_HELD(rwlp) __rw_read_held(rwlp) -#define RW_WRITE_HELD(rwlp) __rw_write_held(rwlp) -#define RW_LOCK_HELD(rwlp) __rw_lock_held(rwlp) - -#ifdef __cplusplus -} +#define rw_init(rwlp, name, type, arg) init_rwsem(rwlp) +#define rw_destroy(rwlp) ((void)0) +#define rw_downgrade(rwlp) downgrade_write(rwlp) +#define RW_LOCK_HELD(rwlp) rwsem_is_locked(rwlp) +/* + * the rw-semaphore definition + * - if activity/count is 0 then there are no active readers or writers + * - if activity/count is +ve then that is the number of active readers + * - if activity/count is -1 then there is one active writer + */ +#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK) +# define RW_COUNT(rwlp) ((rwlp)->activity) +# define RW_READ_HELD(rwlp) ((RW_COUNT(rwlp) > 0) ? RW_COUNT(rwlp) : 0) +# define RW_WRITE_HELD(rwlp) ((RW_COUNT(rwlp) < 0)) +# define rw_exit_locked(rwlp) __up_read_locked(rwlp) +# define rw_tryenter_locked(rwlp) __down_write_trylock_locked(rwlp) +void __up_read_locked(struct rw_semaphore *); +int __down_write_trylock_locked(struct rw_semaphore *); +#else +# define RW_COUNT(rwlp) ((rwlp)->count & RWSEM_ACTIVE_MASK) +# define RW_READ_HELD(rwlp) ((RW_COUNT(rwlp) > 0) ? 
RW_COUNT(rwlp) : 0) +# define RW_WRITE_HELD(rwlp) ((RW_COUNT(rwlp) < 0)) +# define rw_exit_locked(rwlp) up_read(rwlp) +# define rw_tryenter_locked(rwlp) down_write_trylock(rwlp) #endif +#define rw_tryenter(rwlp, rw) \ +({ \ + int _rc_ = 0; \ + switch (rw) { \ + case RW_READER: _rc_ = down_read_trylock(rwlp); break; \ + case RW_WRITER: _rc_ = down_write_trylock(rwlp); break; \ + default: SBUG(); \ + } \ + _rc_; \ +}) + +#define rw_enter(rwlp, rw) \ +({ \ + switch (rw) { \ + case RW_READER: down_read(rwlp); break; \ + case RW_WRITER: down_write(rwlp); break; \ + default: SBUG(); \ + } \ +}) + +#define rw_exit(rwlp) \ +({ \ + if (RW_READ_HELD(rwlp)) \ + up_read(rwlp); \ + else if (RW_WRITE_HELD(rwlp)) \ + up_write(rwlp); \ + else \ + SBUG(); \ +}) + +#define rw_tryupgrade(rwlp) \ +({ \ + unsigned long flags; \ + int _rc_ = 0; \ + spin_lock_irqsave(&(rwlp)->wait_lock, flags); \ + if (list_empty(&(rwlp)->wait_list) && (RW_READ_HELD(rwlp) == 1)) { \ + rw_exit_locked(rwlp); \ + _rc_ = rw_tryenter_locked(rwlp); \ + ASSERT(_rc_); \ + } \ + spin_unlock_irqrestore(&(rwlp)->wait_lock, flags); \ + _rc_; \ +}) + #endif /* _SPL_RWLOCK_H */ diff --git a/module/spl/spl-generic.c b/module/spl/spl-generic.c index b81dabc56..8e0ef9263 100644 --- a/module/spl/spl-generic.c +++ b/module/spl/spl-generic.c @@ -253,6 +253,22 @@ ddi_copyout(const void *from, void *to, size_t len, int flags) } EXPORT_SYMBOL(ddi_copyout); +#ifndef HAVE_PUT_TASK_STRUCT +/* + * This is only a stub function which should never be used. The SPL should + * never be putting away the last reference on a task structure so this will + * not be called. However, we still need to define it so the module does not + * have undefined symbol at load time. That all said if this impossible + * thing does somehow happen SBUG() immediately so we know about it. + */ +void +__put_task_struct(struct task_struct *t) +{ + SBUG(); +} +EXPORT_SYMBOL(__put_task_struct); +#endif /* HAVE_PUT_TASK_STRUCT */ + struct new_utsname *__utsname(void) { #ifdef HAVE_INIT_UTSNAME diff --git a/module/spl/spl-rwlock.c b/module/spl/spl-rwlock.c index 07fc2aae4..8ff66bf9f 100644 --- a/module/spl/spl-rwlock.c +++ b/module/spl/spl-rwlock.c @@ -33,6 +33,11 @@ #define DEBUG_SUBSYSTEM S_RWLOCK #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK + +/* + * From lib/rwsem-spinlock.c but modified such that the caller is + * responsible for acquiring and dropping the sem->wait_lock. 
+ */ struct rwsem_waiter { struct list_head list; struct task_struct *task; @@ -40,322 +45,49 @@ struct rwsem_waiter { #define RWSEM_WAITING_FOR_READ 0x00000001 #define RWSEM_WAITING_FOR_WRITE 0x00000002 }; + /* wake a single writer */ static struct rw_semaphore * __rwsem_wake_one_writer_locked(struct rw_semaphore *sem) { - struct rwsem_waiter *waiter; - struct task_struct *tsk; + struct rwsem_waiter *waiter; + struct task_struct *tsk; - sem->activity = -1; + sem->activity = -1; - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); - list_del(&waiter->list); + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list); + list_del(&waiter->list); - tsk = waiter->task; - smp_mb(); - waiter->task = NULL; - wake_up_process(tsk); - put_task_struct(tsk); - return sem; + tsk = waiter->task; + smp_mb(); + waiter->task = NULL; + wake_up_process(tsk); + put_task_struct(tsk); + return sem; } /* release a read lock on the semaphore */ -static void +void __up_read_locked(struct rw_semaphore *sem) { - if (--sem->activity == 0 && !list_empty(&sem->wait_list)) - (void)__rwsem_wake_one_writer_locked(sem); + if (--sem->activity == 0 && !list_empty(&sem->wait_list)) + (void)__rwsem_wake_one_writer_locked(sem); } +EXPORT_SYMBOL(__up_read_locked); /* trylock for writing -- returns 1 if successful, 0 if contention */ -static int +int __down_write_trylock_locked(struct rw_semaphore *sem) { - int ret = 0; + int ret = 0; - if (sem->activity == 0 && list_empty(&sem->wait_list)) { - /* granted */ - sem->activity = -1; - ret = 1; - } + if (sem->activity == 0 && list_empty(&sem->wait_list)) { + sem->activity = -1; + ret = 1; + } - return ret; + return ret; } +EXPORT_SYMBOL(__down_write_trylock_locked); + #endif - -void -__rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg) -{ - int flags = KM_SLEEP; - - ASSERT(rwlp); - ASSERT(name); - ASSERT(type == RW_DEFAULT); /* XXX no irq handler use */ - ASSERT(arg == NULL); /* XXX no irq handler use */ - - rwlp->rw_magic = RW_MAGIC; - rwlp->rw_owner = NULL; - rwlp->rw_name = NULL; - rwlp->rw_name_size = strlen(name) + 1; - - /* We may be called when there is a non-zero preempt_count or - * interrupts are disabled is which case we must not sleep. - */ - if (current_thread_info()->preempt_count || irqs_disabled()) - flags = KM_NOSLEEP; - - rwlp->rw_name = kmem_alloc(rwlp->rw_name_size, flags); - if (rwlp->rw_name == NULL) - return; - - init_rwsem(&rwlp->rw_sem); - strcpy(rwlp->rw_name, name); -} -EXPORT_SYMBOL(__rw_init); - -void -__rw_destroy(krwlock_t *rwlp) -{ - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - ASSERT(rwlp->rw_owner == NULL); - spin_lock(&rwlp->rw_sem.wait_lock); - ASSERT(list_empty(&rwlp->rw_sem.wait_list)); - spin_unlock(&rwlp->rw_sem.wait_lock); - - kmem_free(rwlp->rw_name, rwlp->rw_name_size); - - memset(rwlp, RW_POISON, sizeof(krwlock_t)); -} -EXPORT_SYMBOL(__rw_destroy); - -/* Return 0 if the lock could not be obtained without blocking. */ -int -__rw_tryenter(krwlock_t *rwlp, krw_t rw) -{ - int rc = 0; - ENTRY; - - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - - switch (rw) { - /* these functions return 1 if success, 0 if contention */ - case RW_READER: - /* Here the Solaris code would return 0 - * if there were any write waiters. Specifically - * thinking about the case where readers may have - * the lock and we would also allow this thread - * to grab the read lock with a writer waiting in the - * queue. This doesn't seem like a correctness - * issue, so just call down_read_trylock() - * for the test. 
We may have to revisit this if - * it becomes an issue */ - rc = down_read_trylock(&rwlp->rw_sem); - break; - case RW_WRITER: - rc = down_write_trylock(&rwlp->rw_sem); - if (rc) { - /* there better not be anyone else - * holding the write lock here */ - ASSERT(rwlp->rw_owner == NULL); - rwlp->rw_owner = current; - } - break; - default: - SBUG(); - } - - RETURN(rc); -} -EXPORT_SYMBOL(__rw_tryenter); - -void -__rw_enter(krwlock_t *rwlp, krw_t rw) -{ - ENTRY; - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - - switch (rw) { - case RW_READER: - /* Here the Solaris code would block - * if there were any write waiters. Specifically - * thinking about the case where readers may have - * the lock and we would also allow this thread - * to grab the read lock with a writer waiting in the - * queue. This doesn't seem like a correctness - * issue, so just call down_read() - * for the test. We may have to revisit this if - * it becomes an issue */ - down_read(&rwlp->rw_sem); - break; - case RW_WRITER: - down_write(&rwlp->rw_sem); - - /* there better not be anyone else - * holding the write lock here */ - ASSERT(rwlp->rw_owner == NULL); - rwlp->rw_owner = current; - break; - default: - SBUG(); - } - EXIT; -} -EXPORT_SYMBOL(__rw_enter); - -void -__rw_exit(krwlock_t *rwlp) -{ - ENTRY; - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - - /* rw_owner is held by current - * thread iff it is a writer */ - if (rwlp->rw_owner == current) { - rwlp->rw_owner = NULL; - up_write(&rwlp->rw_sem); - } else { - up_read(&rwlp->rw_sem); - } - EXIT; -} -EXPORT_SYMBOL(__rw_exit); - -void -__rw_downgrade(krwlock_t *rwlp) -{ - ENTRY; - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - ASSERT(rwlp->rw_owner == current); - - rwlp->rw_owner = NULL; - downgrade_write(&rwlp->rw_sem); - EXIT; -} -EXPORT_SYMBOL(__rw_downgrade); - -/* Return 0 if unable to perform the upgrade. - * Might be wise to fix the caller - * to acquire the write lock first? - */ -int -__rw_tryupgrade(krwlock_t *rwlp) -{ - int rc = 0; - ENTRY; - - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - - spin_lock(&rwlp->rw_sem.wait_lock); - - /* Check if there is anyone waiting for the - * lock. If there is, then we know we should - * not try to upgrade the lock */ - if (!list_empty(&rwlp->rw_sem.wait_list)) { - spin_unlock(&rwlp->rw_sem.wait_lock); - RETURN(0); - } -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK - /* Note that activity is protected by - * the wait_lock. Don't try to upgrade - * if there are multiple readers currently - * holding the lock */ - if (rwlp->rw_sem.activity > 1) { -#else - /* Don't try to upgrade - * if there are multiple readers currently - * holding the lock */ - if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) { -#endif - spin_unlock(&rwlp->rw_sem.wait_lock); - RETURN(0); - } - -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK - /* Here it should be safe to drop the - * read lock and reacquire it for writing since - * we know there are no waiters */ - __up_read_locked(&rwlp->rw_sem); - - /* returns 1 if success, 0 if contention */ - rc = __down_write_trylock_locked(&rwlp->rw_sem); -#else - /* Here it should be safe to drop the - * read lock and reacquire it for writing since - * we know there are no waiters */ - up_read(&rwlp->rw_sem); - - /* returns 1 if success, 0 if contention */ - rc = down_write_trylock(&rwlp->rw_sem); -#endif - - /* Check if upgrade failed. 
Should not ever happen - * if we got to this point */ - ASSERT(rc); - ASSERT(rwlp->rw_owner == NULL); - rwlp->rw_owner = current; - spin_unlock(&rwlp->rw_sem.wait_lock); - - RETURN(1); -} -EXPORT_SYMBOL(__rw_tryupgrade); - -kthread_t * -__rw_owner(krwlock_t *rwlp) -{ - ENTRY; - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - RETURN(rwlp->rw_owner); -} -EXPORT_SYMBOL(__rw_owner); - -int -__rw_read_held(krwlock_t *rwlp) -{ - ENTRY; - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - RETURN(__rw_lock_held(rwlp) && rwlp->rw_owner == NULL); -} -EXPORT_SYMBOL(__rw_read_held); - -int -__rw_write_held(krwlock_t *rwlp) -{ - ENTRY; - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - RETURN(rwlp->rw_owner == current); -} -EXPORT_SYMBOL(__rw_write_held); - -int -__rw_lock_held(krwlock_t *rwlp) -{ - int rc = 0; - ENTRY; - - ASSERT(rwlp); - ASSERT(rwlp->rw_magic == RW_MAGIC); - - spin_lock_irq(&(rwlp->rw_sem.wait_lock)); -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK - if (rwlp->rw_sem.activity != 0) { -#else - if (rwlp->rw_sem.count != 0) { -#endif - rc = 1; - } - - spin_unlock_irq(&(rwlp->rw_sem.wait_lock)); - - RETURN(rc); -} -EXPORT_SYMBOL(__rw_lock_held); diff --git a/module/splat/splat-internal.h b/module/splat/splat-internal.h index c1c84d8d1..fea78e043 100644 --- a/module/splat/splat-internal.h +++ b/module/splat/splat-internal.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -195,6 +196,15 @@ typedef struct splat_info { #define splat_vprint(file, test, format, args...) \ splat_print(file, "%*s: " format, SPLAT_NAME_SIZE, test, args) +#define splat_locked_test(lock, test) \ +({ \ + int _rc_; \ + spin_lock(lock); \ + _rc_ = (test) ? 1 : 0; \ + spin_unlock(lock); \ + _rc_; \ +}) + splat_subsystem_t *splat_condvar_init(void); splat_subsystem_t *splat_kmem_init(void); splat_subsystem_t *splat_mutex_init(void); diff --git a/module/splat/splat-rwlock.c b/module/splat/splat-rwlock.c index 7f19dfb32..cf3670370 100644 --- a/module/splat/splat-rwlock.c +++ b/module/splat/splat-rwlock.c @@ -4,9 +4,9 @@ * Copyright (c) 2008 Lawrence Livermore National Security, LLC. 
* Produced at Lawrence Livermore National Laboratory * Written by: - * Brian Behlendorf , - * Herb Wartens , - * Jim Garlick + * Brian Behlendorf , + * Herb Wartens , + * Jim Garlick * UCRL-CODE-235197 * * This is free software; you can redistribute it and/or modify it @@ -30,210 +30,168 @@ #define SPLAT_RWLOCK_DESC "Kernel RW Lock Tests" #define SPLAT_RWLOCK_TEST1_ID 0x0701 -#define SPLAT_RWLOCK_TEST1_NAME "rwtest1" -#define SPLAT_RWLOCK_TEST1_DESC "Multiple Readers One Writer" +#define SPLAT_RWLOCK_TEST1_NAME "N-rd/1-wr" +#define SPLAT_RWLOCK_TEST1_DESC "Multiple readers one writer" #define SPLAT_RWLOCK_TEST2_ID 0x0702 -#define SPLAT_RWLOCK_TEST2_NAME "rwtest2" -#define SPLAT_RWLOCK_TEST2_DESC "Multiple Writers" +#define SPLAT_RWLOCK_TEST2_NAME "0-rd/N-wr" +#define SPLAT_RWLOCK_TEST2_DESC "Multiple writers" #define SPLAT_RWLOCK_TEST3_ID 0x0703 -#define SPLAT_RWLOCK_TEST3_NAME "rwtest3" -#define SPLAT_RWLOCK_TEST3_DESC "Owner Verification" +#define SPLAT_RWLOCK_TEST3_NAME "held" +#define SPLAT_RWLOCK_TEST3_DESC "RW_{LOCK|READ|WRITE}_HELD" #define SPLAT_RWLOCK_TEST4_ID 0x0704 -#define SPLAT_RWLOCK_TEST4_NAME "rwtest4" -#define SPLAT_RWLOCK_TEST4_DESC "Trylock Test" +#define SPLAT_RWLOCK_TEST4_NAME "tryenter" +#define SPLAT_RWLOCK_TEST4_DESC "Tryenter" #define SPLAT_RWLOCK_TEST5_ID 0x0705 -#define SPLAT_RWLOCK_TEST5_NAME "rwtest5" -#define SPLAT_RWLOCK_TEST5_DESC "Write Downgrade Test" +#define SPLAT_RWLOCK_TEST5_NAME "rw_downgrade" +#define SPLAT_RWLOCK_TEST5_DESC "Write downgrade" #define SPLAT_RWLOCK_TEST6_ID 0x0706 -#define SPLAT_RWLOCK_TEST6_NAME "rwtest6" -#define SPLAT_RWLOCK_TEST6_DESC "Read Upgrade Test" +#define SPLAT_RWLOCK_TEST6_NAME "rw_tryupgrade" +#define SPLAT_RWLOCK_TEST6_DESC "Read upgrade" #define SPLAT_RWLOCK_TEST_MAGIC 0x115599DDUL #define SPLAT_RWLOCK_TEST_NAME "rwlock_test" +#define SPLAT_RWLOCK_TEST_TASKQ "rwlock_taskq" #define SPLAT_RWLOCK_TEST_COUNT 8 #define SPLAT_RWLOCK_RELEASE_INIT 0 -#define SPLAT_RWLOCK_RELEASE_WRITERS 1 -#define SPLAT_RWLOCK_RELEASE_READERS 2 +#define SPLAT_RWLOCK_RELEASE_WR 1 +#define SPLAT_RWLOCK_RELEASE_RD 2 typedef struct rw_priv { - unsigned long rw_magic; - struct file *rw_file; - krwlock_t rwl; - spinlock_t rw_priv_lock; + unsigned long rw_magic; + struct file *rw_file; + krwlock_t rw_rwlock; + spinlock_t rw_lock; wait_queue_head_t rw_waitq; - atomic_t rw_completed; - atomic_t rw_acquired; - atomic_t rw_waiters; - atomic_t rw_release; + int rw_completed; + int rw_holders; + int rw_waiters; + int rw_release; + int rw_rc; + krw_type_t rw_type; } rw_priv_t; typedef struct rw_thr { - int rwt_id; const char *rwt_name; rw_priv_t *rwt_rwp; - int rwt_rc; + int rwt_id; } rw_thr_t; -static inline void -splat_rwlock_sleep(signed long delay) +void splat_init_rw_priv(rw_priv_t *rwp, struct file *file) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(delay); + rwp->rw_magic = SPLAT_RWLOCK_TEST_MAGIC; + rwp->rw_file = file; + rw_init(&rwp->rw_rwlock, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL); + spin_lock_init(&rwp->rw_lock); + init_waitqueue_head(&rwp->rw_waitq); + rwp->rw_completed = 0; + rwp->rw_holders = 0; + rwp->rw_waiters = 0; + rwp->rw_release = SPLAT_RWLOCK_RELEASE_INIT; + rwp->rw_rc = 0; + rwp->rw_type = 0; } -#define splat_rwlock_lock_and_test(lock,test) \ -({ \ - int ret = 0; \ - \ - spin_lock(lock); \ - ret = (test) ? 
1 : 0; \ - spin_unlock(lock); \ - ret; \ -}) - -void splat_init_rw_priv(rw_priv_t *rwv, struct file *file) -{ - rwv->rw_magic = SPLAT_RWLOCK_TEST_MAGIC; - rwv->rw_file = file; - spin_lock_init(&rwv->rw_priv_lock); - init_waitqueue_head(&rwv->rw_waitq); - atomic_set(&rwv->rw_completed, 0); - atomic_set(&rwv->rw_acquired, 0); - atomic_set(&rwv->rw_waiters, 0); - atomic_set(&rwv->rw_release, SPLAT_RWLOCK_RELEASE_INIT); - - /* Initialize the read/write lock */ - rw_init(&rwv->rwl, SPLAT_RWLOCK_TEST_NAME, RW_DEFAULT, NULL); -} - -int -splat_rwlock_test1_writer_thread(void *arg) +static int +splat_rwlock_wr_thr(void *arg) { rw_thr_t *rwt = (rw_thr_t *)arg; - rw_priv_t *rwv = rwt->rwt_rwp; - uint8_t rnd = 0; + rw_priv_t *rwp = rwt->rwt_rwp; + uint8_t rnd; char name[16]; - ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); - snprintf(name, sizeof(name), "%s%d", - SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id); + ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); + snprintf(name, sizeof(name), "rwlock_wr_thr%d", rwt->rwt_id); daemonize(name); get_random_bytes((void *)&rnd, 1); - splat_rwlock_sleep(rnd * HZ / 1000); + msleep((unsigned int)rnd); - spin_lock(&rwv->rw_priv_lock); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s writer thread trying to acquire rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - atomic_inc(&rwv->rw_waiters); - spin_unlock(&rwv->rw_priv_lock); + splat_vprint(rwp->rw_file, rwt->rwt_name, + "%s trying to acquire rwlock (%d holding/%d waiting)\n", + name, rwp->rw_holders, rwp->rw_waiters); + spin_lock(&rwp->rw_lock); + rwp->rw_waiters++; + spin_unlock(&rwp->rw_lock); + rw_enter(&rwp->rw_rwlock, RW_WRITER); - /* Take the semaphore for writing - * release it when we are told to */ - rw_enter(&rwv->rwl, RW_WRITER); + spin_lock(&rwp->rw_lock); + rwp->rw_waiters--; + rwp->rw_holders++; + spin_unlock(&rwp->rw_lock); + splat_vprint(rwp->rw_file, rwt->rwt_name, + "%s acquired rwlock (%d holding/%d waiting)\n", + name, rwp->rw_holders, rwp->rw_waiters); - spin_lock(&rwv->rw_priv_lock); - atomic_dec(&rwv->rw_waiters); - atomic_inc(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s writer thread acquired rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); + /* Wait for control thread to signal we can release the write lock */ + wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock, + rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR)); - /* Wait here until the control thread - * says we can release the write lock */ - wait_event_interruptible(rwv->rw_waitq, - splat_rwlock_lock_and_test(&rwv->rw_priv_lock, - atomic_read(&rwv->rw_release) == - SPLAT_RWLOCK_RELEASE_WRITERS)); - spin_lock(&rwv->rw_priv_lock); - atomic_inc(&rwv->rw_completed); - atomic_dec(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s writer thread dropped rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); + spin_lock(&rwp->rw_lock); + rwp->rw_completed++; + rwp->rw_holders--; + spin_unlock(&rwp->rw_lock); + splat_vprint(rwp->rw_file, rwt->rwt_name, + "%s dropped rwlock (%d holding/%d waiting)\n", + name, rwp->rw_holders, rwp->rw_waiters); + + rw_exit(&rwp->rw_rwlock); - /* Release the semaphore */ - rw_exit(&rwv->rwl); return 0; } -int -splat_rwlock_test1_reader_thread(void *arg) +static int 
+splat_rwlock_rd_thr(void *arg) { rw_thr_t *rwt = (rw_thr_t *)arg; - rw_priv_t *rwv = rwt->rwt_rwp; - uint8_t rnd = 0; + rw_priv_t *rwp = rwt->rwt_rwp; + uint8_t rnd; char name[16]; - ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); - snprintf(name, sizeof(name), "%s%d", - SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id); + ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); + snprintf(name, sizeof(name), "rwlock_rd_thr%d", rwt->rwt_id); daemonize(name); get_random_bytes((void *)&rnd, 1); - splat_rwlock_sleep(rnd * HZ / 1000); + msleep((unsigned int)rnd); - /* Don't try and and take the semaphore until - * someone else has already acquired it */ - wait_event_interruptible(rwv->rw_waitq, - splat_rwlock_lock_and_test(&rwv->rw_priv_lock, - atomic_read(&rwv->rw_acquired) > 0)); + /* Don't try and take the semaphore until after someone has it */ + wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock, + rwp->rw_holders > 0)); - spin_lock(&rwv->rw_priv_lock); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread trying to acquire rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - atomic_inc(&rwv->rw_waiters); - spin_unlock(&rwv->rw_priv_lock); + splat_vprint(rwp->rw_file, rwt->rwt_name, + "%s trying to acquire rwlock (%d holding/%d waiting)\n", + name, rwp->rw_holders, rwp->rw_waiters); + spin_lock(&rwp->rw_lock); + rwp->rw_waiters++; + spin_unlock(&rwp->rw_lock); + rw_enter(&rwp->rw_rwlock, RW_READER); - /* Take the semaphore for reading - * release it when we are told to */ - rw_enter(&rwv->rwl, RW_READER); + spin_lock(&rwp->rw_lock); + rwp->rw_waiters--; + rwp->rw_holders++; + spin_unlock(&rwp->rw_lock); + splat_vprint(rwp->rw_file, rwt->rwt_name, + "%s acquired rwlock (%d holding/%d waiting)\n", + name, rwp->rw_holders, rwp->rw_waiters); - spin_lock(&rwv->rw_priv_lock); - atomic_dec(&rwv->rw_waiters); - atomic_inc(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread acquired rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); + /* Wait for control thread to signal we can release the read lock */ + wait_event_interruptible(rwp->rw_waitq, splat_locked_test(&rwp->rw_lock, + rwp->rw_release == SPLAT_RWLOCK_RELEASE_RD)); - /* Wait here until the control thread - * says we can release the read lock */ - wait_event_interruptible(rwv->rw_waitq, - splat_rwlock_lock_and_test(&rwv->rw_priv_lock, - atomic_read(&rwv->rw_release) == - SPLAT_RWLOCK_RELEASE_READERS)); + spin_lock(&rwp->rw_lock); + rwp->rw_completed++; + rwp->rw_holders--; + spin_unlock(&rwp->rw_lock); + splat_vprint(rwp->rw_file, rwt->rwt_name, + "%s dropped rwlock (%d holding/%d waiting)\n", + name, rwp->rw_holders, rwp->rw_waiters); - spin_lock(&rwv->rw_priv_lock); - atomic_inc(&rwv->rw_completed); - atomic_dec(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread dropped rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); + rw_exit(&rwp->rw_rwlock); - /* Release the semaphore */ - rw_exit(&rwv->rwl); return 0; } @@ -243,543 +201,467 @@ splat_rwlock_test1(struct file *file, void *arg) int i, count = 0, rc = 0; long pids[SPLAT_RWLOCK_TEST_COUNT]; rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT]; - rw_priv_t rwv; + rw_priv_t *rwp; - /* Initialize private data including the rwlock */ - 
splat_init_rw_priv(&rwv, file); + rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL); + if (rwp == NULL) + return -ENOMEM; + + splat_init_rw_priv(rwp, file); /* Create some threads, the exact number isn't important just as * long as we know how many we managed to create and should expect. */ + + + for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) { - rwt[i].rwt_rwp = &rwv; + rwt[i].rwt_rwp = rwp; rwt[i].rwt_id = i; rwt[i].rwt_name = SPLAT_RWLOCK_TEST1_NAME; - rwt[i].rwt_rc = 0; - /* The first thread will be a writer */ - if (i == 0) { - pids[i] = kernel_thread(splat_rwlock_test1_writer_thread, - &rwt[i], 0); - } else { - pids[i] = kernel_thread(splat_rwlock_test1_reader_thread, - &rwt[i], 0); - } + /* The first thread will be the writer */ + if (i == 0) + pids[i] = kernel_thread(splat_rwlock_wr_thr, &rwt[i], 0); + else + pids[i] = kernel_thread(splat_rwlock_rd_thr, &rwt[i], 0); - if (pids[i] >= 0) { + if (pids[i] >= 0) count++; - } } - /* Once the writer has the lock, release the readers */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, atomic_read(&rwv.rw_acquired) <= 0)) { - splat_rwlock_sleep(1 * HZ); + /* Wait for the writer */ + while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders == 0)) { + wake_up_interruptible(&rwp->rw_waitq); + msleep(100); } - wake_up_interruptible(&rwv.rw_waitq); - /* Ensure that there is only 1 writer and all readers are waiting */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_acquired) != 1 || - atomic_read(&rwv.rw_waiters) != - SPLAT_RWLOCK_TEST_COUNT - 1)) { - - splat_rwlock_sleep(1 * HZ); + /* Wait for 'count-1' readers */ + while (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters < count - 1)) { + wake_up_interruptible(&rwp->rw_waitq); + msleep(100); } - /* Relase the writer */ - spin_lock(&rwv.rw_priv_lock); - atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS); - spin_unlock(&rwv.rw_priv_lock); - wake_up_interruptible(&rwv.rw_waitq); - /* Now ensure that there are multiple reader threads holding the lock */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_acquired) <= 1)) { - splat_rwlock_sleep(1 * HZ); + /* Verify there is only one lock holder */ + if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders) != 1) { + splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only 1 holder " + "expected for rwlock (%d holding/%d waiting)\n", + rwp->rw_holders, rwp->rw_waiters); + rc = -EINVAL; } - /* Release the readers */ - spin_lock(&rwv.rw_priv_lock); - atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_READERS); - spin_unlock(&rwv.rw_priv_lock); - wake_up_interruptible(&rwv.rw_waitq); + + /* Verify 'count-1' readers */ + if (splat_locked_test(&rwp->rw_lock, rwp->rw_waiters != count - 1)) { + splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d waiters " + "expected for rwlock (%d holding/%d waiting)\n", + count - 1, rwp->rw_holders, rwp->rw_waiters); + rc = -EINVAL; + } + + /* Signal the writer to release, allows readers to acquire */ + spin_lock(&rwp->rw_lock); + rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR; + wake_up_interruptible(&rwp->rw_waitq); + spin_unlock(&rwp->rw_lock); + + /* Wait for 'count-1' readers to hold the lock */ + while (splat_locked_test(&rwp->rw_lock, rwp->rw_holders < count - 1)) { + wake_up_interruptible(&rwp->rw_waitq); + msleep(100); + } + + /* Verify there are 'count-1' readers */ + if (splat_locked_test(&rwp->rw_lock, rwp->rw_holders != count - 1)) { + splat_vprint(file, SPLAT_RWLOCK_TEST1_NAME, "Only %d holders " + "expected for rwlock (%d holding/%d waiting)\n", + count - 
1, rwp->rw_holders, rwp->rw_waiters); + rc = -EINVAL; + } + + /* Release 'count-1' readers */ + spin_lock(&rwp->rw_lock); + rwp->rw_release = SPLAT_RWLOCK_RELEASE_RD; + wake_up_interruptible(&rwp->rw_waitq); + spin_unlock(&rwp->rw_lock); /* Wait for the test to complete */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_acquired) != 0 || - atomic_read(&rwv.rw_waiters) != 0)) { - splat_rwlock_sleep(1 * HZ); + while (splat_locked_test(&rwp->rw_lock, + rwp->rw_holders>0 || rwp->rw_waiters>0)) + msleep(100); - } + rw_destroy(&(rwp->rw_rwlock)); + kfree(rwp); - rw_destroy(&rwv.rwl); return rc; } -int -splat_rwlock_test2_writer_thread(void *arg) +static void +splat_rwlock_test2_func(void *arg) { - rw_thr_t *rwt = (rw_thr_t *)arg; - rw_priv_t *rwv = rwt->rwt_rwp; - uint8_t rnd = 0; - char name[16]; + rw_priv_t *rwp = (rw_priv_t *)arg; + int rc; + ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); - ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); - snprintf(name, sizeof(name), "%s%d", - SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id); - daemonize(name); - get_random_bytes((void *)&rnd, 1); - splat_rwlock_sleep(rnd * HZ / 1000); - - /* Here just increment the waiters count even if we are not - * exactly about to call rw_enter(). Not really a big deal - * since more than likely will be true when we simulate work - * later on */ - spin_lock(&rwv->rw_priv_lock); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s writer thread trying to acquire rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - atomic_inc(&rwv->rw_waiters); - spin_unlock(&rwv->rw_priv_lock); - - /* Wait here until the control thread - * says we can acquire the write lock */ - wait_event_interruptible(rwv->rw_waitq, - splat_rwlock_lock_and_test(&rwv->rw_priv_lock, - atomic_read(&rwv->rw_release) == - SPLAT_RWLOCK_RELEASE_WRITERS)); - - /* Take the semaphore for writing */ - rw_enter(&rwv->rwl, RW_WRITER); - - spin_lock(&rwv->rw_priv_lock); - atomic_dec(&rwv->rw_waiters); - atomic_inc(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s writer thread acquired rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); - - /* Give up the processor for a bit to simulate - * doing some work while taking the write lock */ - splat_rwlock_sleep(rnd * HZ / 1000); - - /* Ensure that we are the only one writing */ - if (atomic_read(&rwv->rw_acquired) > 1) { - rwt->rwt_rc = 1; - } else { - rwt->rwt_rc = 0; - } - - spin_lock(&rwv->rw_priv_lock); - atomic_inc(&rwv->rw_completed); - atomic_dec(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s writer thread dropped rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); - - rw_exit(&rwv->rwl); - - return 0; + /* Read the value before sleeping and write it after we wake up to + * maximize the chance of a race if rwlocks are not working properly */ + rw_enter(&rwp->rw_rwlock, RW_WRITER); + rc = rwp->rw_rc; + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(HZ / 100); /* 1/100 of a second */ + VERIFY(rwp->rw_rc == rc); + rwp->rw_rc = rc + 1; + rw_exit(&rwp->rw_rwlock); } static int splat_rwlock_test2(struct file *file, void *arg) { - int i, count = 0, rc = 0; - long pids[SPLAT_RWLOCK_TEST_COUNT]; - rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT]; - rw_priv_t rwv; + 
rw_priv_t *rwp; + taskq_t *tq; + int i, rc = 0, tq_count = 256; - /* Initialize private data including the rwlock */ - splat_init_rw_priv(&rwv, file); + rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL); + if (rwp == NULL) + return -ENOMEM; - /* Create some threads, the exact number isn't important just as - * long as we know how many we managed to create and should expect. */ - for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) { - rwt[i].rwt_rwp = &rwv; - rwt[i].rwt_id = i; - rwt[i].rwt_name = SPLAT_RWLOCK_TEST2_NAME; - rwt[i].rwt_rc = 0; + splat_init_rw_priv(rwp, file); - /* The first thread will be a writer */ - pids[i] = kernel_thread(splat_rwlock_test2_writer_thread, - &rwt[i], 0); + /* Create several threads allowing tasks to race with each other */ + tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, num_online_cpus(), + maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE); + if (tq == NULL) { + rc = -ENOMEM; + goto out; + } - if (pids[i] >= 0) { - count++; + /* + * Schedule N work items to the work queue each of which enters the + * writer rwlock, sleeps briefly, then exits the writer rwlock. On a + * multiprocessor box these work items will be handled by all available + * CPUs. The task function checks to ensure the tracked shared variable + * is always only incremented by one. Additionally, the rwlock itself + * is instrumented such that if any two processors are in the + * critical region at the same time the system will panic. If the + * rwlock is implemented right this will never happy, that's a pass. + */ + for (i = 0; i < tq_count; i++) { + if (!taskq_dispatch(tq,splat_rwlock_test2_func,rwp,TQ_SLEEP)) { + splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, + "Failed to queue task %d\n", i); + rc = -EINVAL; } } - /* Wait for writers to get queued up */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_waiters) < SPLAT_RWLOCK_TEST_COUNT)) { - splat_rwlock_sleep(1 * HZ); - } - /* Relase the writers */ - spin_lock(&rwv.rw_priv_lock); - atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS); - spin_unlock(&rwv.rw_priv_lock); - wake_up_interruptible(&rwv.rw_waitq); + taskq_wait(tq); - /* Wait for the test to complete */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_acquired) != 0 || - atomic_read(&rwv.rw_waiters) != 0)) { - splat_rwlock_sleep(HZ); + if (rwp->rw_rc == tq_count) { + splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads " + "correctly entered/exited the rwlock %d times\n", + num_online_cpus(), rwp->rw_rc); + } else { + splat_vprint(file, SPLAT_RWLOCK_TEST2_NAME, "%d racing threads " + "only processed %d/%d w rwlock work items\n", + num_online_cpus(), rwp->rw_rc, tq_count); + rc = -EINVAL; } - /* If any of the write threads ever acquired the lock - * while another thread had it, make sure we return - * an error */ - for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) { - if (rwt[i].rwt_rc) { - rc++; - } - } - - rw_destroy(&rwv.rwl); + taskq_destroy(tq); + rw_destroy(&(rwp->rw_rwlock)); +out: + kfree(rwp); return rc; } +#define splat_rwlock_test3_helper(rwp,rex1,rex2,wex1,wex2,held_func,rc) \ +do { \ + int result, _rc1_, _rc2_, _rc3_, _rc4_; \ + \ + rc = 0; \ + rw_enter(&(rwp)->rw_rwlock, RW_READER); \ + _rc1_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex1); \ + splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \ + " returned %d (expected %d) when RW_READER\n", \ + _rc1_ ? 
"Fail " : "", result, rex1); \ + rw_exit(&(rwp)->rw_rwlock); \ + _rc2_ = ((result = held_func(&(rwp)->rw_rwlock)) != rex2); \ + splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \ + " returned %d (expected %d) when !RW_READER\n", \ + _rc2_ ? "Fail " : "", result, rex2); \ + \ + rw_enter(&(rwp)->rw_rwlock, RW_WRITER); \ + _rc3_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex1); \ + splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \ + " returned %d (expected %d) when RW_WRITER\n", \ + _rc3_ ? "Fail " : "", result, wex1); \ + rw_exit(&(rwp)->rw_rwlock); \ + _rc4_ = ((result = held_func(&(rwp)->rw_rwlock)) != wex2); \ + splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "%s" #held_func \ + " returned %d (expected %d) when !RW_WRITER\n", \ + _rc4_ ? "Fail " : "", result, wex2); \ + \ + rc = ((_rc1_ || _rc2_ || _rc3_ || _rc4_) ? -EINVAL : 0); \ +} while(0); + static int splat_rwlock_test3(struct file *file, void *arg) { - kthread_t *owner; - rw_priv_t rwv; - int rc = 0; + rw_priv_t *rwp; + int rc1, rc2, rc3; - /* Initialize private data - * including the rwlock */ - splat_init_rw_priv(&rwv, file); + rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL); + if (rwp == NULL) + return -ENOMEM; - /* Take the rwlock for writing */ - rw_enter(&rwv.rwl, RW_WRITER); - owner = rw_owner(&rwv.rwl); - if (current != owner) { - splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should " - "be owned by pid %d but is owned by pid %d\n", - current->pid, owner ? owner->pid : -1); - rc = -EINVAL; - goto out; - } + splat_init_rw_priv(rwp, file); - /* Release the rwlock */ - rw_exit(&rwv.rwl); - owner = rw_owner(&rwv.rwl); - if (owner) { - splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not " - "be owned but is owned by pid %d\n", owner->pid); - rc = -EINVAL; - goto out; - } + splat_rwlock_test3_helper(rwp, 1, 0, 1, 0, RW_LOCK_HELD, rc1); + splat_rwlock_test3_helper(rwp, 1, 0, 0, 0, RW_READ_HELD, rc2); + splat_rwlock_test3_helper(rwp, 0, 0, 1, 0, RW_WRITE_HELD, rc3); - /* Take the rwlock for reading. - * Should not have an owner */ - rw_enter(&rwv.rwl, RW_READER); - owner = rw_owner(&rwv.rwl); - if (owner) { - splat_vprint(file, SPLAT_RWLOCK_TEST3_NAME, "rwlock should not " - "be owned but is owned by pid %d\n", owner->pid); - /* Release the rwlock */ - rw_exit(&rwv.rwl); - rc = -EINVAL; - goto out; - } + rw_destroy(&rwp->rw_rwlock); + kfree(rwp); - /* Release the rwlock */ - rw_exit(&rwv.rwl); - -out: - rw_destroy(&rwv.rwl); - return rc; + return ((rc1 || rc2 || rc3) ? 
-EINVAL : 0); } -int -splat_rwlock_test4_reader_thread(void *arg) +static void +splat_rwlock_test4_func(void *arg) { - rw_thr_t *rwt = (rw_thr_t *)arg; - rw_priv_t *rwv = rwt->rwt_rwp; - uint8_t rnd = 0; - char name[16]; + rw_priv_t *rwp = (rw_priv_t *)arg; + ASSERT(rwp->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); - ASSERT(rwv->rw_magic == SPLAT_RWLOCK_TEST_MAGIC); - snprintf(name, sizeof(name), "%s%d", - SPLAT_RWLOCK_TEST_NAME, rwt->rwt_id); - daemonize(name); - get_random_bytes((void *)&rnd, 1); - splat_rwlock_sleep(rnd * HZ / 1000); - - /* Don't try and and take the semaphore until - * someone else has already acquired it */ - wait_event_interruptible(rwv->rw_waitq, - splat_rwlock_lock_and_test(&rwv->rw_priv_lock, - atomic_read(&rwv->rw_acquired) > 0)); - - spin_lock(&rwv->rw_priv_lock); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread trying to acquire rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); - - /* Take the semaphore for reading - * release it when we are told to */ - rwt->rwt_rc = rw_tryenter(&rwv->rwl, RW_READER); - - /* Here we acquired the lock this is a - * failure since the writer should be - * holding the lock */ - if (rwt->rwt_rc == 1) { - spin_lock(&rwv->rw_priv_lock); - atomic_inc(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread acquired rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); - - spin_lock(&rwv->rw_priv_lock); - atomic_dec(&rwv->rw_acquired); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread dropped rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); - - /* Release the semaphore */ - rw_exit(&rwv->rwl); + if (rw_tryenter(&rwp->rw_rwlock, rwp->rw_type)) { + rwp->rw_rc = 0; + rw_exit(&rwp->rw_rwlock); + } else { + rwp->rw_rc = -EBUSY; } - /* Here we know we didn't block and didn't - * acquire the rwlock for reading */ - else { - spin_lock(&rwv->rw_priv_lock); - atomic_inc(&rwv->rw_completed); - splat_vprint(rwv->rw_file, rwt->rwt_name, - "%s reader thread could not acquire rwlock with " - "%d holding lock and %d waiting\n", - name, atomic_read(&rwv->rw_acquired), - atomic_read(&rwv->rw_waiters)); - spin_unlock(&rwv->rw_priv_lock); +} + +static char * +splat_rwlock_test4_name(krw_t type) +{ + switch (type) { + case RW_NONE: return "RW_NONE"; + case RW_WRITER: return "RW_WRITER"; + case RW_READER: return "RW_READER"; } - return 0; + return NULL; +} + +static int +splat_rwlock_test4_type(taskq_t *tq, rw_priv_t *rwp, int expected_rc, + krw_t holder_type, krw_t try_type) +{ + int id, rc = 0; + + /* Schedule a task function which will try and acquire the rwlock + * using type try_type while the rwlock is being held as holder_type. 
+ * The result must match expected_rc for the test to pass */ + rwp->rw_rc = -EINVAL; + rwp->rw_type = try_type; + + if (holder_type == RW_WRITER || holder_type == RW_READER) + rw_enter(&rwp->rw_rwlock, holder_type); + + id = taskq_dispatch(tq, splat_rwlock_test4_func, rwp, TQ_SLEEP); + if (id == 0) { + splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, "%s", + "taskq_dispatch() failed\n"); + rc = -EINVAL; + goto out; + } + + taskq_wait_id(tq, id); + + if (rwp->rw_rc != expected_rc) + rc = -EINVAL; + + splat_vprint(rwp->rw_file, SPLAT_RWLOCK_TEST4_NAME, + "%srw_tryenter(%s) returned %d (expected %d) when %s\n", + rc ? "Fail " : "", splat_rwlock_test4_name(try_type), + rwp->rw_rc, expected_rc, + splat_rwlock_test4_name(holder_type)); +out: + if (holder_type == RW_WRITER || holder_type == RW_READER) + rw_exit(&rwp->rw_rwlock); + + return rc; } static int splat_rwlock_test4(struct file *file, void *arg) { - int i, count = 0, rc = 0; - long pids[SPLAT_RWLOCK_TEST_COUNT]; - rw_thr_t rwt[SPLAT_RWLOCK_TEST_COUNT]; - rw_priv_t rwv; + rw_priv_t *rwp; + taskq_t *tq; + int rc = 0, rc1, rc2, rc3, rc4, rc5, rc6; - /* Initialize private data - * including the rwlock */ - splat_init_rw_priv(&rwv, file); + rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL); + if (rwp == NULL) + return -ENOMEM; - /* Create some threads, the exact number isn't important just as - * long as we know how many we managed to create and should expect. */ - for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) { - rwt[i].rwt_rwp = &rwv; - rwt[i].rwt_id = i; - rwt[i].rwt_name = SPLAT_RWLOCK_TEST4_NAME; - rwt[i].rwt_rc = 0; - - /* The first thread will be a writer */ - if (i == 0) { - /* We can reuse the test1 writer thread here */ - pids[i] = kernel_thread(splat_rwlock_test1_writer_thread, - &rwt[i], 0); - } else { - pids[i] = kernel_thread(splat_rwlock_test4_reader_thread, - &rwt[i], 0); - } - - if (pids[i] >= 0) { - count++; - } + tq = taskq_create(SPLAT_RWLOCK_TEST_TASKQ, 1, maxclsyspri, + 50, INT_MAX, TASKQ_PREPOPULATE); + if (tq == NULL) { + rc = -ENOMEM; + goto out; } - /* Once the writer has the lock, release the readers */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_acquired) <= 0)) { - splat_rwlock_sleep(1 * HZ); - } - wake_up_interruptible(&rwv.rw_waitq); + splat_init_rw_priv(rwp, file); - /* Make sure that the reader threads complete */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_completed) != SPLAT_RWLOCK_TEST_COUNT - 1)) { - splat_rwlock_sleep(1 * HZ); - } - /* Release the writer */ - spin_lock(&rwv.rw_priv_lock); - atomic_set(&rwv.rw_release, SPLAT_RWLOCK_RELEASE_WRITERS); - spin_unlock(&rwv.rw_priv_lock); - wake_up_interruptible(&rwv.rw_waitq); + /* Validate all combinations of rw_tryenter() contention */ + rc1 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_WRITER); + rc2 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_WRITER, RW_READER); + rc3 = splat_rwlock_test4_type(tq, rwp, -EBUSY, RW_READER, RW_WRITER); + rc4 = splat_rwlock_test4_type(tq, rwp, 0, RW_READER, RW_READER); + rc5 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_WRITER); + rc6 = splat_rwlock_test4_type(tq, rwp, 0, RW_NONE, RW_READER); - /* Wait for the test to complete */ - while (splat_rwlock_lock_and_test(&rwv.rw_priv_lock, - atomic_read(&rwv.rw_acquired) != 0 || - atomic_read(&rwv.rw_waiters) != 0)) { - splat_rwlock_sleep(1 * HZ); - } + if (rc1 || rc2 || rc3 || rc4 || rc5 || rc6) + rc = -EINVAL; - /* If any of the reader threads ever acquired the lock - * while another thread 
had it, make sure we return - * an error since the rw_tryenter() should have failed */ - for (i = 0; i < SPLAT_RWLOCK_TEST_COUNT; i++) { - if (rwt[i].rwt_rc) { - rc++; - } - } + taskq_destroy(tq); +out: + rw_destroy(&(rwp->rw_rwlock)); + kfree(rwp); - rw_destroy(&rwv.rwl); return rc; } static int splat_rwlock_test5(struct file *file, void *arg) { - kthread_t *owner; - rw_priv_t rwv; - int rc = 0; + rw_priv_t *rwp; + int rc = -EINVAL; - /* Initialize private data - * including the rwlock */ - splat_init_rw_priv(&rwv, file); + rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL); + if (rwp == NULL) + return -ENOMEM; - /* Take the rwlock for writing */ - rw_enter(&rwv.rwl, RW_WRITER); - owner = rw_owner(&rwv.rwl); - if (current != owner) { - splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should " - "be owned by pid %d but is owned by pid %d\n", - current->pid, owner ? owner->pid : -1); - rc = -EINVAL; + splat_init_rw_priv(rwp, file); + + rw_enter(&rwp->rw_rwlock, RW_WRITER); + if (!RW_WRITE_HELD(&rwp->rw_rwlock)) { + splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, + "rwlock should be write lock: %d\n", + RW_WRITE_HELD(&rwp->rw_rwlock)); goto out; } - /* Make sure that the downgrade - * worked properly */ - rw_downgrade(&rwv.rwl); - - owner = rw_owner(&rwv.rwl); - if (owner) { - splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "rwlock should not " - "be owned but is owned by pid %d\n", owner->pid); - /* Release the rwlock */ - rw_exit(&rwv.rwl); - rc = -EINVAL; + rw_downgrade(&rwp->rw_rwlock); + if (!RW_READ_HELD(&rwp->rw_rwlock)) { + splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, + "rwlock should be read lock: %d\n", + RW_READ_HELD(&rwp->rw_rwlock)); goto out; } - /* Release the rwlock */ - rw_exit(&rwv.rwl); - + rc = 0; + splat_vprint(file, SPLAT_RWLOCK_TEST5_NAME, "%s", + "rwlock properly downgraded\n"); out: - rw_destroy(&rwv.rwl); + rw_exit(&rwp->rw_rwlock); + rw_destroy(&rwp->rw_rwlock); + kfree(rwp); + return rc; } static int splat_rwlock_test6(struct file *file, void *arg) { - kthread_t *owner; - rw_priv_t rwv; - int rc = 0; + rw_priv_t *rwp; + int rc = -EINVAL; - /* Initialize private data - * including the rwlock */ - splat_init_rw_priv(&rwv, file); + rwp = (rw_priv_t *)kmalloc(sizeof(*rwp), GFP_KERNEL); + if (rwp == NULL) + return -ENOMEM; - /* Take the rwlock for reading */ - rw_enter(&rwv.rwl, RW_READER); - owner = rw_owner(&rwv.rwl); - if (owner) { - splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should not " - "be owned but is owned by pid %d\n", owner->pid); - rc = -EINVAL; + splat_init_rw_priv(rwp, file); + + rw_enter(&rwp->rw_rwlock, RW_READER); + if (!RW_READ_HELD(&rwp->rw_rwlock)) { + splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, + "rwlock should be read lock: %d\n", + RW_READ_HELD(&rwp->rw_rwlock)); goto out; } - /* Make sure that the upgrade - * worked properly */ - rc = !rw_tryupgrade(&rwv.rwl); + /* With one reader upgrade should never fail */ + rc = rw_tryupgrade(&rwp->rw_rwlock); + if (!rc) { + splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, + "rwlock contended preventing upgrade: %d\n", + RW_COUNT(&rwp->rw_rwlock)); + goto out; + } - owner = rw_owner(&rwv.rwl); - if (rc || current != owner) { + if (RW_READ_HELD(&rwp->rw_rwlock) || !RW_WRITE_HELD(&rwp->rw_rwlock)) { splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "rwlock should " - "be owned by pid %d but is owned by pid %d " - "trylock rc %d\n", - current->pid, owner ? 
owner->pid : -1, rc); - rc = -EINVAL; + "have 0 (not %d) reader and 1 (not %d) writer\n", + RW_READ_HELD(&rwp->rw_rwlock), + RW_WRITE_HELD(&rwp->rw_rwlock)); goto out; } - /* Release the rwlock */ - rw_exit(&rwv.rwl); - + rc = 0; + splat_vprint(file, SPLAT_RWLOCK_TEST6_NAME, "%s", + "rwlock properly upgraded\n"); out: - rw_destroy(&rwv.rwl); + rw_exit(&rwp->rw_rwlock); + rw_destroy(&rwp->rw_rwlock); + kfree(rwp); + return rc; } splat_subsystem_t * splat_rwlock_init(void) { - splat_subsystem_t *sub; + splat_subsystem_t *sub; - sub = kmalloc(sizeof(*sub), GFP_KERNEL); - if (sub == NULL) - return NULL; + sub = kmalloc(sizeof(*sub), GFP_KERNEL); + if (sub == NULL) + return NULL; - memset(sub, 0, sizeof(*sub)); - strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE); - strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE); - INIT_LIST_HEAD(&sub->subsystem_list); - INIT_LIST_HEAD(&sub->test_list); - spin_lock_init(&sub->test_lock); - sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK; + memset(sub, 0, sizeof(*sub)); + strncpy(sub->desc.name, SPLAT_RWLOCK_NAME, SPLAT_NAME_SIZE); + strncpy(sub->desc.desc, SPLAT_RWLOCK_DESC, SPLAT_DESC_SIZE); + INIT_LIST_HEAD(&sub->subsystem_list); + INIT_LIST_HEAD(&sub->test_list); + spin_lock_init(&sub->test_lock); + sub->desc.id = SPLAT_SUBSYSTEM_RWLOCK; - SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC, - SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1); - SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC, - SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2); - SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC, - SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3); - SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC, - SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4); - SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC, - SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5); - SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC, - SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6); + SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST1_NAME, SPLAT_RWLOCK_TEST1_DESC, + SPLAT_RWLOCK_TEST1_ID, splat_rwlock_test1); + SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST2_NAME, SPLAT_RWLOCK_TEST2_DESC, + SPLAT_RWLOCK_TEST2_ID, splat_rwlock_test2); + SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST3_NAME, SPLAT_RWLOCK_TEST3_DESC, + SPLAT_RWLOCK_TEST3_ID, splat_rwlock_test3); + SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST4_NAME, SPLAT_RWLOCK_TEST4_DESC, + SPLAT_RWLOCK_TEST4_ID, splat_rwlock_test4); + SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST5_NAME, SPLAT_RWLOCK_TEST5_DESC, + SPLAT_RWLOCK_TEST5_ID, splat_rwlock_test5); + SPLAT_TEST_INIT(sub, SPLAT_RWLOCK_TEST6_NAME, SPLAT_RWLOCK_TEST6_DESC, + SPLAT_RWLOCK_TEST6_ID, splat_rwlock_test6); - return sub; + return sub; } void splat_rwlock_fini(splat_subsystem_t *sub) { - ASSERT(sub); - SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID); - SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID); - SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID); - SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID); - SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID); - SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID); - kfree(sub); + ASSERT(sub); + SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST6_ID); + SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST5_ID); + SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST4_ID); + SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST3_ID); + SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST2_ID); + SPLAT_TEST_FINI(sub, SPLAT_RWLOCK_TEST1_ID); + kfree(sub); } int splat_rwlock_id(void) { - return SPLAT_SUBSYSTEM_RWLOCK; + return SPLAT_SUBSYSTEM_RWLOCK; } diff --git 
a/patches/fc11-spl-export-symbols.patch b/patches/fc11-spl-export-symbols.patch index 726aabddc..c9d3ec880 100644 --- a/patches/fc11-spl-export-symbols.patch +++ b/patches/fc11-spl-export-symbols.patch @@ -92,3 +92,15 @@ index f5b7d17..1468a22 100644 static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes) { +diff --git a/kernel/fork.c b/kernel/fork.c +index 9b42695..852499e 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -159,6 +159,7 @@ void __put_task_struct(struct task_struct *tsk) + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++EXPORT_SYMBOL(__put_task_struct); + + /* + * macro override instead of weak attribute alias, to workaround diff --git a/spl_config.h.in b/spl_config.h.in index c4097d80f..eedff8e81 100644 --- a/spl_config.h.in +++ b/spl_config.h.in @@ -117,6 +117,9 @@ /* pgdat_list is available */ #undef HAVE_PGDAT_LIST +/* __put_task_struct() is available */ +#undef HAVE_PUT_TASK_STRUCT + /* set_normalized_timespec() is available as export */ #undef HAVE_SET_NORMALIZED_TIMESPEC_EXPORT
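
Usage note (not part of the patch): a minimal sketch of the simplified rwlock API this change introduces, where krwlock_t is now a bare struct rw_semaphore. Only rw_init(), rw_enter(), rw_exit(), rw_tryupgrade(), RW_WRITE_HELD(), RW_DEFAULT, RW_READER and RW_WRITER come from the patch; foo_cache_t and its functions are invented names for illustration.

    /*
     * Hypothetical consumer of the reworked SPL rwlock API.  The
     * foo_cache_* names are made up for this sketch only.
     */
    #include <sys/rwlock.h>
    #include <sys/debug.h>

    typedef struct foo_cache {
            krwlock_t       fc_rwlock;      /* now backed directly by a rw_semaphore */
            int             fc_entries;
    } foo_cache_t;

    static void
    foo_cache_init(foo_cache_t *fc)
    {
            /* rw_init() now reduces to init_rwsem(); the name, type and
             * arg arguments are effectively ignored. */
            rw_init(&fc->fc_rwlock, NULL, RW_DEFAULT, NULL);
            fc->fc_entries = 0;
    }

    static void
    foo_cache_grow(foo_cache_t *fc)
    {
            rw_enter(&fc->fc_rwlock, RW_READER);

            /* A sole reader may upgrade in place; under contention fall
             * back to dropping the read lock and re-entering as writer. */
            if (!rw_tryupgrade(&fc->fc_rwlock)) {
                    rw_exit(&fc->fc_rwlock);
                    rw_enter(&fc->fc_rwlock, RW_WRITER);
            }

            ASSERT(RW_WRITE_HELD(&fc->fc_rwlock));
            fc->fc_entries++;
            rw_exit(&fc->fc_rwlock);
    }

Note that a single rw_exit() call suffices for either lock mode, since the new macro inspects RW_READ_HELD()/RW_WRITE_HELD() to choose between up_read() and up_write().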
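
The reworked splat tests rely on a simple wait/notify pattern: shared counters guarded by a spinlock and re-checked through the new splat_locked_test() helper. A condensed sketch of that pattern follows; the rw_priv_t fields and SPLAT_RWLOCK_RELEASE_WR are from the test code, while the two helper functions are invented for illustration.

    /*
     * Sketch of the wait/notify pattern used by the reworked tests.
     */
    static void
    example_release_writer(rw_priv_t *rwp)
    {
            spin_lock(&rwp->rw_lock);
            rwp->rw_release = SPLAT_RWLOCK_RELEASE_WR;
            wake_up_interruptible(&rwp->rw_waitq);
            spin_unlock(&rwp->rw_lock);
    }

    static void
    example_wait_for_release(rw_priv_t *rwp)
    {
            /* splat_locked_test() evaluates the predicate with rw_lock
             * held, so the check and the decision to keep sleeping are
             * atomic with respect to example_release_writer() above. */
            wait_event_interruptible(rwp->rw_waitq,
                splat_locked_test(&rwp->rw_lock,
                rwp->rw_release == SPLAT_RWLOCK_RELEASE_WR));
    }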