mirror of https://git.proxmox.com/git/mirror_zfs.git
synced 2024-12-25 18:59:33 +03:00

- Updated rwlocks to reside in a .c file instead of a static inline.
- Updated rwlocks so they can be safely initialized in ctors.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@96 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c

This commit is contained in:
parent d6a26c6a32
commit e8b31e8482
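For context, a minimal usage sketch of the Solaris-style rwlock API whose implementation this commit moves out of the header and into a .c file. The krwlock_t type and the rw_init()/rw_enter()/rw_exit()/rw_destroy() macros come from the diff below; the surrounding module code, the example_* names, and the <sys/rwlock.h> include path are assumptions for illustration only, not part of this commit.

#include <sys/rwlock.h>                 /* assumed SPL include path */

static krwlock_t example_lock;          /* hypothetical lock */
static int example_value;               /* data protected by example_lock */

static void
example_setup(void)
{
        /* Passing NULL for the name lets the rw_init() macro default it
         * to the stringified lock expression via #rwlp. */
        rw_init(&example_lock, NULL, RW_DEFAULT, NULL);
}

static int
example_read(void)
{
        int val;

        rw_enter(&example_lock, RW_READER);     /* shared, may block */
        val = example_value;
        rw_exit(&example_lock);

        return val;
}

static void
example_write(int val)
{
        rw_enter(&example_lock, RW_WRITER);     /* exclusive, may block */
        example_value = val;
        rw_exit(&example_lock);
}

static void
example_teardown(void)
{
        rw_destroy(&example_lock);              /* frees the name buffer */
}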
@@ -68,20 +68,12 @@ extern kthread_t *__spl_mutex_owner(kmutex_t *mp);
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        __ENTRY(S_MUTEX);                                               \
        if ((name) == NULL)                                             \
                __spl_mutex_init(mp, #mp, type, ibc);                   \
        else                                                            \
                __spl_mutex_init(mp, name, type, ibc);                  \
        __EXIT(S_MUTEX);                                                \
})

#define mutex_destroy(mp)                                               \
({                                                                      \
        __ENTRY(S_MUTEX);                                               \
        __spl_mutex_destroy(mp);                                        \
        __EXIT(S_MUTEX);                                                \
})

#define mutex_destroy(mp)               __spl_mutex_destroy(mp)
#define mutex_tryenter(mp)              __mutex_tryenter(mp)
#define mutex_enter(mp)                 __mutex_enter(mp)
#define mutex_exit(mp)                  __mutex_exit(mp)
@@ -1,13 +1,14 @@
#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <asm/current.h>
#include <sys/types.h>
#include <sys/kmem.h>

#ifdef __cplusplus
extern "C" {
#endif
@@ -21,292 +22,50 @@ typedef enum {
        RW_READER
} krw_t;

#define RW_READ_HELD(x)         (__rw_read_held((x)))
#define RW_WRITE_HELD(x)        (__rw_write_held((x)))
#define RW_LOCK_HELD(x)         (__rw_lock_held((x)))
#define RW_ISWRITER(x)          (__rw_iswriter(x))

#define RW_MAGIC  0x3423645a
#define RW_POISON 0xa6

typedef struct {
        int rw_magic;
        int32_t rw_magic;
        int32_t rw_name_size;
        char *rw_name;
        struct rw_semaphore rw_sem;
        struct task_struct *rw_owner;   /* holder of the write lock */
} krwlock_t;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer_locked(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;

        sem->activity = -1;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        list_del(&waiter->list);

        tsk = waiter->task;
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        return sem;
}

/*
 * release a read lock on the semaphore
 */
static void fastcall
__up_read_locked(struct rw_semaphore *sem)
{
        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer_locked(sem);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static int fastcall
__down_write_trylock_locked(struct rw_semaphore *sem)
{
        int ret = 0;

        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
        }

        return ret;
}
#endif
extern void __rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg);
extern void __rw_destroy(krwlock_t *rwlp);
extern int __rw_tryenter(krwlock_t *rwlp, krw_t rw);
extern void __rw_enter(krwlock_t *rwlp, krw_t rw);
extern void __rw_exit(krwlock_t *rwlp);
extern void __rw_downgrade(krwlock_t *rwlp);
extern int __rw_tryupgrade(krwlock_t *rwlp);
extern kthread_t *__rw_owner(krwlock_t *rwlp);
extern int __rw_read_held(krwlock_t *rwlp);
extern int __rw_write_held(krwlock_t *rwlp);
extern int __rw_lock_held(krwlock_t *rwlp);

static __inline__ void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
        ASSERT(type == RW_DEFAULT);     /* XXX no irq handler use */
        ASSERT(arg == NULL);            /* XXX no irq handler use */
#define rw_init(rwlp, name, type, arg)                                  \
({                                                                      \
        if ((name) == NULL)                                             \
                __rw_init(rwlp, #rwlp, type, arg);                      \
        else                                                            \
                __rw_init(rwlp, name, type, arg);                       \
})
#define rw_destroy(rwlp)        __rw_destroy(rwlp)
#define rw_tryenter(rwlp, rw)   __rw_tryenter(rwlp, rw)
#define rw_enter(rwlp, rw)      __rw_enter(rwlp, rw)
#define rw_exit(rwlp)           __rw_exit(rwlp)
#define rw_downgrade(rwlp)      __rw_downgrade(rwlp)
#define rw_tryupgrade(rwlp)     __rw_tryupgrade(rwlp)
#define rw_owner(rwlp)          __rw_owner(rwlp)
#define RW_READ_HELD(rwlp)      __rw_read_held(rwlp)
#define RW_WRITE_HELD(rwlp)     __rw_write_held(rwlp)
#define RW_LOCK_HELD(rwlp)      __rw_lock_held(rwlp)
        rwlp->rw_magic = RW_MAGIC;
        rwlp->rw_owner = NULL;          /* no one holds the write lock yet */
        init_rwsem(&rwlp->rw_sem);
        rwlp->rw_name = NULL;

        if (name) {
                rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
                if (rwlp->rw_name)
                        strcpy(rwlp->rw_name, name);
        }
}

static __inline__ void
rw_destroy(krwlock_t *rwlp)
{
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        ASSERT(rwlp->rw_owner == NULL);
        spin_lock(&rwlp->rw_sem.wait_lock);
        ASSERT(list_empty(&rwlp->rw_sem.wait_list));
        spin_unlock(&rwlp->rw_sem.wait_lock);

        if (rwlp->rw_name)
                kfree(rwlp->rw_name);

        memset(rwlp, RW_POISON, sizeof(krwlock_t));
}
/* Return 0 if the lock could not be obtained without blocking.
 */
static __inline__ int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
        int result;

        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        switch (rw) {
                /* these functions return 1 if success, 0 if contention */
                case RW_READER:
                        /* Here the Solaris code would return 0
                         * if there were any write waiters.  Specifically
                         * thinking about the case where readers may have
                         * the lock and we would also allow this thread
                         * to grab the read lock with a writer waiting in the
                         * queue.  This doesn't seem like a correctness
                         * issue, so just call down_read_trylock()
                         * for the test.  We may have to revisit this if
                         * it becomes an issue */
                        result = down_read_trylock(&rwlp->rw_sem);
                        break;
                case RW_WRITER:
                        result = down_write_trylock(&rwlp->rw_sem);
                        if (result) {
                                /* there better not be anyone else
                                 * holding the write lock here */
                                ASSERT(rwlp->rw_owner == NULL);
                                rwlp->rw_owner = current;
                        }
                        break;
                default:
                        SBUG();
        }

        return result;
}
static __inline__ void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        switch (rw) {
                case RW_READER:
                        /* Here the Solaris code would block
                         * if there were any write waiters.  Specifically
                         * thinking about the case where readers may have
                         * the lock and we would also allow this thread
                         * to grab the read lock with a writer waiting in the
                         * queue.  This doesn't seem like a correctness
                         * issue, so just call down_read()
                         * for the test.  We may have to revisit this if
                         * it becomes an issue */
                        down_read(&rwlp->rw_sem);
                        break;
                case RW_WRITER:
                        down_write(&rwlp->rw_sem);

                        /* there better not be anyone else
                         * holding the write lock here */
                        ASSERT(rwlp->rw_owner == NULL);
                        rwlp->rw_owner = current;
                        break;
                default:
                        SBUG();
        }
}
static __inline__ void
rw_exit(krwlock_t *rwlp)
{
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        /* rw_owner is held by current
         * thread iff it is a writer */
        if (rwlp->rw_owner == current) {
                rwlp->rw_owner = NULL;
                up_write(&rwlp->rw_sem);
        } else {
                up_read(&rwlp->rw_sem);
        }
}
static __inline__ void
rw_downgrade(krwlock_t *rwlp)
{
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        ASSERT(rwlp->rw_owner == current);

        rwlp->rw_owner = NULL;
        downgrade_write(&rwlp->rw_sem);
}
/* Return 0 if unable to perform the upgrade.
 * Might be wise to fix the caller
 * to acquire the write lock first?
 */
static __inline__ int
rw_tryupgrade(krwlock_t *rwlp)
{
        int result = 0;

        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        spin_lock(&rwlp->rw_sem.wait_lock);

        /* Check if there is anyone waiting for the
         * lock.  If there is, then we know we should
         * not try to upgrade the lock */
        if (!list_empty(&rwlp->rw_sem.wait_list)) {
                spin_unlock(&rwlp->rw_sem.wait_lock);
                return 0;
        }
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
        /* Note that activity is protected by
         * the wait_lock.  Don't try to upgrade
         * if there are multiple readers currently
         * holding the lock */
        if (rwlp->rw_sem.activity > 1) {
#else
        /* Don't try to upgrade
         * if there are multiple readers currently
         * holding the lock */
        if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
#endif
                spin_unlock(&rwlp->rw_sem.wait_lock);
                return 0;
        }

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
        /* Here it should be safe to drop the
         * read lock and reacquire it for writing since
         * we know there are no waiters */
        __up_read_locked(&rwlp->rw_sem);

        /* returns 1 if success, 0 if contention */
        result = __down_write_trylock_locked(&rwlp->rw_sem);
#else
        /* Here it should be safe to drop the
         * read lock and reacquire it for writing since
         * we know there are no waiters */
        up_read(&rwlp->rw_sem);

        /* returns 1 if success, 0 if contention */
        result = down_write_trylock(&rwlp->rw_sem);
#endif

        /* Check if upgrade failed.  Should not ever happen
         * if we got to this point */
        ASSERT(result);
        ASSERT(rwlp->rw_owner == NULL);
        rwlp->rw_owner = current;
        spin_unlock(&rwlp->rw_sem.wait_lock);
        return 1;
}
static __inline__ kthread_t *
rw_owner(krwlock_t *rwlp)
{
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        return rwlp->rw_owner;
}

#ifdef __cplusplus
}
#endif

#endif /* _SPL_RWLOCK_H */
@@ -6,10 +6,295 @@
#define DEBUG_SUBSYSTEM S_RWLOCK

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

/* wake a single writer */
static struct rw_semaphore *
__rwsem_wake_one_writer_locked(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;

        sem->activity = -1;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        list_del(&waiter->list);

        tsk = waiter->task;
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        return sem;
}

/* release a read lock on the semaphore */
static void
__up_read_locked(struct rw_semaphore *sem)
{
        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer_locked(sem);
}

/* trylock for writing -- returns 1 if successful, 0 if contention */
static int
__down_write_trylock_locked(struct rw_semaphore *sem)
{
        int ret = 0;

        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
        }

        return ret;
}
#endif
void
__rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
        int flags = KM_SLEEP;

        ASSERT(rwlp);
        ASSERT(name);
        ASSERT(type == RW_DEFAULT);     /* XXX no irq handler use */
        ASSERT(arg == NULL);            /* XXX no irq handler use */

        rwlp->rw_magic = RW_MAGIC;
        rwlp->rw_owner = NULL;
        rwlp->rw_name = NULL;
        rwlp->rw_name_size = strlen(name) + 1;

        /* We may be called when there is a non-zero preempt_count or
         * interrupts are disabled, in which case we must not sleep.
         */
        if (current_thread_info()->preempt_count || irqs_disabled())
                flags = KM_NOSLEEP;

        rwlp->rw_name = kmem_alloc(rwlp->rw_name_size, flags);
        if (rwlp->rw_name == NULL)
                return;

        init_rwsem(&rwlp->rw_sem);
        strcpy(rwlp->rw_name, name);
}
EXPORT_SYMBOL(__rw_init);

void
__rw_destroy(krwlock_t *rwlp)
{
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        ASSERT(rwlp->rw_owner == NULL);
        spin_lock(&rwlp->rw_sem.wait_lock);
        ASSERT(list_empty(&rwlp->rw_sem.wait_list));
        spin_unlock(&rwlp->rw_sem.wait_lock);

        kmem_free(rwlp->rw_name, rwlp->rw_name_size);

        memset(rwlp, RW_POISON, sizeof(krwlock_t));
}
EXPORT_SYMBOL(__rw_destroy);
/* Return 0 if the lock could not be obtained without blocking. */
int
__rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
        int rc = 0;
        ENTRY;

        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        switch (rw) {
                /* these functions return 1 if success, 0 if contention */
                case RW_READER:
                        /* Here the Solaris code would return 0
                         * if there were any write waiters.  Specifically
                         * thinking about the case where readers may have
                         * the lock and we would also allow this thread
                         * to grab the read lock with a writer waiting in the
                         * queue.  This doesn't seem like a correctness
                         * issue, so just call down_read_trylock()
                         * for the test.  We may have to revisit this if
                         * it becomes an issue */
                        rc = down_read_trylock(&rwlp->rw_sem);
                        break;
                case RW_WRITER:
                        rc = down_write_trylock(&rwlp->rw_sem);
                        if (rc) {
                                /* there better not be anyone else
                                 * holding the write lock here */
                                ASSERT(rwlp->rw_owner == NULL);
                                rwlp->rw_owner = current;
                        }
                        break;
                default:
                        SBUG();
        }

        RETURN(rc);
}
EXPORT_SYMBOL(__rw_tryenter);
void
__rw_enter(krwlock_t *rwlp, krw_t rw)
{
        ENTRY;
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        switch (rw) {
                case RW_READER:
                        /* Here the Solaris code would block
                         * if there were any write waiters.  Specifically
                         * thinking about the case where readers may have
                         * the lock and we would also allow this thread
                         * to grab the read lock with a writer waiting in the
                         * queue.  This doesn't seem like a correctness
                         * issue, so just call down_read()
                         * for the test.  We may have to revisit this if
                         * it becomes an issue */
                        down_read(&rwlp->rw_sem);
                        break;
                case RW_WRITER:
                        down_write(&rwlp->rw_sem);

                        /* there better not be anyone else
                         * holding the write lock here */
                        ASSERT(rwlp->rw_owner == NULL);
                        rwlp->rw_owner = current;
                        break;
                default:
                        SBUG();
        }
        EXIT;
}
EXPORT_SYMBOL(__rw_enter);

void
__rw_exit(krwlock_t *rwlp)
{
        ENTRY;
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        /* rw_owner is held by current
         * thread iff it is a writer */
        if (rwlp->rw_owner == current) {
                rwlp->rw_owner = NULL;
                up_write(&rwlp->rw_sem);
        } else {
                up_read(&rwlp->rw_sem);
        }
        EXIT;
}
EXPORT_SYMBOL(__rw_exit);
void
__rw_downgrade(krwlock_t *rwlp)
{
        ENTRY;
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        ASSERT(rwlp->rw_owner == current);

        rwlp->rw_owner = NULL;
        downgrade_write(&rwlp->rw_sem);
        EXIT;
}
EXPORT_SYMBOL(__rw_downgrade);
/* Return 0 if unable to perform the upgrade.
 * Might be wise to fix the caller
 * to acquire the write lock first?
 */
int
__rw_tryupgrade(krwlock_t *rwlp)
{
        int rc = 0;
        ENTRY;

        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        spin_lock(&rwlp->rw_sem.wait_lock);

        /* Check if there is anyone waiting for the
         * lock.  If there is, then we know we should
         * not try to upgrade the lock */
        if (!list_empty(&rwlp->rw_sem.wait_list)) {
                spin_unlock(&rwlp->rw_sem.wait_lock);
                RETURN(0);
        }
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
        /* Note that activity is protected by
         * the wait_lock.  Don't try to upgrade
         * if there are multiple readers currently
         * holding the lock */
        if (rwlp->rw_sem.activity > 1) {
#else
        /* Don't try to upgrade
         * if there are multiple readers currently
         * holding the lock */
        if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
#endif
                spin_unlock(&rwlp->rw_sem.wait_lock);
                RETURN(0);
        }

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
        /* Here it should be safe to drop the
         * read lock and reacquire it for writing since
         * we know there are no waiters */
        __up_read_locked(&rwlp->rw_sem);

        /* returns 1 if success, 0 if contention */
        rc = __down_write_trylock_locked(&rwlp->rw_sem);
#else
        /* Here it should be safe to drop the
         * read lock and reacquire it for writing since
         * we know there are no waiters */
        up_read(&rwlp->rw_sem);

        /* returns 1 if success, 0 if contention */
        rc = down_write_trylock(&rwlp->rw_sem);
#endif

        /* Check if upgrade failed.  Should not ever happen
         * if we got to this point */
        ASSERT(rc);
        ASSERT(rwlp->rw_owner == NULL);
        rwlp->rw_owner = current;
        spin_unlock(&rwlp->rw_sem.wait_lock);

        RETURN(1);
}
EXPORT_SYMBOL(__rw_tryupgrade);
kthread_t *
__rw_owner(krwlock_t *rwlp)
{
        ENTRY;
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        RETURN(rwlp->rw_owner);
}
EXPORT_SYMBOL(__rw_owner);

int
__rw_read_held(krwlock_t *rwlp)
{
        ENTRY;
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        RETURN(__rw_lock_held(rwlp) && rwlp->rw_owner == NULL);
}
@@ -19,6 +304,7 @@ int
__rw_write_held(krwlock_t *rwlp)
{
        ENTRY;
        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);
        RETURN(rwlp->rw_owner == current);
}
@@ -30,6 +316,7 @@ __rw_lock_held(krwlock_t *rwlp)
        int rc = 0;
        ENTRY;

        ASSERT(rwlp);
        ASSERT(rwlp->rw_magic == RW_MAGIC);

        spin_lock_irq(&(rwlp->rw_sem.wait_lock));