diff --git a/include/spl/sys/rwlock.h b/include/spl/sys/rwlock.h
index 088e28b44..0ac528e16 100644
--- a/include/spl/sys/rwlock.h
+++ b/include/spl/sys/rwlock.h
@@ -172,7 +172,7 @@ RW_LOCK_HELD(krwlock_t *rwp)
 }
 
 /*
- * The following functions must be a #define and not static inline. 
+ * The following functions must be a #define and not static inline.
  * This ensures that the native linux semaphore functions (down/up)
  * will be correctly located in the users code which is important
  * for the built in kernel lock analysis tools
@@ -188,10 +188,10 @@ RW_LOCK_HELD(krwlock_t *rwp)
 	spl_rw_set_type(rwp, type);				\
 })
 
-#define	rw_destroy(rwp)						\
-({								\
-	VERIFY(!RW_LOCK_HELD(rwp));				\
-})
+/*
+ * The Linux rwsem implementation does not require a matching destroy.
+ */
+#define	rw_destroy(rwp)		((void) 0)
 
 #define	rw_tryenter(rwp, rw)					\
 ({								\
diff --git a/module/spl/spl-rwlock.c b/module/spl/spl-rwlock.c
index cf03bc593..4ffebc8ea 100644
--- a/module/spl/spl-rwlock.c
+++ b/module/spl/spl-rwlock.c
@@ -35,16 +35,24 @@
 static int
 __rwsem_tryupgrade(struct rw_semaphore *rwsem)
 {
-
+#if defined(READER_BIAS) && defined(WRITER_BIAS)
+	/*
+	 * After the 4.9.20-rt16 kernel, the realtime patch series lifted the
+	 * single reader restriction. While this could be accommodated by
+	 * adding additional compatibility code, assume the rwsem can never be
+	 * upgraded. All callers must already cleanly handle this case.
+	 */
+	return (0);
+#else
 	ASSERT((struct task_struct *)
 	    ((unsigned long)rwsem->lock.owner & ~RT_MUTEX_OWNER_MASKALL) ==
 	    current);
 
 	/*
-	 * Under the realtime patch series, rwsem is implemented as a
-	 * single mutex held by readers and writers alike. However,
-	 * this implementation would prevent a thread from taking a
-	 * read lock twice, as the mutex would already be locked on
+	 * Prior to the 4.9.20-rt16 kernel, the realtime patch series
+	 * implemented rwsem as a single mutex held by readers and writers
+	 * alike. However, this implementation would prevent a thread from
+	 * taking a read lock twice, as the mutex would already be locked on
 	 * the second attempt. Therefore the implementation allows a
 	 * single thread to take a rwsem as read lock multiple times
 	 * tracking that nesting as read_depth counter.
@@ -60,6 +68,7 @@ __rwsem_tryupgrade(struct rw_semaphore *rwsem)
 		return (1);
 	}
 	return (0);
+#endif
 }
 #elif defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
 static int
diff --git a/module/spl/spl-vnode.c b/module/spl/spl-vnode.c
index aebee0a82..b72c4896a 100644
--- a/module/spl/spl-vnode.c
+++ b/module/spl/spl-vnode.c
@@ -744,7 +744,7 @@ vn_file_cache_destructor(void *buf, void *cdrarg)
 int
 spl_vn_init(void)
 {
-	vn_file_lock = __SPIN_LOCK_UNLOCKED(vn_file_lock);
+	spin_lock_init(&vn_file_lock);
 
 	vn_cache = kmem_cache_create("spl_vn_cache",
 	    sizeof (struct vnode), 64, vn_cache_constructor,
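
The comment preserved by the first rwlock.h hunk carries the key constraint: these wrappers must stay macros so the native down/up calls expand directly at each call site, which is what lets the kernel's built-in lock analysis tools attribute an acquisition to the caller rather than to a shared wrapper. A minimal sketch of the pattern, reusing the SEM() accessor from this header; the macro name is invented for illustration and this is not the exact SPL source:

/*
 * Expanding down_read()/down_write() at the call site means lockdep
 * and similar tools record the caller's code location as the
 * acquisition point; a non-inlined wrapper function would report the
 * wrapper's location for every lock in the system.
 */
#define	rw_enter_sketch(rwp, rw)				\
({								\
	if ((rw) == RW_READER)					\
		down_read(SEM(rwp));				\
	else							\
		down_write(SEM(rwp));				\
})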
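
With the RT branch of __rwsem_tryupgrade() now failing unconditionally, the patch relies on the existing rw_tryupgrade() contract: an upgrade may fail for any reason and callers must fall back gracefully. A minimal sketch of that fallback, assuming the standard SPL rwlock API; the function name and the d_dirty flag are hypothetical:

#include <sys/rwlock.h>

static void
try_mark_dirty(krwlock_t *lock, int *d_dirty)
{
	rw_enter(lock, RW_READER);
	if (*d_dirty != 0) {
		rw_exit(lock);
		return;
	}
	if (!rw_tryupgrade(lock)) {
		/*
		 * Upgrade failed (always the case on post-4.9.20-rt16
		 * RT kernels): drop the read lock, take the write lock.
		 */
		rw_exit(lock);
		rw_enter(lock, RW_WRITER);
	}
	/*
	 * Revalidate: another thread may have set the flag in the
	 * window between rw_exit() and rw_enter().
	 */
	if (*d_dirty == 0)
		*d_dirty = 1;
	rw_exit(lock);
}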
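
The spl-vnode.c hunk swaps a structure assignment for the kernel's runtime initializer. __SPIN_LOCK_UNLOCKED() is meant for static definitions; spin_lock_init() is the dynamic form and, under lockdep, also registers a lock class key at the call site. That distinction presumably motivated the change on RT-patched kernels, where spinlock_t is rt_mutex based and the assignment form is the likelier one to break. A minimal sketch contrasting the two valid initializations; the lock and function names are invented for illustration:

#include <linux/module.h>
#include <linux/spinlock.h>

/* Static definition: the initializer macro form is fine here. */
static DEFINE_SPINLOCK(example_static_lock);

/* Dynamic definition: must be initialized before first use. */
static spinlock_t example_dynamic_lock;

static int __init
example_init(void)
{
	spin_lock_init(&example_dynamic_lock);

	/* Both locks behave identically once initialized. */
	spin_lock(&example_static_lock);
	spin_unlock(&example_static_lock);

	spin_lock(&example_dynamic_lock);
	spin_unlock(&example_dynamic_lock);
	return (0);
}
module_init(example_init);

MODULE_LICENSE("GPL");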