Enforce architecture-specific barriers around clear_bit()
The comment above the Linux 3.16 kernel's clear_bit() states:

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
 * in order to ensure changes are visible on other processors.
 */

This comment does not make sense in the context of x86, where these barriers
map to barrier(), which is only a compiler barrier. However, it does make
sense on architectures that reorder around atomic instructions: there, a
processor is allowed to execute the wake_up_bit() before the clear_bit(),
and we have a race. On the few architectures that suffer from this issue,
the other processor would wake up, see the bit still set and go back to
sleep, while the one responsible for waking it would assume it had done its
job and continue.

This patch implements a wrapper that maps smp_mb__{before,after}_atomic() to
smp_mb__{before,after}_clear_bit() on older kernels and changes our code to
leverage it in a manner consistent with the mainline kernel.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
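To make the race concrete, here is a minimal sketch (not taken from the patch)
of the waker side of the pattern this change hardens. The flag word my_flags,
the bit MY_BUSY_BIT and the function my_release() are hypothetical names;
clear_bit(), smp_mb__after_atomic() and wake_up_bit() are the kernel
primitives discussed above.

#include <linux/bitops.h>
#include <linux/wait.h>

static unsigned long my_flags;		/* hypothetical flag word */
#define MY_BUSY_BIT	0		/* hypothetical bit number */

static void my_release(void)
{
	/*
	 * clear_bit() is atomic but is not a memory barrier.  Without the
	 * explicit barrier below, a weakly ordered CPU may make the wake-up
	 * visible before the cleared bit, so a waiter woken by wake_up_bit()
	 * can still observe the bit set and go back to sleep, while this CPU
	 * assumes its job is done.
	 */
	clear_bit(MY_BUSY_BIT, &my_flags);
	smp_mb__after_atomic();	/* order the clear before the wake-up */
	wake_up_bit(&my_flags, MY_BUSY_BIT);

	/*
	 * A waiter would typically block on this bit via wait_on_bit()
	 * until it is cleared (the exact signature varies across kernel
	 * versions).  On pre-3.16 kernels the wrapper added below maps
	 * smp_mb__after_atomic() to smp_mb__after_clear_bit().
	 */
}

The hunks below apply exactly this ordering around KMC_BIT_GROWING,
KMC_BIT_DEADLOCKED and KMC_BIT_REAPING before waking waiters.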
commit a988a35a93
parent c2fa09454e
@@ -42,6 +42,20 @@
 #undef kmem_cache_free
 
+/*
+ * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
+ * with smp_mb__{before,after}_atomic() because they were redundant. This is
+ * only used inside our SLAB allocator, so we implement an internal wrapper
+ * here to give us smp_mb__{before,after}_atomic() on older kernels.
+ */
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
+#endif
+
 /*
  * Cache expiration was implemented because it was part of the default Solaris
  * kmem_cache behavior. The idea is that per-cpu objects which haven't been
@@ -1110,8 +1124,10 @@ spl_cache_grow_work(void *data)
 	}
 
 	atomic_dec(&skc->skc_ref);
+	smp_mb__before_atomic();
 	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
 	clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
+	smp_mb__after_atomic();
 	wake_up_all(&skc->skc_waitq);
 	spin_unlock(&skc->skc_lock);
 
@@ -1164,7 +1180,8 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
 
 	ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
 	if (ska == NULL) {
-		clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
+		clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
+		smp_mb__after_atomic();
 		wake_up_all(&skc->skc_waitq);
 		return (-ENOMEM);
 	}
@@ -1616,8 +1633,8 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
 	}
 
 	spl_slab_reclaim(skc, count, 1);
-	clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
-	smp_wmb();
+	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
+	smp_mb__after_atomic();
 	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
 out:
 	atomic_dec(&skc->skc_ref);