diff --git a/include/os/freebsd/spl/sys/atomic.h b/include/os/freebsd/spl/sys/atomic.h
index e283c6c0e..4227e5f7d 100644
--- a/include/os/freebsd/spl/sys/atomic.h
+++ b/include/os/freebsd/spl/sys/atomic.h
@@ -32,79 +32,30 @@
 #include
 #include
 
-#define casptr(_a, _b, _c) \
-	atomic_cmpset_ptr((volatile uintptr_t *)(_a), \
-	    (uintptr_t)(_b), \
-	    (uintptr_t)(_c))
-#define cas32 atomic_cmpset_32
-#define atomic_sub_64 atomic_subtract_64
+#define atomic_sub_64 atomic_subtract_64
 
-#if defined(__i386__) || defined(KLD_MODULE)
+#if defined(__i386__) && (defined(_KERNEL) || defined(KLD_MODULE))
 #define I386_HAVE_ATOMIC64
 #endif
 
+#if defined(__i386__) || defined(__amd64__) || defined(__arm__)
+/* No spurious failures from fcmpset. */
+#define STRONG_FCMPSET
+#endif
+
 #if !defined(__LP64__) && !defined(__mips_n32) && \
-	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
+	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+	!defined(HAS_EMULATED_ATOMIC64)
 extern void atomic_add_64(volatile uint64_t *target, int64_t delta);
 extern void atomic_dec_64(volatile uint64_t *target);
-#endif
-#ifndef __sparc64__
-#if defined(__LP64__) || defined(__mips_n32) || \
-	defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
-
-#define membar_producer() wmb()
-
-static __inline uint64_t
-atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
-{
-
-#ifdef __i386__
-	atomic_fcmpset_64(target, &cmp, newval);
-#else
-	atomic_fcmpset_long(target, &cmp, newval);
-#endif
-	return (cmp);
-}
-
-static __inline uint32_t
-atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
-{
-
-	atomic_fcmpset_int(target, &cmp, newval);
-	return (cmp);
-}
-
-static __inline uint64_t
-atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
-{
-	uint64_t prev;
-
-	prev = atomic_fetchadd_long(target, delta);
-
-	return (prev + delta);
-}
-
-#else
-extern uint32_t atomic_cas_32(volatile uint32_t *target, uint32_t cmp,
-    uint32_t newval);
+extern uint64_t atomic_swap_64(volatile uint64_t *a, uint64_t value);
+extern uint64_t atomic_load_64(volatile uint64_t *a);
+extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
 extern uint64_t atomic_cas_64(volatile uint64_t *target, uint64_t cmp,
     uint64_t newval);
-extern uint64_t atomic_add_64_nv(volatile uint64_t *target, int64_t delta);
-extern void membar_producer(void);
 #endif
-#endif
 
-extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
-#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \
-	defined(__mips__) || defined(__aarch64__) || defined(__riscv)
-extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
-#else
-static __inline void
-atomic_or_8(volatile uint8_t *target, uint8_t value)
-{
-	atomic_set_8(target, value);
-}
-#endif
+#define membar_producer atomic_thread_fence_rel
 
 static __inline uint32_t
 atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
@@ -112,33 +63,12 @@ atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
 {
 	return (atomic_fetchadd_32(target, delta) + delta);
 }
 
-static __inline uint32_t
-atomic_add_int_nv(volatile uint32_t *target, int delta)
+static __inline uint_t
+atomic_add_int_nv(volatile uint_t *target, int delta)
 {
 	return (atomic_add_32_nv(target, delta));
 }
 
-static __inline void
-atomic_dec_32(volatile uint32_t *target)
-{
-	atomic_subtract_32(target, 1);
-}
-
-static __inline uint32_t
-atomic_dec_32_nv(volatile uint32_t *target)
-{
-	return (atomic_fetchadd_32(target, -1) - 1);
-}
-
-#if defined(__LP64__) || defined(__mips_n32) || \
-	defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64)
-static __inline void
-atomic_dec_64(volatile uint64_t *target)
-{
-	atomic_subtract_64(target, 1);
-}
-#endif
-
 static __inline void
 atomic_inc_32(volatile uint32_t *target)
 {
@@ -151,6 +81,70 @@ atomic_inc_32_nv(volatile uint32_t *target)
 {
 	return (atomic_add_32_nv(target, 1));
 }
 
+static __inline void
+atomic_dec_32(volatile uint32_t *target)
+{
+	atomic_subtract_32(target, 1);
+}
+
+static __inline uint32_t
+atomic_dec_32_nv(volatile uint32_t *target)
+{
+	return (atomic_add_32_nv(target, -1));
+}
+
+#ifndef __sparc64__
+static inline uint32_t
+atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
+{
+#ifdef STRONG_FCMPSET
+	(void) atomic_fcmpset_32(target, &cmp, newval);
+#else
+	uint32_t expected = cmp;
+
+	do {
+		if (atomic_fcmpset_32(target, &cmp, newval))
+			break;
+	} while (cmp == expected);
+#endif
+	return (cmp);
+}
+#endif
+
+#if defined(__LP64__) || defined(__mips_n32) || \
+	defined(ARM_HAVE_ATOMIC64) || defined(I386_HAVE_ATOMIC64) || \
+	defined(HAS_EMULATED_ATOMIC64)
+static __inline void
+atomic_dec_64(volatile uint64_t *target)
+{
+	atomic_subtract_64(target, 1);
+}
+
+static inline uint64_t
+atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
+{
+	return (atomic_fetchadd_64(target, delta) + delta);
+}
+
+#ifndef __sparc64__
+static inline uint64_t
+atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
+{
+#ifdef STRONG_FCMPSET
+	(void) atomic_fcmpset_64(target, &cmp, newval);
+#else
+	uint64_t expected = cmp;
+
+	do {
+		if (atomic_fcmpset_64(target, &cmp, newval))
+			break;
+	} while (cmp == expected);
+#endif
+	return (cmp);
+}
+#endif
+#endif
+
 static __inline void
 atomic_inc_64(volatile uint64_t *target)
 {
diff --git a/module/os/freebsd/spl/spl_atomic.c b/module/os/freebsd/spl/spl_atomic.c
index e82fed847..80040fc6a 100644
--- a/module/os/freebsd/spl/spl_atomic.c
+++ b/module/os/freebsd/spl/spl_atomic.c
@@ -32,6 +32,10 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 
+#if !defined(__LP64__) && !defined(__mips_n32) && \
+	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64) && \
+	!defined(HAS_EMULATED_ATOMIC64)
+
 #ifdef _KERNEL
 #include
 
@@ -52,8 +56,6 @@ atomic_init(void)
 }
 #endif
 
-#if !defined(__LP64__) && !defined(__mips_n32) && \
-	!defined(ARM_HAVE_ATOMIC64) && !defined(I386_HAVE_ATOMIC64)
 void
 atomic_add_64(volatile uint64_t *target, int64_t delta)
 {
@@ -71,7 +73,29 @@ atomic_dec_64(volatile uint64_t *target)
 	*target -= 1;
 	mtx_unlock(&atomic_mtx);
 }
-#endif
+
+uint64_t
+atomic_swap_64(volatile uint64_t *a, uint64_t value)
+{
+	uint64_t ret;
+
+	mtx_lock(&atomic_mtx);
+	ret = *a;
+	*a = value;
+	mtx_unlock(&atomic_mtx);
+	return (ret);
+}
+
+uint64_t
+atomic_load_64(volatile uint64_t *a)
+{
+	uint64_t ret;
+
+	mtx_lock(&atomic_mtx);
+	ret = *a;
+	mtx_unlock(&atomic_mtx);
+	return (ret);
+}
 
 uint64_t
 atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
@@ -84,27 +108,6 @@ atomic_add_64_nv(volatile uint64_t *target, int64_t delta)
 	return (newval);
 }
 
-#if defined(__powerpc__) || defined(__arm__) || defined(__mips__)
-void
-atomic_or_8(volatile uint8_t *target, uint8_t value)
-{
-	mtx_lock(&atomic_mtx);
-	*target |= value;
-	mtx_unlock(&atomic_mtx);
-}
-#endif
-
-uint8_t
-atomic_or_8_nv(volatile uint8_t *target, uint8_t value)
-{
-	uint8_t newval;
-
-	mtx_lock(&atomic_mtx);
-	newval = (*target |= value);
-	mtx_unlock(&atomic_mtx);
-	return (newval);
-}
-
 uint64_t
 atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 {
@@ -117,22 +120,4 @@ atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
 	mtx_unlock(&atomic_mtx);
 	return (oldval);
 }
-
-uint32_t
-atomic_cas_32(volatile uint32_t *target, uint32_t cmp, uint32_t newval)
-{
-	uint32_t oldval;
-
-	mtx_lock(&atomic_mtx);
-	oldval = *target;
-	if (oldval == cmp)
-		*target = newval;
-	mtx_unlock(&atomic_mtx);
-	return (oldval);
-}
-
-void
-membar_producer(void)
-{
-	/* nothing */
-}
+#endif
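
Note on the STRONG_FCMPSET path above: atomic_fcmpset_* may fail spuriously on some architectures, so the fallback in atomic_cas_32/atomic_cas_64 retries until the failure reflects a real value mismatch. Below is a minimal userland sketch of that same retry pattern, using C11 <stdatomic.h> rather than FreeBSD's machine/atomic.h; the function name and the values in main() are illustrative only and are not part of the patch.

/*
 * Sketch of the weak-CAS retry pattern (assumes C11 <stdatomic.h>).
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
cas_32_sketch(_Atomic uint32_t *target, uint32_t cmp, uint32_t newval)
{
	uint32_t expected = cmp;

	do {
		/* A weak CAS may fail even though *target == cmp. */
		if (atomic_compare_exchange_weak(target, &cmp, newval))
			break;
		/*
		 * cmp now holds the observed value; retry only while it
		 * still matches what the caller asked to compare against.
		 */
	} while (cmp == expected);
	return (cmp);	/* Old value; equals expected on success. */
}

int
main(void)
{
	_Atomic uint32_t v = 5;
	uint32_t old;

	old = cas_32_sketch(&v, 5, 7);	/* succeeds: 5 -> 7 */
	printf("old=%u now=%u\n", old, (unsigned)atomic_load(&v));
	old = cas_32_sketch(&v, 5, 9);	/* fails: v is 7, not 5 */
	printf("old=%u now=%u\n", old, (unsigned)atomic_load(&v));
	return (0);
}

On architectures listed under STRONG_FCMPSET the loop is unnecessary, which is why the patch compiles it out there and issues a single fcmpset instead.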