Add atomic_sub_* functions to libspl.

The SPL and the ZFS libspl both export most of the atomic_* functions; the
exception is the atomic_sub_* family, which the SPL provides but libspl does
not. This patch remedies that by implementing the atomic_sub_* functions in
libspl.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #1013
Author:       Etienne Dechamps, 2012-06-27 10:26:49 +02:00
Committed by: Brian Behlendorf
Commit:       142e6dd100 (parent 82f46731fd)
4 changed files with 284 additions and 0 deletions
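For context, a minimal usage sketch of the new interfaces from a libspl consumer's point of view. The counter, the helper names, and the include path below are illustrative only and are not part of this commit:

#include <stdint.h>
#include <atomic.h>	/* libspl's atomic.h; install path may vary */

static volatile uint32_t inflight;

static void
io_start(void)
{
	atomic_add_32(&inflight, 1);	/* already exported by libspl */
}

static void
io_done(void)
{
	/* atomic_sub_32_nv() returns the new value, so the last caller sees 0. */
	if (atomic_sub_32_nv(&inflight, 1) == 0) {
		/* all outstanding work has drained */
	}
}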


@@ -103,6 +103,31 @@ void atomic_add_ptr(volatile void *target, ssize_t bits)
}
#define ATOMIC_SUB(name, type1, type2) \
void atomic_sub_##name(volatile type1 *target, type2 bits) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
*target -= bits; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
}
ATOMIC_SUB(8, uint8_t, int8_t)
ATOMIC_SUB(char, uchar_t, signed char)
ATOMIC_SUB(16, uint16_t, int16_t)
ATOMIC_SUB(short, ushort_t, short)
ATOMIC_SUB(32, uint32_t, int32_t)
ATOMIC_SUB(int, uint_t, int)
ATOMIC_SUB(long, ulong_t, long)
ATOMIC_SUB(64, uint64_t, int64_t)
void atomic_sub_ptr(volatile void *target, ssize_t bits)
{
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
*(caddr_t *)target -= bits;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
}
#define ATOMIC_OR(name, type) \
void atomic_or_##name(volatile type *target, type bits) \
{ \
@@ -216,6 +241,37 @@ void *atomic_add_ptr_nv(volatile void *target, ssize_t bits)
}
#define ATOMIC_SUB_NV(name, type1, type2) \
type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\
{ \
type1 rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (*target -= bits); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return rc; \
}
ATOMIC_SUB_NV(8, uint8_t, int8_t)
ATOMIC_SUB_NV(char, uchar_t, signed char)
ATOMIC_SUB_NV(16, uint16_t, int16_t)
ATOMIC_SUB_NV(short, ushort_t, short)
ATOMIC_SUB_NV(32, uint32_t, int32_t)
ATOMIC_SUB_NV(int, uint_t, int)
ATOMIC_SUB_NV(long, ulong_t, long)
ATOMIC_SUB_NV(64, uint64_t, int64_t)
void *atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
{
void *ptr;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
ptr = (*(caddr_t *)target -= bits);
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return ptr;
}
#define ATOMIC_OR_NV(name, type) \
type atomic_or_##name##_nv(volatile type *target, type bits) \
{ \

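For reference, the ATOMIC_SUB_NV() macro above expands, for the 32-bit instantiation, to roughly the code below. The atomic_lock mutex and the VERIFY3S() macro come from the surrounding file; every operation in this generic fallback implementation is serialized on that single global mutex:

uint32_t
atomic_sub_32_nv(volatile uint32_t *target, int32_t bits)
{
	uint32_t rc;

	VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
	rc = (*target -= bits);	/* subtract and capture the new value */
	VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
	return rc;
}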

@@ -271,6 +271,40 @@
SET_SIZE(atomic_add_int)
SET_SIZE(atomic_add_32)
ENTRY(atomic_sub_8)
ALTENTRY(atomic_sub_char)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
subb %cl, (%eax)
ret
SET_SIZE(atomic_sub_char)
SET_SIZE(atomic_sub_8)
ENTRY(atomic_sub_16)
ALTENTRY(atomic_sub_short)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
subw %cx, (%eax)
ret
SET_SIZE(atomic_sub_short)
SET_SIZE(atomic_sub_16)
ENTRY(atomic_sub_32)
ALTENTRY(atomic_sub_int)
ALTENTRY(atomic_sub_ptr)
ALTENTRY(atomic_sub_long)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
subl %ecx, (%eax)
ret
SET_SIZE(atomic_sub_long)
SET_SIZE(atomic_sub_ptr)
SET_SIZE(atomic_sub_int)
SET_SIZE(atomic_sub_32)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
movl 4(%esp), %eax
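The void-returning atomic_sub_* entries added above each reduce to a single lock-prefixed sub instruction on the target. For comparison only (not part of this patch, and the function name is made up), the same effect expressed in C with the GCC/Clang __atomic builtins:

#include <stdint.h>

void
atomic_sub_32_equiv(volatile uint32_t *target, int32_t bits)
{
	/* One atomic read-modify-write, result discarded, full barrier. */
	(void) __atomic_fetch_sub(target, (uint32_t)bits, __ATOMIC_SEQ_CST);
}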
@@ -384,6 +418,55 @@
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_sub_8_nv)
ALTENTRY(atomic_sub_char_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movb %al, %cl
subb 8(%esp), %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_sub_char_nv)
SET_SIZE(atomic_sub_8_nv)
ENTRY(atomic_sub_16_nv)
ALTENTRY(atomic_sub_short_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movw %ax, %cx
subw 8(%esp), %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_sub_short_nv)
SET_SIZE(atomic_sub_16_nv)
ENTRY(atomic_sub_32_nv)
ALTENTRY(atomic_sub_int_nv)
ALTENTRY(atomic_sub_ptr_nv)
ALTENTRY(atomic_sub_long_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl %eax, %ecx
subl 8(%esp), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_sub_long_nv)
SET_SIZE(atomic_sub_ptr_nv)
SET_SIZE(atomic_sub_int_nv)
SET_SIZE(atomic_sub_32_nv)
/*
* NOTE: If atomic_add_64 and atomic_add_64_nv are ever
* separated, it is important to edit the libc i386 platform
@@ -413,6 +496,29 @@
SET_SIZE(atomic_add_64_nv)
SET_SIZE(atomic_add_64)
ENTRY(atomic_sub_64)
ALTENTRY(atomic_sub_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl %eax, %ebx
movl %edx, %ecx
subl 16(%esp), %ebx
sbbl 20(%esp), %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_sub_64_nv)
SET_SIZE(atomic_sub_64)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movl 4(%esp), %edx

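Unlike the plain variants, the *_nv entries in the i386 assembly above must also return the resulting value, so they run a load / compute / lock cmpxchg retry loop (cmpxchg8b for the 64-bit case). A rough C rendering of that loop, shown with the GCC __atomic builtins purely as illustration and not part of the patch:

#include <stdint.h>

uint32_t
sub_32_nv_sketch(volatile uint32_t *target, int32_t bits)
{
	uint32_t oldval, newval;

	oldval = *target;			/* initial snapshot */
	do {
		newval = oldval - (uint32_t)bits;
		/* On failure, oldval is refreshed with the current contents. */
	} while (!__atomic_compare_exchange_n(target, &oldval, newval,
	    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return newval;
}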

@@ -232,6 +232,40 @@
SET_SIZE(atomic_add_ptr)
SET_SIZE(atomic_add_64)
ENTRY(atomic_sub_8)
ALTENTRY(atomic_sub_char)
lock
subb %sil, (%rdi)
ret
SET_SIZE(atomic_sub_char)
SET_SIZE(atomic_sub_8)
ENTRY(atomic_sub_16)
ALTENTRY(atomic_sub_short)
lock
subw %si, (%rdi)
ret
SET_SIZE(atomic_sub_short)
SET_SIZE(atomic_sub_16)
ENTRY(atomic_sub_32)
ALTENTRY(atomic_sub_int)
lock
subl %esi, (%rdi)
ret
SET_SIZE(atomic_sub_int)
SET_SIZE(atomic_sub_32)
ENTRY(atomic_sub_64)
ALTENTRY(atomic_sub_ptr)
ALTENTRY(atomic_sub_long)
lock
subq %rsi, (%rdi)
ret
SET_SIZE(atomic_sub_long)
SET_SIZE(atomic_sub_ptr)
SET_SIZE(atomic_sub_64)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
lock
@@ -354,6 +388,64 @@
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_64_nv)
ENTRY(atomic_sub_8_nv)
ALTENTRY(atomic_sub_char_nv)
movb (%rdi), %al
1:
movb %al, %cl
subb %sil, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_sub_char_nv)
SET_SIZE(atomic_sub_8_nv)
ENTRY(atomic_sub_16_nv)
ALTENTRY(atomic_sub_short_nv)
movw (%rdi), %ax
1:
movw %ax, %cx
subw %si, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_sub_short_nv)
SET_SIZE(atomic_sub_16_nv)
ENTRY(atomic_sub_32_nv)
ALTENTRY(atomic_sub_int_nv)
movl (%rdi), %eax
1:
movl %eax, %ecx
subl %esi, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_sub_int_nv)
SET_SIZE(atomic_sub_32_nv)
ENTRY(atomic_sub_64_nv)
ALTENTRY(atomic_sub_ptr_nv)
ALTENTRY(atomic_sub_long_nv)
movq (%rdi), %rax
1:
movq %rax, %rcx
subq %rsi, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_sub_long_nv)
SET_SIZE(atomic_sub_ptr_nv)
SET_SIZE(atomic_sub_64_nv)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movb (%rdi), %al

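A quick way to sanity-check the new amd64 (and i386) exports is a small program linked against libspl. Everything below — the file name, the include path, and the build line — is illustrative and not part of this commit:

/* cc atomic_sub_test.c -o atomic_sub_test <plus the libspl objects or library> */
#include <assert.h>
#include <stdint.h>
#include <atomic.h>

int
main(void)
{
	volatile uint64_t v = 100;

	atomic_sub_64(&v, 25);			/* plain variant, no return value */
	assert(v == 75);

	assert(atomic_sub_64_nv(&v, 75) == 0);	/* _nv variant returns the new value */

	return 0;
}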

@@ -78,6 +78,21 @@ extern void atomic_add_long(volatile ulong_t *, long);
extern void atomic_add_64(volatile uint64_t *, int64_t);
#endif
/*
* Subtract delta from target
*/
extern void atomic_sub_8(volatile uint8_t *, int8_t);
extern void atomic_sub_char(volatile uchar_t *, signed char);
extern void atomic_sub_16(volatile uint16_t *, int16_t);
extern void atomic_sub_short(volatile ushort_t *, short);
extern void atomic_sub_32(volatile uint32_t *, int32_t);
extern void atomic_sub_int(volatile uint_t *, int);
extern void atomic_sub_ptr(volatile void *, ssize_t);
extern void atomic_sub_long(volatile ulong_t *, long);
#if defined(_INT64_TYPE)
extern void atomic_sub_64(volatile uint64_t *, int64_t);
#endif
/*
* logical OR bits with target
*/
@@ -157,6 +172,21 @@ extern ulong_t atomic_add_long_nv(volatile ulong_t *, long);
extern uint64_t atomic_add_64_nv(volatile uint64_t *, int64_t);
#endif
/*
* Subtract delta from target and return new value
*/
extern uint8_t atomic_sub_8_nv(volatile uint8_t *, int8_t);
extern uchar_t atomic_sub_char_nv(volatile uchar_t *, signed char);
extern uint16_t atomic_sub_16_nv(volatile uint16_t *, int16_t);
extern ushort_t atomic_sub_short_nv(volatile ushort_t *, short);
extern uint32_t atomic_sub_32_nv(volatile uint32_t *, int32_t);
extern uint_t atomic_sub_int_nv(volatile uint_t *, int);
extern void *atomic_sub_ptr_nv(volatile void *, ssize_t);
extern ulong_t atomic_sub_long_nv(volatile ulong_t *, long);
#if defined(_INT64_TYPE)
extern uint64_t atomic_sub_64_nv(volatile uint64_t *, int64_t);
#endif
/*
* logical OR bits with target and return new value.
*/
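The atomic_sub_ptr()/atomic_sub_ptr_nv() declarations treat the target as a byte-addressed pointer (a caddr_t in the generic implementation), so the delta is a byte count. A hypothetical helper, not taken from the ZFS code base, that atomically moves a shared cursor back by len bytes and returns its new position:

#include <sys/types.h>
#include <atomic.h>	/* illustrative include path */

static char *
cursor_rewind(volatile void *cursorp, ssize_t len)
{
	/* cursorp points at a shared (char *) variable */
	return atomic_sub_ptr_nv(cursorp, len);
}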