libspl: implement atomics in terms of atomics

This replaces the generic libspl atomic.c atomics implementation
with one based on builtin gcc atomics.  This functionality was added
as an experimental feature in gcc 4.4.  Today even CentOS 7 ships
with gcc 4.8 as the default compiler, so we can make this the default.

Furthermore, the builtin atomics are as good as or better than our
hand-rolled implementation, so it's reasonable to drop that custom code.
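
As a rough illustration (not part of the commit itself), the change boils
down to replacing mutex-serialized read-modify-write sequences with single
GCC/Clang __atomic builtins; the function name below is hypothetical, but the
builtin and the __ATOMIC_SEQ_CST ordering are the ones the new atomic.c uses:

#include <stdint.h>

/* "increment and return the new value", in the shape of the old *_nv API */
static inline uint64_t
example_inc_64_nv(volatile uint64_t *target)
{
	/* one sequentially consistent builtin replaces lock/increment/unlock */
	return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST));
}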

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Closes #11904
Closes #12252
Closes #12244
Commit: 9a865b7fb7 (parent: 3de7aeb68a)
Author: наб, 2021-04-19 07:13:24 +02:00
Committed by: Brian Behlendorf
8 changed files with 84 additions and 1697 deletions


@@ -45,7 +45,7 @@ zed_LDADD = \
$(abs_top_builddir)/lib/libnvpair/libnvpair.la \
$(abs_top_builddir)/lib/libuutil/libuutil.la
zed_LDADD += -lrt $(LIBUDEV_LIBS) $(LIBUUID_LIBS)
zed_LDADD += -lrt $(LIBATOMIC_LIBS) $(LIBUDEV_LIBS) $(LIBUUID_LIBS)
zed_LDFLAGS = -pthread
EXTRA_DIST = agents/README.md

config/user-libatomic.m4 (new file, 34 lines)

@@ -0,0 +1,34 @@
dnl #
dnl # If -latomic exists, it's needed for __atomic intrinsics.
dnl #
dnl # Some systems (like FreeBSD 13) don't have a libatomic at all, because
dnl # their toolchain doesn't ship it; they obviously don't need it.
dnl #
dnl # Others (like sufficiently ancient CentOS) have one, but it's
dnl # terminally broken or unlinkable (e.g. it's a dangling symlink,
dnl # or a linker script that points to a nonexistent file);
dnl # most arches affected by this don't actually need -latomic (and if they
dnl # do, then they should have a libatomic that actually exists and links,
dnl # so they don't fall into this category).
dnl #
dnl # Technically, we could check if the platform *actually* needs -latomic,
dnl # or if it has native support for all the intrinsics we use,
dnl # but it /really/ doesn't matter, and C11 recommends always linking it.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_USER_LIBATOMIC], [
AC_MSG_CHECKING([whether -latomic is present])
saved_libs="$LIBS"
LIBS="$LIBS -latomic"
AC_LINK_IFELSE([AC_LANG_PROGRAM([], [])], [
LIBATOMIC_LIBS="-latomic"
AC_MSG_RESULT([yes])
], [
LIBATOMIC_LIBS=""
AC_MSG_RESULT([no])
])
LIBS="$saved_libs"
AC_SUBST([LIBATOMIC_LIBS])
])
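
For context on the comment in user-libatomic.m4 above: on targets without
native support for some operand widths, the compiler lowers __atomic builtins
to out-of-line helpers provided by libatomic, which is why the probe simply
links -latomic whenever it is available. A hypothetical example of such a
case, not taken from the commit:

#include <stdint.h>

/*
 * On a 32-bit target without native 8-byte atomics this typically becomes
 * a call into libatomic (e.g. __atomic_load_8); on x86_64 it is inlined
 * and no extra library is needed.
 */
uint64_t
example_read_counter(const volatile uint64_t *counter)
{
	return (__atomic_load_n(counter, __ATOMIC_SEQ_CST));
}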


@@ -21,6 +21,7 @@ AC_DEFUN([ZFS_AC_CONFIG_USER], [
ZFS_AC_CONFIG_USER_LIBUDEV
ZFS_AC_CONFIG_USER_LIBCRYPTO
ZFS_AC_CONFIG_USER_LIBAIO
ZFS_AC_CONFIG_USER_LIBATOMIC
ZFS_AC_CONFIG_USER_CLOCK_GETTIME
ZFS_AC_CONFIG_USER_PAM
ZFS_AC_CONFIG_USER_RUNSTATEDIR


@@ -1,26 +1,14 @@
include $(top_srcdir)/config/Rules.am
if TARGET_CPU_I386
TARGET_CPU_ATOMIC_SOURCE = asm-i386/atomic.S
else
if TARGET_CPU_X86_64
TARGET_CPU_ATOMIC_SOURCE = asm-x86_64/atomic.S
else
TARGET_CPU_ATOMIC_SOURCE = asm-generic/atomic.c
endif
endif
SUBDIRS = include
AM_CCASFLAGS = \
$(CFLAGS)
noinst_LTLIBRARIES = libspl_assert.la libspl.la
libspl_assert_la_SOURCES = \
assert.c
USER_C = \
atomic.c \
list.c \
mkdirp.c \
page.c \
@@ -47,20 +35,11 @@ USER_C += \
os/freebsd/zone.c
endif
libspl_la_SOURCES = \
$(USER_C) \
$(TARGET_CPU_ATOMIC_SOURCE)
libspl_la_SOURCES = $(USER_C)
libspl_la_LIBADD = \
libspl_assert.la
libspl_la_LIBADD += $(LIBCLOCK_GETTIME)
libspl_la_LIBADD += $(LIBATOMIC_LIBS) $(LIBCLOCK_GETTIME)
include $(top_srcdir)/config/CppCheck.am
# Override the default SOURCES which includes TARGET_CPU_ATOMIC_SOURCE
# in order to always evaluate the generic asm-generic/atomic.c source.
CPPCHECKSRC = $(USER_C) asm-generic/atomic.c
cppcheck:
$(CPPCHECK) -j$(CPU_COUNT) $(CPPCHECKFLAGS) --force \
$(DEFAULT_INCLUDES) $(CPPCHECKSRC)


@@ -1 +0,0 @@
/atomic.S

lib/libspl/asm-i386/atomic.S (deleted)

@@ -1,840 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
.ident "%Z%%M% %I% %E% SMI"
.file "%M%"
#define _ASM
#ifdef __linux__
#include <ia32/sys/asm_linkage.h>
#elif __FreeBSD__
#include <machine/asmacros.h>
#define SET_SIZE(x)
#endif
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
movl 4(%esp), %eax
lock
incb (%eax)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
movl 4(%esp), %eax
lock
incw (%eax)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
ALTENTRY(atomic_inc_ulong)
movl 4(%esp), %eax
lock
incl (%eax)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
leal 1(%eax), %ecx
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
leal 1(%eax), %ecx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
ALTENTRY(atomic_inc_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
leal 1(%eax), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
/*
* NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
* separated, you need to also edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_inc_64_nv.
*/
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
incl %ebx
addl %eax, %ebx
adcl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_inc_64_nv)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
movl 4(%esp), %eax
lock
decb (%eax)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
movl 4(%esp), %eax
lock
decw (%eax)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
ALTENTRY(atomic_dec_ulong)
movl 4(%esp), %eax
lock
decl (%eax)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
leal -1(%eax), %ecx
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
leal -1(%eax), %ecx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
ALTENTRY(atomic_dec_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
leal -1(%eax), %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
/*
* NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_dec_64_nv.
*/
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
xorl %ebx, %ebx
xorl %ecx, %ecx
not %ecx
not %ebx
addl %eax, %ebx
adcl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_dec_64_nv)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_add_8)
ALTENTRY(atomic_add_char)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
addb %cl, (%eax)
ret
SET_SIZE(atomic_add_char)
SET_SIZE(atomic_add_8)
ENTRY(atomic_add_16)
ALTENTRY(atomic_add_short)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
addw %cx, (%eax)
ret
SET_SIZE(atomic_add_short)
SET_SIZE(atomic_add_16)
ENTRY(atomic_add_32)
ALTENTRY(atomic_add_int)
ALTENTRY(atomic_add_ptr)
ALTENTRY(atomic_add_long)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
addl %ecx, (%eax)
ret
SET_SIZE(atomic_add_long)
SET_SIZE(atomic_add_ptr)
SET_SIZE(atomic_add_int)
SET_SIZE(atomic_add_32)
ENTRY(atomic_sub_8)
ALTENTRY(atomic_sub_char)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
subb %cl, (%eax)
ret
SET_SIZE(atomic_sub_char)
SET_SIZE(atomic_sub_8)
ENTRY(atomic_sub_16)
ALTENTRY(atomic_sub_short)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
subw %cx, (%eax)
ret
SET_SIZE(atomic_sub_short)
SET_SIZE(atomic_sub_16)
ENTRY(atomic_sub_32)
ALTENTRY(atomic_sub_int)
ALTENTRY(atomic_sub_ptr)
ALTENTRY(atomic_sub_long)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
subl %ecx, (%eax)
ret
SET_SIZE(atomic_sub_long)
SET_SIZE(atomic_sub_ptr)
SET_SIZE(atomic_sub_int)
SET_SIZE(atomic_sub_32)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
orb %cl, (%eax)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
orw %cx, (%eax)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
ALTENTRY(atomic_or_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
orl %ecx, (%eax)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
movl 4(%esp), %eax
movb 8(%esp), %cl
lock
andb %cl, (%eax)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
movl 4(%esp), %eax
movw 8(%esp), %cx
lock
andw %cx, (%eax)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
ALTENTRY(atomic_and_ulong)
movl 4(%esp), %eax
movl 8(%esp), %ecx
lock
andl %ecx, (%eax)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
addb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
addw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
addl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_sub_8_nv)
ALTENTRY(atomic_sub_char_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
subb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_sub_char_nv)
SET_SIZE(atomic_sub_8_nv)
ENTRY(atomic_sub_16_nv)
ALTENTRY(atomic_sub_short_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
subw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_sub_short_nv)
SET_SIZE(atomic_sub_16_nv)
ENTRY(atomic_sub_32_nv)
ALTENTRY(atomic_sub_int_nv)
ALTENTRY(atomic_sub_ptr_nv)
ALTENTRY(atomic_sub_long_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
subl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_sub_long_nv)
SET_SIZE(atomic_sub_ptr_nv)
SET_SIZE(atomic_sub_int_nv)
SET_SIZE(atomic_sub_32_nv)
/*
* NOTE: If atomic_add_64 and atomic_add_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_add_64_nv.
*/
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
addl %eax, %ebx
adcl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_add_64_nv)
SET_SIZE(atomic_add_64)
ENTRY(atomic_sub_64)
ALTENTRY(atomic_sub_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
subl %eax, %ebx
sbbl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_sub_64_nv)
SET_SIZE(atomic_sub_64)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
orb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_or_uchar_nv)
SET_SIZE(atomic_or_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
orw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
ALTENTRY(atomic_or_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
orl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
/*
* NOTE: If atomic_or_64 and atomic_or_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_or_64_nv.
*/
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
orl %eax, %ebx
orl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_or_64_nv)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movl 4(%esp), %edx
movb (%edx), %al
1:
movl 8(%esp), %ecx
andb %al, %cl
lock
cmpxchgb %cl, (%edx)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movl 4(%esp), %edx
movw (%edx), %ax
1:
movl 8(%esp), %ecx
andw %ax, %cx
lock
cmpxchgw %cx, (%edx)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
ALTENTRY(atomic_and_ulong_nv)
movl 4(%esp), %edx
movl (%edx), %eax
1:
movl 8(%esp), %ecx
andl %eax, %ecx
lock
cmpxchgl %ecx, (%edx)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
/*
* NOTE: If atomic_and_64 and atomic_and_64_nv are ever
* separated, it is important to edit the libc i386 platform
* specific mapfile and remove the NODYNSORT attribute
* from atomic_and_64_nv.
*/
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_64_nv)
pushl %edi
pushl %ebx
movl 12(%esp), %edi
movl (%edi), %eax
movl 4(%edi), %edx
1:
movl 16(%esp), %ebx
movl 20(%esp), %ecx
andl %eax, %ebx
andl %edx, %ecx
lock
cmpxchg8b (%edi)
jne 1b
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
popl %edi
ret
SET_SIZE(atomic_and_64_nv)
SET_SIZE(atomic_and_64)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
movb 12(%esp), %cl
lock
cmpxchgb %cl, (%edx)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
movw 12(%esp), %cx
lock
cmpxchgw %cx, (%edx)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
movl 4(%esp), %edx
movl 8(%esp), %eax
movl 12(%esp), %ecx
lock
cmpxchgl %ecx, (%edx)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
pushl %ebx
pushl %esi
movl 12(%esp), %esi
movl 16(%esp), %eax
movl 20(%esp), %edx
movl 24(%esp), %ebx
movl 28(%esp), %ecx
lock
cmpxchg8b (%esi)
popl %esi
popl %ebx
ret
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movl 4(%esp), %edx
movzbl 8(%esp), %eax
lock
xchgb %al, (%edx)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movl 4(%esp), %edx
movzwl 8(%esp), %eax
lock
xchgw %ax, (%edx)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
ALTENTRY(atomic_swap_ptr)
ALTENTRY(atomic_swap_ulong)
movl 4(%esp), %edx
movl 8(%esp), %eax
lock
xchgl %eax, (%edx)
ret
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
pushl %esi
pushl %ebx
movl 12(%esp), %esi
movl 16(%esp), %ebx
movl 20(%esp), %ecx
movl (%esi), %eax
movl 4(%esi), %edx
1:
lock
cmpxchg8b (%esi)
jne 1b
popl %ebx
popl %esi
ret
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
movl 4(%esp), %edx
movl 8(%esp), %ecx
xorl %eax, %eax
lock
btsl %ecx, (%edx)
jnc 1f
decl %eax
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
movl 4(%esp), %edx
movl 8(%esp), %ecx
xorl %eax, %eax
lock
btrl %ecx, (%edx)
jc 1f
decl %eax
1:
ret
SET_SIZE(atomic_clear_long_excl)
/*
* NOTE: membar_enter, membar_exit, membar_producer, and
* membar_consumer are all identical routines. We define them
* separately, instead of using ALTENTRY definitions to alias them
* together, so that DTrace and debuggers will see a unique address
* for them, allowing more accurate tracing.
*/
ENTRY(membar_enter)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_enter)
ENTRY(membar_exit)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_exit)
ENTRY(membar_producer)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_producer)
ENTRY(membar_consumer)
lock
xorl $0, (%esp)
ret
SET_SIZE(membar_consumer)
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif

lib/libspl/asm-x86_64/atomic.S (deleted)

@@ -1,691 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2007 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
.ident "%Z%%M% %I% %E% SMI"
.file "%M%"
#define _ASM
#ifdef __linux__
#include <ia32/sys/asm_linkage.h>
#elif __FreeBSD__
#include <machine/asmacros.h>
#define SET_SIZE(x)
#endif
ENTRY(atomic_inc_8)
ALTENTRY(atomic_inc_uchar)
lock
incb (%rdi)
ret
SET_SIZE(atomic_inc_uchar)
SET_SIZE(atomic_inc_8)
ENTRY(atomic_inc_16)
ALTENTRY(atomic_inc_ushort)
lock
incw (%rdi)
ret
SET_SIZE(atomic_inc_ushort)
SET_SIZE(atomic_inc_16)
ENTRY(atomic_inc_32)
ALTENTRY(atomic_inc_uint)
lock
incl (%rdi)
ret
SET_SIZE(atomic_inc_uint)
SET_SIZE(atomic_inc_32)
ENTRY(atomic_inc_64)
ALTENTRY(atomic_inc_ulong)
lock
incq (%rdi)
ret
SET_SIZE(atomic_inc_ulong)
SET_SIZE(atomic_inc_64)
ENTRY(atomic_inc_8_nv)
ALTENTRY(atomic_inc_uchar_nv)
movb (%rdi), %al
1:
leaq 1(%rax), %rcx
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_inc_uchar_nv)
SET_SIZE(atomic_inc_8_nv)
ENTRY(atomic_inc_16_nv)
ALTENTRY(atomic_inc_ushort_nv)
movw (%rdi), %ax
1:
leaq 1(%rax), %rcx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_inc_ushort_nv)
SET_SIZE(atomic_inc_16_nv)
ENTRY(atomic_inc_32_nv)
ALTENTRY(atomic_inc_uint_nv)
movl (%rdi), %eax
1:
leaq 1(%rax), %rcx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_inc_uint_nv)
SET_SIZE(atomic_inc_32_nv)
ENTRY(atomic_inc_64_nv)
ALTENTRY(atomic_inc_ulong_nv)
movq (%rdi), %rax
1:
leaq 1(%rax), %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_inc_ulong_nv)
SET_SIZE(atomic_inc_64_nv)
ENTRY(atomic_dec_8)
ALTENTRY(atomic_dec_uchar)
lock
decb (%rdi)
ret
SET_SIZE(atomic_dec_uchar)
SET_SIZE(atomic_dec_8)
ENTRY(atomic_dec_16)
ALTENTRY(atomic_dec_ushort)
lock
decw (%rdi)
ret
SET_SIZE(atomic_dec_ushort)
SET_SIZE(atomic_dec_16)
ENTRY(atomic_dec_32)
ALTENTRY(atomic_dec_uint)
lock
decl (%rdi)
ret
SET_SIZE(atomic_dec_uint)
SET_SIZE(atomic_dec_32)
ENTRY(atomic_dec_64)
ALTENTRY(atomic_dec_ulong)
lock
decq (%rdi)
ret
SET_SIZE(atomic_dec_ulong)
SET_SIZE(atomic_dec_64)
ENTRY(atomic_dec_8_nv)
ALTENTRY(atomic_dec_uchar_nv)
movb (%rdi), %al
1:
leaq -1(%rax), %rcx
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_dec_uchar_nv)
SET_SIZE(atomic_dec_8_nv)
ENTRY(atomic_dec_16_nv)
ALTENTRY(atomic_dec_ushort_nv)
movw (%rdi), %ax
1:
leaq -1(%rax), %rcx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_dec_ushort_nv)
SET_SIZE(atomic_dec_16_nv)
ENTRY(atomic_dec_32_nv)
ALTENTRY(atomic_dec_uint_nv)
movl (%rdi), %eax
1:
leaq -1(%rax), %rcx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_dec_uint_nv)
SET_SIZE(atomic_dec_32_nv)
ENTRY(atomic_dec_64_nv)
ALTENTRY(atomic_dec_ulong_nv)
movq (%rdi), %rax
1:
leaq -1(%rax), %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_dec_ulong_nv)
SET_SIZE(atomic_dec_64_nv)
ENTRY(atomic_add_8)
ALTENTRY(atomic_add_char)
lock
addb %sil, (%rdi)
ret
SET_SIZE(atomic_add_char)
SET_SIZE(atomic_add_8)
ENTRY(atomic_add_16)
ALTENTRY(atomic_add_short)
lock
addw %si, (%rdi)
ret
SET_SIZE(atomic_add_short)
SET_SIZE(atomic_add_16)
ENTRY(atomic_add_32)
ALTENTRY(atomic_add_int)
lock
addl %esi, (%rdi)
ret
SET_SIZE(atomic_add_int)
SET_SIZE(atomic_add_32)
ENTRY(atomic_add_64)
ALTENTRY(atomic_add_ptr)
ALTENTRY(atomic_add_long)
lock
addq %rsi, (%rdi)
ret
SET_SIZE(atomic_add_long)
SET_SIZE(atomic_add_ptr)
SET_SIZE(atomic_add_64)
ENTRY(atomic_sub_8)
ALTENTRY(atomic_sub_char)
lock
subb %sil, (%rdi)
ret
SET_SIZE(atomic_sub_char)
SET_SIZE(atomic_sub_8)
ENTRY(atomic_sub_16)
ALTENTRY(atomic_sub_short)
lock
subw %si, (%rdi)
ret
SET_SIZE(atomic_sub_short)
SET_SIZE(atomic_sub_16)
ENTRY(atomic_sub_32)
ALTENTRY(atomic_sub_int)
lock
subl %esi, (%rdi)
ret
SET_SIZE(atomic_sub_int)
SET_SIZE(atomic_sub_32)
ENTRY(atomic_sub_64)
ALTENTRY(atomic_sub_ptr)
ALTENTRY(atomic_sub_long)
lock
subq %rsi, (%rdi)
ret
SET_SIZE(atomic_sub_long)
SET_SIZE(atomic_sub_ptr)
SET_SIZE(atomic_sub_64)
ENTRY(atomic_or_8)
ALTENTRY(atomic_or_uchar)
lock
orb %sil, (%rdi)
ret
SET_SIZE(atomic_or_uchar)
SET_SIZE(atomic_or_8)
ENTRY(atomic_or_16)
ALTENTRY(atomic_or_ushort)
lock
orw %si, (%rdi)
ret
SET_SIZE(atomic_or_ushort)
SET_SIZE(atomic_or_16)
ENTRY(atomic_or_32)
ALTENTRY(atomic_or_uint)
lock
orl %esi, (%rdi)
ret
SET_SIZE(atomic_or_uint)
SET_SIZE(atomic_or_32)
ENTRY(atomic_or_64)
ALTENTRY(atomic_or_ulong)
lock
orq %rsi, (%rdi)
ret
SET_SIZE(atomic_or_ulong)
SET_SIZE(atomic_or_64)
ENTRY(atomic_and_8)
ALTENTRY(atomic_and_uchar)
lock
andb %sil, (%rdi)
ret
SET_SIZE(atomic_and_uchar)
SET_SIZE(atomic_and_8)
ENTRY(atomic_and_16)
ALTENTRY(atomic_and_ushort)
lock
andw %si, (%rdi)
ret
SET_SIZE(atomic_and_ushort)
SET_SIZE(atomic_and_16)
ENTRY(atomic_and_32)
ALTENTRY(atomic_and_uint)
lock
andl %esi, (%rdi)
ret
SET_SIZE(atomic_and_uint)
SET_SIZE(atomic_and_32)
ENTRY(atomic_and_64)
ALTENTRY(atomic_and_ulong)
lock
andq %rsi, (%rdi)
ret
SET_SIZE(atomic_and_ulong)
SET_SIZE(atomic_and_64)
ENTRY(atomic_add_8_nv)
ALTENTRY(atomic_add_char_nv)
movb (%rdi), %al
1:
movb %sil, %cl
addb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_add_char_nv)
SET_SIZE(atomic_add_8_nv)
ENTRY(atomic_add_16_nv)
ALTENTRY(atomic_add_short_nv)
movw (%rdi), %ax
1:
movw %si, %cx
addw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_add_short_nv)
SET_SIZE(atomic_add_16_nv)
ENTRY(atomic_add_32_nv)
ALTENTRY(atomic_add_int_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
addl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_add_int_nv)
SET_SIZE(atomic_add_32_nv)
ENTRY(atomic_add_64_nv)
ALTENTRY(atomic_add_ptr_nv)
ALTENTRY(atomic_add_long_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
addq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_add_long_nv)
SET_SIZE(atomic_add_ptr_nv)
SET_SIZE(atomic_add_64_nv)
ENTRY(atomic_sub_8_nv)
ALTENTRY(atomic_sub_char_nv)
movb (%rdi), %al
1:
movb %sil, %cl
subb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_sub_char_nv)
SET_SIZE(atomic_sub_8_nv)
ENTRY(atomic_sub_16_nv)
ALTENTRY(atomic_sub_short_nv)
movw (%rdi), %ax
1:
movw %si, %cx
subw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_sub_short_nv)
SET_SIZE(atomic_sub_16_nv)
ENTRY(atomic_sub_32_nv)
ALTENTRY(atomic_sub_int_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
subl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_sub_int_nv)
SET_SIZE(atomic_sub_32_nv)
ENTRY(atomic_sub_64_nv)
ALTENTRY(atomic_sub_ptr_nv)
ALTENTRY(atomic_sub_long_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
subq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_sub_long_nv)
SET_SIZE(atomic_sub_ptr_nv)
SET_SIZE(atomic_sub_64_nv)
ENTRY(atomic_and_8_nv)
ALTENTRY(atomic_and_uchar_nv)
movb (%rdi), %al
1:
movb %sil, %cl
andb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_and_16_nv)
ALTENTRY(atomic_and_ushort_nv)
movw (%rdi), %ax
1:
movw %si, %cx
andw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_and_ushort_nv)
SET_SIZE(atomic_and_16_nv)
ENTRY(atomic_and_32_nv)
ALTENTRY(atomic_and_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
andl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_and_uint_nv)
SET_SIZE(atomic_and_32_nv)
ENTRY(atomic_and_64_nv)
ALTENTRY(atomic_and_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
andq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_and_ulong_nv)
SET_SIZE(atomic_and_64_nv)
ENTRY(atomic_or_8_nv)
ALTENTRY(atomic_or_uchar_nv)
movb (%rdi), %al
1:
movb %sil, %cl
orb %al, %cl
lock
cmpxchgb %cl, (%rdi)
jne 1b
movzbl %cl, %eax
ret
SET_SIZE(atomic_and_uchar_nv)
SET_SIZE(atomic_and_8_nv)
ENTRY(atomic_or_16_nv)
ALTENTRY(atomic_or_ushort_nv)
movw (%rdi), %ax
1:
movw %si, %cx
orw %ax, %cx
lock
cmpxchgw %cx, (%rdi)
jne 1b
movzwl %cx, %eax
ret
SET_SIZE(atomic_or_ushort_nv)
SET_SIZE(atomic_or_16_nv)
ENTRY(atomic_or_32_nv)
ALTENTRY(atomic_or_uint_nv)
movl (%rdi), %eax
1:
movl %esi, %ecx
orl %eax, %ecx
lock
cmpxchgl %ecx, (%rdi)
jne 1b
movl %ecx, %eax
ret
SET_SIZE(atomic_or_uint_nv)
SET_SIZE(atomic_or_32_nv)
ENTRY(atomic_or_64_nv)
ALTENTRY(atomic_or_ulong_nv)
movq (%rdi), %rax
1:
movq %rsi, %rcx
orq %rax, %rcx
lock
cmpxchgq %rcx, (%rdi)
jne 1b
movq %rcx, %rax
ret
SET_SIZE(atomic_or_ulong_nv)
SET_SIZE(atomic_or_64_nv)
ENTRY(atomic_cas_8)
ALTENTRY(atomic_cas_uchar)
movzbl %sil, %eax
lock
cmpxchgb %dl, (%rdi)
ret
SET_SIZE(atomic_cas_uchar)
SET_SIZE(atomic_cas_8)
ENTRY(atomic_cas_16)
ALTENTRY(atomic_cas_ushort)
movzwl %si, %eax
lock
cmpxchgw %dx, (%rdi)
ret
SET_SIZE(atomic_cas_ushort)
SET_SIZE(atomic_cas_16)
ENTRY(atomic_cas_32)
ALTENTRY(atomic_cas_uint)
movl %esi, %eax
lock
cmpxchgl %edx, (%rdi)
ret
SET_SIZE(atomic_cas_uint)
SET_SIZE(atomic_cas_32)
ENTRY(atomic_cas_64)
ALTENTRY(atomic_cas_ulong)
ALTENTRY(atomic_cas_ptr)
movq %rsi, %rax
lock
cmpxchgq %rdx, (%rdi)
ret
SET_SIZE(atomic_cas_ptr)
SET_SIZE(atomic_cas_ulong)
SET_SIZE(atomic_cas_64)
ENTRY(atomic_swap_8)
ALTENTRY(atomic_swap_uchar)
movzbl %sil, %eax
lock
xchgb %al, (%rdi)
ret
SET_SIZE(atomic_swap_uchar)
SET_SIZE(atomic_swap_8)
ENTRY(atomic_swap_16)
ALTENTRY(atomic_swap_ushort)
movzwl %si, %eax
lock
xchgw %ax, (%rdi)
ret
SET_SIZE(atomic_swap_ushort)
SET_SIZE(atomic_swap_16)
ENTRY(atomic_swap_32)
ALTENTRY(atomic_swap_uint)
movl %esi, %eax
lock
xchgl %eax, (%rdi)
ret
SET_SIZE(atomic_swap_uint)
SET_SIZE(atomic_swap_32)
ENTRY(atomic_swap_64)
ALTENTRY(atomic_swap_ulong)
ALTENTRY(atomic_swap_ptr)
movq %rsi, %rax
lock
xchgq %rax, (%rdi)
ret
SET_SIZE(atomic_swap_ptr)
SET_SIZE(atomic_swap_ulong)
SET_SIZE(atomic_swap_64)
ENTRY(atomic_set_long_excl)
xorl %eax, %eax
lock
btsq %rsi, (%rdi)
jnc 1f
decl %eax
1:
ret
SET_SIZE(atomic_set_long_excl)
ENTRY(atomic_clear_long_excl)
xorl %eax, %eax
lock
btrq %rsi, (%rdi)
jc 1f
decl %eax
1:
ret
SET_SIZE(atomic_clear_long_excl)
/*
* NOTE: membar_enter, and membar_exit are identical routines.
* We define them separately, instead of using an ALTENTRY
* definitions to alias them together, so that DTrace and
* debuggers will see a unique address for them, allowing
* more accurate tracing.
*/
ENTRY(membar_enter)
mfence
ret
SET_SIZE(membar_enter)
ENTRY(membar_exit)
mfence
ret
SET_SIZE(membar_exit)
ENTRY(membar_producer)
sfence
ret
SET_SIZE(membar_producer)
ENTRY(membar_consumer)
lfence
ret
SET_SIZE(membar_consumer)
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif

lib/libspl/atomic.c

@@ -25,16 +25,6 @@
*/
#include <atomic.h>
#include <assert.h>
#include <pthread.h>
/*
* All operations are implemented by serializing them through a global
* pthread mutex. This provides a correct generic implementation.
* However all supported architectures are encouraged to provide a
* native implementation in assembly for performance reasons.
*/
pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
/*
* These are the void returning variants
@@ -43,9 +33,7 @@ pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;
#define ATOMIC_INC(name, type) \
void atomic_inc_##name(volatile type *target) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
(*target)++; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
(void) __atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST); \
}
ATOMIC_INC(8, uint8_t)
@@ -61,9 +49,7 @@ ATOMIC_INC(64, uint64_t)
#define ATOMIC_DEC(name, type) \
void atomic_dec_##name(volatile type *target) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
(*target)--; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
(void) __atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST); \
}
ATOMIC_DEC(8, uint8_t)
@@ -79,9 +65,7 @@ ATOMIC_DEC(64, uint64_t)
#define ATOMIC_ADD(name, type1, type2) \
void atomic_add_##name(volatile type1 *target, type2 bits) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
*target += bits; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
(void) __atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
ATOMIC_ADD(8, uint8_t, int8_t)
@@ -96,18 +80,14 @@ ATOMIC_ADD(64, uint64_t, int64_t)
void
atomic_add_ptr(volatile void *target, ssize_t bits)
{
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
*(caddr_t *)target += bits;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
(void) __atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}
#define ATOMIC_SUB(name, type1, type2) \
void atomic_sub_##name(volatile type1 *target, type2 bits) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
*target -= bits; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
(void) __atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
ATOMIC_SUB(8, uint8_t, int8_t)
@@ -122,18 +102,14 @@ ATOMIC_SUB(64, uint64_t, int64_t)
void
atomic_sub_ptr(volatile void *target, ssize_t bits)
{
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
*(caddr_t *)target -= bits;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
(void) __atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST);
}
#define ATOMIC_OR(name, type) \
void atomic_or_##name(volatile type *target, type bits) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
*target |= bits; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
(void) __atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
ATOMIC_OR(8, uint8_t)
@@ -149,9 +125,7 @@ ATOMIC_OR(64, uint64_t)
#define ATOMIC_AND(name, type) \
void atomic_and_##name(volatile type *target, type bits) \
{ \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
*target &= bits; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
(void) __atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST); \
}
ATOMIC_AND(8, uint8_t)
@@ -171,11 +145,7 @@ ATOMIC_AND(64, uint64_t)
#define ATOMIC_INC_NV(name, type) \
type atomic_inc_##name##_nv(volatile type *target) \
{ \
type rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (++(*target)); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (rc); \
return (__atomic_add_fetch(target, 1, __ATOMIC_SEQ_CST)); \
}
ATOMIC_INC_NV(8, uint8_t)
@@ -191,11 +161,7 @@ ATOMIC_INC_NV(64, uint64_t)
#define ATOMIC_DEC_NV(name, type) \
type atomic_dec_##name##_nv(volatile type *target) \
{ \
type rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (--(*target)); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (rc); \
return (__atomic_sub_fetch(target, 1, __ATOMIC_SEQ_CST)); \
}
ATOMIC_DEC_NV(8, uint8_t)
@@ -209,13 +175,9 @@ ATOMIC_DEC_NV(64, uint64_t)
#define ATOMIC_ADD_NV(name, type1, type2) \
type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits)\
type1 atomic_add_##name##_nv(volatile type1 *target, type2 bits) \
{ \
type1 rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (*target += bits); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (rc); \
return (__atomic_add_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
ATOMIC_ADD_NV(8, uint8_t, int8_t)
@@ -230,24 +192,14 @@ ATOMIC_ADD_NV(64, uint64_t, int64_t)
void *
atomic_add_ptr_nv(volatile void *target, ssize_t bits)
{
void *ptr;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
ptr = (*(caddr_t *)target += bits);
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (ptr);
return (__atomic_add_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}
#define ATOMIC_SUB_NV(name, type1, type2) \
type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits)\
type1 atomic_sub_##name##_nv(volatile type1 *target, type2 bits) \
{ \
type1 rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (*target -= bits); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (rc); \
return (__atomic_sub_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
ATOMIC_SUB_NV(8, uint8_t, int8_t)
@@ -262,24 +214,14 @@ ATOMIC_SUB_NV(64, uint64_t, int64_t)
void *
atomic_sub_ptr_nv(volatile void *target, ssize_t bits)
{
void *ptr;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
ptr = (*(caddr_t *)target -= bits);
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (ptr);
return (__atomic_sub_fetch((void **)target, bits, __ATOMIC_SEQ_CST));
}
#define ATOMIC_OR_NV(name, type) \
type atomic_or_##name##_nv(volatile type *target, type bits) \
{ \
type rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (*target |= bits); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (rc); \
return (__atomic_or_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
ATOMIC_OR_NV(8, uint8_t)
@@ -295,11 +237,7 @@ ATOMIC_OR_NV(64, uint64_t)
#define ATOMIC_AND_NV(name, type) \
type atomic_and_##name##_nv(volatile type *target, type bits) \
{ \
type rc; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
rc = (*target &= bits); \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (rc); \
return (__atomic_and_fetch(target, bits, __ATOMIC_SEQ_CST)); \
}
ATOMIC_AND_NV(8, uint8_t)
@@ -313,19 +251,21 @@ ATOMIC_AND_NV(64, uint64_t)
/*
* If *arg1 == arg2, set *arg1 = arg3; return old value
* If *tgt == exp, set *tgt = des; return old value
*
* This may not look right on the first pass (or the sixteenth), but,
* from https://gcc.gnu.org/onlinedocs/gcc/_005f_005fatomic-Builtins.html:
* > If they are not equal, the operation is a read
* > and the current contents of *ptr are written into *expected.
* And, in the converse case, exp is already *target by definition.
*/
#define ATOMIC_CAS(name, type) \
type atomic_cas_##name(volatile type *target, type arg1, type arg2) \
type atomic_cas_##name(volatile type *target, type exp, type des) \
{ \
type old; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
old = *target; \
if (old == arg1) \
*target = arg2; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (old); \
__atomic_compare_exchange_n(target, &exp, des, B_FALSE, \
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
return (exp); \
}
ATOMIC_CAS(8, uint8_t)
@@ -338,17 +278,12 @@ ATOMIC_CAS(ulong, ulong_t)
ATOMIC_CAS(64, uint64_t)
void *
atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
atomic_cas_ptr(volatile void *target, void *exp, void *des)
{
void *old;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
old = *(void **)target;
if (old == arg1)
*(void **)target = arg2;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (old);
__atomic_compare_exchange_n((void **)target, &exp, des, B_FALSE,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return (exp);
}
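
To make the comment above concrete, here is a stand-alone illustration (not
part of the commit) of why returning exp yields the old value in both the
equal and not-equal cases of __atomic_compare_exchange_n:

#include <assert.h>
#include <stdint.h>

static void
example_cas_semantics(void)
{
	uint32_t target = 5;
	uint32_t exp = 5;

	/* Equal: *target becomes 9; exp still holds the old value, 5. */
	(void) __atomic_compare_exchange_n(&target, &exp, 9, 0 /* strong */,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	assert(target == 9 && exp == 5);

	/* Not equal: *target is untouched; exp is overwritten with 9. */
	exp = 7;
	(void) __atomic_compare_exchange_n(&target, &exp, 1, 0 /* strong */,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	assert(target == 9 && exp == 9);
}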
@@ -359,12 +294,7 @@ atomic_cas_ptr(volatile void *target, void *arg1, void *arg2)
#define ATOMIC_SWAP(name, type) \
type atomic_swap_##name(volatile type *target, type bits) \
{ \
type old; \
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0); \
old = *target; \
*target = bits; \
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0); \
return (old); \
return (__atomic_exchange_n(target, bits, __ATOMIC_SEQ_CST)); \
}
ATOMIC_SWAP(8, uint8_t)
@@ -380,14 +310,7 @@ ATOMIC_SWAP(64, uint64_t)
void *
atomic_swap_ptr(volatile void *target, void *bits)
{
void *old;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
old = *(void **)target;
*(void **)target = bits;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (old);
return (__atomic_exchange_n((void **)target, bits, __ATOMIC_SEQ_CST));
}
#ifndef _LP64
@@ -407,57 +330,39 @@ atomic_store_64(volatile uint64_t *target, uint64_t bits)
int
atomic_set_long_excl(volatile ulong_t *target, uint_t value)
{
ulong_t bit;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
bit = (1UL << value);
if ((*target & bit) != 0) {
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (-1);
}
*target |= bit;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (0);
ulong_t bit = 1UL << value;
ulong_t old = __atomic_fetch_or(target, bit, __ATOMIC_SEQ_CST);
return ((old & bit) ? -1 : 0);
}
int
atomic_clear_long_excl(volatile ulong_t *target, uint_t value)
{
ulong_t bit;
VERIFY3S(pthread_mutex_lock(&atomic_lock), ==, 0);
bit = (1UL << value);
if ((*target & bit) == 0) {
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (-1);
}
*target &= ~bit;
VERIFY3S(pthread_mutex_unlock(&atomic_lock), ==, 0);
return (0);
ulong_t bit = 1UL << value;
ulong_t old = __atomic_fetch_and(target, ~bit, __ATOMIC_SEQ_CST);
return ((old & bit) ? 0 : -1);
}
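
A hypothetical usage sketch (not from the commit) of the two routines above,
assuming libspl's <atomic.h> for the declarations and the ulong_t/uint_t
typedefs it pulls in:

#include <atomic.h>

/* Try to claim bit `bit` of *flags exclusively; release it when done. */
static int
example_try_claim(volatile ulong_t *flags, uint_t bit)
{
	if (atomic_set_long_excl(flags, bit) != 0)
		return (-1);	/* bit was already set: someone else owns it */

	/* ... exclusive work ... */

	(void) atomic_clear_long_excl(flags, bit);
	return (0);
}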
void
membar_enter(void)
{
/* XXX - Implement me */
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
void
membar_exit(void)
{
/* XXX - Implement me */
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
void
membar_producer(void)
{
/* XXX - Implement me */
__atomic_thread_fence(__ATOMIC_RELEASE);
}
void
membar_consumer(void)
{
/* XXX - Implement me */
__atomic_thread_fence(__ATOMIC_ACQUIRE);
}
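
Finally, a minimal producer/consumer sketch of where the release/acquire
fences above come into play. This is hypothetical and mirrors the traditional
use of these legacy Solaris barriers with plain/volatile accesses; new code
would normally use the __atomic builtins (or C11 atomics) directly:

#include <atomic.h>
#include <stdint.h>

static uint32_t payload;
static volatile uint32_t ready;

static void
example_publish(void)
{
	payload = 42;
	membar_producer();	/* release fence: payload visible before ready */
	ready = 1;
}

static uint32_t
example_consume(void)
{
	while (ready == 0)
		;
	membar_consumer();	/* acquire fence: read payload only after ready */
	return (payload);
}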