Convert all HAVE_<name> SIMD gates to HAVE_SIMD(<name>)

The original names no longer exist, and the new ones will need to be
selectable based on the current compilation target.

Sponsored-by: TrueNAS
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rob Norris <rob.norris@truenas.com>
Closes #18285
This commit is contained in:
Rob Norris 2026-03-04 22:28:21 +11:00 committed by Brian Behlendorf
parent 92a6ab405f
commit 35f74f84e6
33 changed files with 113 additions and 112 deletions

View File

@ -315,19 +315,19 @@ kfpu_begin(void)
* FPU state to be correctly preserved and restored.
*/
uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
#if HAVE_SIMD(XSAVES)
if (static_cpu_has(X86_FEATURE_XSAVES)) {
kfpu_do_xsave("xsaves", state, ~XFEATURE_MASK_XTILE);
return;
}
#endif
#if defined(HAVE_XSAVEOPT)
#if HAVE_SIMD(XSAVEOPT)
if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
kfpu_do_xsave("xsaveopt", state, ~XFEATURE_MASK_XTILE);
return;
}
#endif
#if defined(HAVE_XSAVE)
#if HAVE_SIMD(XSAVE)
if (static_cpu_has(X86_FEATURE_XSAVE)) {
kfpu_do_xsave("xsave", state, ~XFEATURE_MASK_XTILE);
return;
@ -380,13 +380,13 @@ static inline void
kfpu_end(void)
{
uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
#if defined(HAVE_XSAVES)
#if HAVE_SIMD(XSAVES)
if (static_cpu_has(X86_FEATURE_XSAVES)) {
kfpu_do_xrstor("xrstors", state, ~XFEATURE_MASK_XTILE);
goto out;
}
#endif
#if defined(HAVE_XSAVE)
#if HAVE_SIMD(XSAVE)
if (static_cpu_has(X86_FEATURE_XSAVE)) {
kfpu_do_xrstor("xrstor", state, ~XFEATURE_MASK_XTILE);
goto out;

View File

@ -172,19 +172,19 @@ typedef struct reflow_node {
extern const raidz_impl_ops_t vdev_raidz_scalar_impl;
extern boolean_t raidz_will_scalar_work(void);
#if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(SSE2) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_sse2_impl;
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(SSSE3) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_ssse3_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX2) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(AVX2) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_avx2_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(AVX512F) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_avx512f_impl;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(AVX512BW) /* only x86_64 for now */
extern const raidz_impl_ops_t vdev_raidz_avx512bw_impl;
#endif
#if defined(__aarch64__)

View File

@ -97,13 +97,13 @@ typedef union fletcher_4_ctx {
zio_cksum_t scalar;
zfs_fletcher_superscalar_t superscalar[4];
#if defined(HAVE_SSE2) || (defined(HAVE_SSE2) && defined(HAVE_SSSE3))
#if HAVE_SIMD(SSE2) || (HAVE_SIMD(SSE2) && HAVE_SIMD(SSSE3))
zfs_fletcher_sse_t sse[4];
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
#if HAVE_SIMD(AVX) && HAVE_SIMD(AVX2)
zfs_fletcher_avx_t avx[4];
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
#if defined(__x86_64) && HAVE_SIMD(AVX512F)
zfs_fletcher_avx512_t avx512[4];
#endif
#if defined(__aarch64__)
@ -134,23 +134,23 @@ typedef struct fletcher_4_func {
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_superscalar_ops;
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_superscalar4_ops;
#if defined(HAVE_SSE2)
#if HAVE_SIMD(SSE2)
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_sse2_ops;
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
#if HAVE_SIMD(SSE2) && HAVE_SIMD(SSSE3)
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_ssse3_ops;
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
#if HAVE_SIMD(AVX) && HAVE_SIMD(AVX2)
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_avx2_ops;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
#if defined(__x86_64) && HAVE_SIMD(AVX512F)
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_avx512f_ops;
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW)
#if defined(__x86_64) && HAVE_SIMD(AVX512BW)
_ZFS_FLETCHER_H const fletcher_4_ops_t fletcher_4_avx512bw_ops;
#endif

View File

@ -231,7 +231,7 @@ static const aes_impl_ops_t *aes_all_impl[] = {
#if defined(__x86_64)
&aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
#if defined(__x86_64) && HAVE_SIMD(AES)
&aes_aesni_impl,
#endif
};
@ -315,7 +315,7 @@ aes_impl_init(void)
* hardware accelerated version is the fastest.
*/
#if defined(__x86_64)
#if defined(HAVE_AES)
#if HAVE_SIMD(AES)
if (aes_aesni_impl.is_supported()) {
memcpy(&aes_fastest_impl, &aes_aesni_impl,
sizeof (aes_fastest_impl));

View File

@ -23,7 +23,7 @@
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#if defined(__x86_64) && defined(HAVE_AES)
#if defined(__x86_64) && HAVE_SIMD(AES)
#include <sys/simd.h>
#include <sys/types.h>
@ -123,4 +123,4 @@ const aes_impl_ops_t aes_aesni_impl = {
.name = "aesni"
};
#endif /* defined(__x86_64) && defined(HAVE_AES) */
#endif /* defined(__x86_64) && HAVE_SIMD(AES) */

View File

@ -32,7 +32,7 @@
#include "blake3_impl.h"
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__x86_64) && HAVE_SIMD(SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
extern void ASMABI zfs_blake3_compress_in_place_sse2(uint32_t cv[8],
@ -98,7 +98,7 @@ const blake3_ops_t blake3_sse2_impl = {
#endif
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__x86_64) && HAVE_SIMD(SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
extern void ASMABI zfs_blake3_compress_in_place_sse41(uint32_t cv[8],
@ -163,7 +163,7 @@ const blake3_ops_t blake3_sse41_impl = {
};
#endif
#if defined(__x86_64) && defined(HAVE_SSE4_1) && defined(HAVE_AVX2)
#if defined(__x86_64) && HAVE_SIMD(SSE4_1) && HAVE_SIMD(AVX2)
extern void ASMABI zfs_blake3_hash_many_avx2(const uint8_t * const *inputs,
size_t num_inputs, size_t blocks, const uint32_t key[8],
uint64_t counter, boolean_t increment_counter, uint8_t flags,
@ -196,7 +196,7 @@ blake3_avx2_impl = {
};
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) && defined(HAVE_AVX512VL)
#if defined(__x86_64) && HAVE_SIMD(AVX512F) && HAVE_SIMD(AVX512VL)
extern void ASMABI zfs_blake3_compress_in_place_avx512(uint32_t cv[8],
const uint8_t block[BLAKE3_BLOCK_LEN], uint8_t block_len,
uint64_t counter, uint8_t flags);
@ -259,19 +259,19 @@ extern const blake3_ops_t blake3_generic_impl;
static const blake3_ops_t *const blake3_impls[] = {
&blake3_generic_impl,
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE2)) || \
(defined(__x86_64) && HAVE_SIMD(SSE2)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
&blake3_sse2_impl,
#endif
#if defined(__aarch64__) || \
(defined(__x86_64) && defined(HAVE_SSE4_1)) || \
(defined(__x86_64) && HAVE_SIMD(SSE4_1)) || \
(defined(__PPC64__) && defined(__LITTLE_ENDIAN__))
&blake3_sse41_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSE4_1) && defined(HAVE_AVX2)
#if defined(__x86_64) && HAVE_SIMD(SSE4_1) && HAVE_SIMD(AVX2)
&blake3_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) && defined(HAVE_AVX512VL)
#if defined(__x86_64) && HAVE_SIMD(AVX512F) && HAVE_SIMD(AVX512VL)
&blake3_avx512_impl,
#endif
};

View File

@ -714,7 +714,7 @@ static gcm_impl_ops_t gcm_fastest_impl = {
/* All compiled in implementations */
static const gcm_impl_ops_t *gcm_all_impl[] = {
&gcm_generic_impl,
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
#if defined(__x86_64) && HAVE_SIMD(PCLMULQDQ)
&gcm_pclmulqdq_impl,
#endif
};
@ -801,7 +801,7 @@ gcm_impl_init(void)
* Set the fastest implementation given the assumption that the
* hardware accelerated version is the fastest.
*/
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
#if defined(__x86_64) && HAVE_SIMD(PCLMULQDQ)
if (gcm_pclmulqdq_impl.is_supported()) {
memcpy(&gcm_fastest_impl, &gcm_pclmulqdq_impl,
sizeof (gcm_fastest_impl));
@ -827,7 +827,7 @@ gcm_impl_init(void)
} else
#endif
if (gcm_avx_will_work()) {
#ifdef HAVE_MOVBE
#if HAVE_SIMD(MOVBE)
if (zfs_movbe_available() == B_TRUE) {
atomic_swap_32(&gcm_avx_can_use_movbe, B_TRUE);
}

View File

@ -23,7 +23,7 @@
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*/
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
#if defined(__x86_64) && HAVE_SIMD(PCLMULQDQ)
#include <sys/types.h>
#include <sys/simd.h>
@ -63,4 +63,4 @@ const gcm_impl_ops_t gcm_pclmulqdq_impl = {
.name = "pclmulqdq"
};
#endif /* defined(__x86_64) && defined(HAVE_PCLMULQDQ) */
#endif /* defined(__x86_64) && HAVE_SIMD(PCLMULQDQ) */

View File

@ -65,7 +65,7 @@ const sha256_ops_t sha256_x64_impl = {
.name = "x64"
};
#if defined(HAVE_SSSE3)
#if HAVE_SIMD(SSSE3)
static boolean_t sha2_have_ssse3(void)
{
return (kfpu_allowed() && zfs_ssse3_available());
@ -79,7 +79,7 @@ const sha256_ops_t sha256_ssse3_impl = {
};
#endif
#if defined(HAVE_AVX)
#if HAVE_SIMD(AVX)
static boolean_t sha2_have_avx(void)
{
return (kfpu_allowed() && zfs_avx_available());
@ -93,7 +93,7 @@ const sha256_ops_t sha256_avx_impl = {
};
#endif
#if defined(HAVE_AVX2)
#if HAVE_SIMD(AVX2)
static boolean_t sha2_have_avx2(void)
{
return (kfpu_allowed() && zfs_avx2_available());
@ -107,7 +107,7 @@ const sha256_ops_t sha256_avx2_impl = {
};
#endif
#if defined(HAVE_SSE4_1)
#if HAVE_SIMD(SSE4_1)
static boolean_t sha2_have_shani(void)
{
return (kfpu_allowed() && zfs_sse4_1_available() && \
@ -186,16 +186,16 @@ static const sha256_ops_t *const sha256_impls[] = {
#if defined(__x86_64)
&sha256_x64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3)
#if defined(__x86_64) && HAVE_SIMD(SSSE3)
&sha256_ssse3_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX)
#if defined(__x86_64) && HAVE_SIMD(AVX)
&sha256_avx_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2)
#if defined(__x86_64) && HAVE_SIMD(AVX2)
&sha256_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSE4_1)
#if defined(__x86_64) && HAVE_SIMD(SSE4_1)
&sha256_shani_impl,
#endif
#if defined(__aarch64__) || defined(__arm__)

View File

@ -65,7 +65,7 @@ const sha512_ops_t sha512_x64_impl = {
.name = "x64"
};
#if defined(HAVE_AVX)
#if HAVE_SIMD(AVX)
static boolean_t sha2_have_avx(void)
{
return (kfpu_allowed() && zfs_avx_available());
@ -79,7 +79,7 @@ const sha512_ops_t sha512_avx_impl = {
};
#endif
#if defined(HAVE_AVX2)
#if HAVE_SIMD(AVX2)
static boolean_t sha2_have_avx2(void)
{
return (kfpu_allowed() && zfs_avx2_available());
@ -93,7 +93,7 @@ const sha512_ops_t sha512_avx2_impl = {
};
#endif
#if defined(HAVE_SHA512EXT)
#if HAVE_SIMD(SHA512EXT)
static boolean_t sha2_have_sha512ext(void)
{
return (kfpu_allowed() && zfs_sha512ext_available());
@ -173,13 +173,13 @@ static const sha512_ops_t *const sha512_impls[] = {
#if defined(__x86_64)
&sha512_x64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX)
#if defined(__x86_64) && HAVE_SIMD(AVX)
&sha512_avx_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2)
#if defined(__x86_64) && HAVE_SIMD(AVX2)
&sha512_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SHA512EXT)
#if defined(__x86_64) && HAVE_SIMD(SHA512EXT)
&sha512_sha512ext_impl,
#endif
#if defined(__aarch64__) || defined(__arm__)

View File

@ -179,7 +179,7 @@ rijndael_key_setup_dec_intel(uint32_t rk[], const uint32_t cipherKey[],
}
#elif defined(HAVE_AES) /* guard by instruction set */
#elif HAVE_SIMD(AES) /* guard by instruction set */
#define _ASM
#include <sys/asm_linkage.h>

View File

@ -26,7 +26,7 @@
* Copyright (c) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#if defined(HAVE_AVX2)
#if HAVE_SIMD(AVX2)
#define _ASM
#include <sys/asm_linkage.h>
@ -1822,7 +1822,7 @@ CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
BLAKE3_IV:
.long 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A
#endif /* HAVE_AVX2 */
#endif /* HAVE_SIMD(AVX2) */
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits

View File

@ -26,7 +26,7 @@
* Copyright (c) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#if defined(HAVE_AVX512F) && defined(HAVE_AVX512VL)
#if HAVE_SIMD(AVX512F) && HAVE_SIMD(AVX512VL)
#define _ASM
#include <sys/asm_linkage.h>
@ -2588,7 +2588,7 @@ BLAKE3_IV_2:
BLAKE3_IV_3:
.long 0xA54FF53A
#endif /* HAVE_AVX512 */
#endif /* HAVE_SIMD(AVX512F) && HAVE_SIMD(AVX512VL) */
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits

View File

@ -26,7 +26,7 @@
* Copyright (c) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#if defined(HAVE_SSE2)
#if HAVE_SIMD(SSE2)
#define _ASM
#include <sys/asm_linkage.h>
@ -2293,7 +2293,7 @@ PBLENDW_0x3F_MASK:
PBLENDW_0xC0_MASK:
.long 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
#endif /* HAVE_SSE2 */
#endif /* HAVE_SIMD(SSE2) */
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits

View File

@ -26,7 +26,7 @@
* Copyright (c) 2022 Tino Reichardt <milky-zfs@mcmilk.de>
*/
#if defined(HAVE_SSE4_1)
#if HAVE_SIMD(SSE4_1)
#define _ASM
#include <sys/asm_linkage.h>
@ -2031,7 +2031,7 @@ BLAKE3_BLOCK_LEN:
CMP_MSB_MASK:
.long 0x80000000, 0x80000000, 0x80000000, 0x80000000
#endif /* HAVE_SSE4_1 */
#endif /* HAVE_SIMD(SSE4_1) */
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits

View File

@ -5,8 +5,8 @@
// perlasm source: https://github.com/google/boringssl/blob/d5440dd2c2c500ac2d3bba4afec47a054b4d99ae/crypto/fipsmodule/aes/asm/aes-gcm-avx2-x86_64.pl
// generated source: https://github.com/google/boringssl/blob/d5440dd2c2c500ac2d3bba4afec47a054b4d99ae/gen/bcm/aes-gcm-avx2-x86_64-linux.S
#if defined(__x86_64__) && defined(HAVE_AVX2) && \
defined(HAVE_VAES) && defined(HAVE_VPCLMULQDQ)
#if defined(__x86_64__) && HAVE_SIMD(AVX2) && \
HAVE_SIMD(VAES) && HAVE_SIMD(VPCLMULQDQ)
#define _ASM
#include <sys/asm_linkage.h>
@ -1288,4 +1288,4 @@ SET_SIZE(aes_gcm_dec_update_vaes_avx2)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* defined(__x86_64__) && defined(HAVE_AVX2) && defined(HAVE_VAES) ... */
#endif /* defined(__x86_64__) && HAVE_SIMD(AVX2) && HAVE_SIMD(VAES) ... */

View File

@ -45,8 +45,8 @@
# and modified for ICP. Modification are kept at a bare minimum to ease later
# upstream merges.
#if defined(__x86_64__) && defined(HAVE_AVX) && \
defined(HAVE_AES) && defined(HAVE_PCLMULQDQ)
#if defined(__x86_64__) && HAVE_SIMD(AVX) && \
HAVE_SIMD(AES) && HAVE_SIMD(PCLMULQDQ)
#define _ASM
#include <sys/asm_linkage.h>
@ -64,7 +64,7 @@
.text
#ifdef HAVE_MOVBE
#if HAVE_SIMD(MOVBE)
.balign 32
FUNCTION(_aesni_ctr32_ghash_6x)
.cfi_startproc
@ -380,7 +380,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
.cfi_endproc
SET_SIZE(_aesni_ctr32_ghash_6x)
STACK_FRAME_NON_STANDARD _aesni_ctr32_ghash_6x
#endif /* ifdef HAVE_MOVBE */
#endif /* HAVE_SIMD(MOVBE) */
.balign 32
FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
@ -781,7 +781,7 @@ ENTRY_ALIGN(aesni_gcm_decrypt, 32)
vmovdqu %xmm2,96(%rsp)
vmovdqu %xmm3,112(%rsp)
#ifdef HAVE_MOVBE
#if HAVE_SIMD(MOVBE)
#ifdef _KERNEL
testl $1,gcm_avx_can_use_movbe(%rip)
#else
@ -992,7 +992,7 @@ ENTRY_ALIGN(aesni_gcm_encrypt, 32)
movq $192,%r10
vpshufb %xmm0,%xmm8,%xmm8
#ifdef HAVE_MOVBE
#if HAVE_SIMD(MOVBE)
#ifdef _KERNEL
testl $1,gcm_avx_can_use_movbe(%rip)
#else
@ -1266,7 +1266,7 @@ SECTION_STATIC
#if defined(__linux__) && defined(HAVE_STACK_FRAME_NON_STANDARD) && \
! defined(HAVE_STACK_FRAME_NON_STANDARD_ASM)
.section .discard.func_stack_frame_non_standard, "aw"
#ifdef HAVE_MOVBE
#if HAVE_SIMD(MOVBE)
.long _aesni_ctr32_ghash_6x - .
#endif
.long _aesni_ctr32_ghash_no_movbe_6x - .
@ -1279,4 +1279,4 @@ SECTION_STATIC
.section .note.GNU-stack,"",%progbits
#endif
#endif /* defined(__x86_64__) && defined(HAVE_AVX) && defined(HAVE_AES) ... */
#endif /* defined(__x86_64__) && HAVE_SIMD(AVX) && HAVE_SIMD(AES) ... */

View File

@ -91,7 +91,7 @@ gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) {
(void) x_in, (void) y, (void) res;
}
#elif defined(HAVE_PCLMULQDQ) /* guard by instruction set */
#elif HAVE_SIMD(PCLMULQDQ) /* guard by instruction set */
#define _ASM
#include <sys/asm_linkage.h>

View File

@ -95,8 +95,8 @@
# and modified for ICP. Modification are kept at a bare minimum to ease later
# upstream merges.
#if defined(__x86_64__) && defined(HAVE_AVX) && \
defined(HAVE_AES) && defined(HAVE_PCLMULQDQ)
#if defined(__x86_64__) && HAVE_SIMD(AVX) && \
HAVE_SIMD(AES) && HAVE_SIMD(PCLMULQDQ)
#define _ASM
#include <sys/asm_linkage.h>
@ -654,4 +654,4 @@ SET_OBJ(.Lrem_8bit)
.section .note.GNU-stack,"",%progbits
#endif
#endif /* defined(__x86_64__) && defined(HAVE_AVX) && defined(HAVE_AES) ... */
#endif /* defined(__x86_64__) && HAVE_SIMD(AVX) && HAVE_SIMD(AES) ... */

View File

@ -4054,7 +4054,7 @@ ENTRY_ALIGN(zfs_sha512_transform_avx2, 64)
SET_SIZE(zfs_sha512_transform_avx2)
STACK_FRAME_NON_STANDARD zfs_sha512_transform_avx2
#ifdef HAVE_SHA512EXT
#if HAVE_SIMD(SHA512EXT)
ENTRY_ALIGN(zfs_sha512_transform_sha512ext, 64)
.cfi_startproc
ENDBR
@ -4324,7 +4324,7 @@ ENTRY_ALIGN(zfs_sha512_transform_sha512ext, 64)
.cfi_endproc
SET_SIZE(zfs_sha512_transform_sha512ext)
STACK_FRAME_NON_STANDARD zfs_sha512_transform_sha512ext
#endif /* HAVE_SHA512EXT */
#endif /* HAVE_SIMD(SHA512EXT) */
/* Workaround for missing asm macro in RHEL 8. */
#if defined(__linux__) && defined(HAVE_STACK_FRAME_NON_STANDARD) && \
@ -4333,7 +4333,7 @@ STACK_FRAME_NON_STANDARD zfs_sha512_transform_sha512ext
.long zfs_sha512_transform_x64 - .
.long zfs_sha512_transform_avx - .
.long zfs_sha512_transform_avx2 - .
#ifdef HAVE_SHA512EXT
#if HAVE_SIMD(SHA512EXT)
.long zfs_sha512_transform_sha512ext - .
#endif
#endif

View File

@ -196,7 +196,7 @@ extern ASMABI void aes_encrypt_amd64(const uint32_t rk[], int Nr,
extern ASMABI void aes_decrypt_amd64(const uint32_t rk[], int Nr,
const uint32_t ct[4], uint32_t pt[4]);
#endif
#if defined(__x86_64) && defined(HAVE_AES)
#if defined(__x86_64) && HAVE_SIMD(AES)
extern const aes_impl_ops_t aes_aesni_impl;
#endif

View File

@ -55,7 +55,7 @@ typedef struct gcm_impl_ops {
} gcm_impl_ops_t;
extern const gcm_impl_ops_t gcm_generic_impl;
#if defined(__x86_64) && defined(HAVE_PCLMULQDQ)
#if defined(__x86_64) && HAVE_SIMD(PCLMULQDQ)
extern const gcm_impl_ops_t gcm_pclmulqdq_impl;
#endif

View File

@ -40,9 +40,9 @@ extern "C" {
* routines. AVX support should imply AES-NI and PCLMULQDQ, but make sure
* anyhow.
*/
#if defined(__x86_64__) && defined(HAVE_AVX) && \
defined(HAVE_AES) && defined(HAVE_PCLMULQDQ)
#define CAN_USE_GCM_ASM (HAVE_VAES && HAVE_VPCLMULQDQ ? 2 : 1)
#if defined(__x86_64__) && HAVE_SIMD(AVX) && \
HAVE_SIMD(AES) && HAVE_SIMD(PCLMULQDQ)
#define CAN_USE_GCM_ASM (HAVE_SIMD(VAES) && HAVE_SIMD(VPCLMULQDQ) ? 2 : 1)
extern boolean_t gcm_avx_can_use_movbe;
#endif

View File

@ -174,19 +174,19 @@ static const fletcher_4_ops_t *fletcher_4_impls[] = {
&fletcher_4_scalar_ops,
&fletcher_4_superscalar_ops,
&fletcher_4_superscalar4_ops,
#if defined(HAVE_SSE2)
#if HAVE_SIMD(SSE2)
&fletcher_4_sse2_ops,
#endif
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
#if HAVE_SIMD(SSE2) && HAVE_SIMD(SSSE3)
&fletcher_4_ssse3_ops,
#endif
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
#if HAVE_SIMD(AVX) && HAVE_SIMD(AVX2)
&fletcher_4_avx2_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F)
#if defined(__x86_64) && HAVE_SIMD(AVX512F)
&fletcher_4_avx512f_ops,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW)
#if defined(__x86_64) && HAVE_SIMD(AVX512BW)
&fletcher_4_avx512bw_ops,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)

View File

@ -23,7 +23,7 @@
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#if defined(__x86_64) && defined(HAVE_AVX512F)
#if defined(__x86_64) && HAVE_SIMD(AVX512F)
#include <sys/byteorder.h>
#include <sys/frame.h>
@ -167,7 +167,7 @@ const fletcher_4_ops_t fletcher_4_avx512f_ops = {
.name = "avx512f"
};
#if defined(HAVE_AVX512BW)
#if HAVE_SIMD(AVX512BW)
static void
fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
uint64_t size)
@ -219,4 +219,4 @@ const fletcher_4_ops_t fletcher_4_avx512bw_ops = {
};
#endif
#endif /* defined(__x86_64) && defined(HAVE_AVX512F) */
#endif /* defined(__x86_64) && HAVE_SIMD(AVX512F) */

View File

@ -41,7 +41,7 @@
* SOFTWARE.
*/
#if defined(HAVE_AVX) && defined(HAVE_AVX2)
#if HAVE_SIMD(AVX) && HAVE_SIMD(AVX2)
#include <sys/spa_checksum.h>
#include <sys/string.h>
@ -164,4 +164,4 @@ const fletcher_4_ops_t fletcher_4_avx2_ops = {
.name = "avx2"
};
#endif /* defined(HAVE_AVX) && defined(HAVE_AVX2) */
#endif /* HAVE_SIMD(AVX) && HAVE_SIMD(AVX2) */

View File

@ -42,7 +42,7 @@
* SOFTWARE.
*/
#if defined(HAVE_SSE2)
#if HAVE_SIMD(SSE2)
#include <sys/simd.h>
#include <sys/spa_checksum.h>
@ -165,9 +165,9 @@ const fletcher_4_ops_t fletcher_4_sse2_ops = {
.name = "sse2"
};
#endif /* defined(HAVE_SSE2) */
#endif /* HAVE_SIMD(SSE2) */
#if defined(HAVE_SSE2) && defined(HAVE_SSSE3)
#if HAVE_SIMD(SSE2) && HAVE_SIMD(SSSE3)
static void
fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
{
@ -220,4 +220,4 @@ const fletcher_4_ops_t fletcher_4_ssse3_ops = {
.name = "ssse3"
};
#endif /* defined(HAVE_SSE2) && defined(HAVE_SSSE3) */
#endif /* HAVE_SIMD(SSE2) && HAVE_SIMD(SSSE3) */

View File

@ -47,19 +47,19 @@ static raidz_impl_ops_t vdev_raidz_fastest_impl = {
static const raidz_impl_ops_t *const raidz_all_maths[] = {
&vdev_raidz_original_impl,
&vdev_raidz_scalar_impl,
#if defined(__x86_64) && defined(HAVE_SSE2) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(SSE2) /* only x86_64 for now */
&vdev_raidz_sse2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_SSSE3) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(SSSE3) /* only x86_64 for now */
&vdev_raidz_ssse3_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX2) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(AVX2) /* only x86_64 for now */
&vdev_raidz_avx2_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512F) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(AVX512F) /* only x86_64 for now */
&vdev_raidz_avx512f_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */
#if defined(__x86_64) && HAVE_SIMD(AVX512BW) /* only x86_64 for now */
&vdev_raidz_avx512bw_impl,
#endif
#if defined(__aarch64__) && !defined(__FreeBSD__)

View File

@ -22,9 +22,10 @@
/*
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/isa_defs.h>
#if defined(__x86_64) && defined(HAVE_AVX2)
#if defined(__x86_64) && HAVE_SIMD(AVX2)
#include <sys/types.h>
#include <sys/simd.h>
@ -411,4 +412,4 @@ const raidz_impl_ops_t vdev_raidz_avx2_impl = {
.name = "avx2"
};
#endif /* defined(__x86_64) && defined(HAVE_AVX2) */
#endif /* defined(__x86_64) && HAVE_SIMD(AVX2) */

View File

@ -26,7 +26,7 @@
#include <sys/isa_defs.h>
#if defined(__x86_64) && defined(HAVE_AVX512BW)
#if defined(__x86_64) && HAVE_SIMD(AVX512BW)
#include <sys/param.h>
#include <sys/types.h>
@ -411,4 +411,4 @@ const raidz_impl_ops_t vdev_raidz_avx512bw_impl = {
.name = "avx512bw"
};
#endif /* defined(__x86_64) && defined(HAVE_AVX512BW) */
#endif /* defined(__x86_64) && HAVE_SIMD(AVX512BW) */

View File

@ -26,7 +26,7 @@
#include <sys/isa_defs.h>
#if defined(__x86_64) && defined(HAVE_AVX512F)
#if defined(__x86_64) && HAVE_SIMD(AVX512F)
#include <sys/types.h>
#include <sys/simd.h>
@ -492,4 +492,4 @@ const raidz_impl_ops_t vdev_raidz_avx512f_impl = {
.name = "avx512f"
};
#endif /* defined(__x86_64) && defined(HAVE_AVX512F) */
#endif /* defined(__x86_64) && HAVE_SIMD(AVX512F) */

View File

@ -25,7 +25,7 @@
#include <sys/isa_defs.h>
#if defined(__x86_64) && defined(HAVE_SSE2)
#if defined(__x86_64) && HAVE_SIMD(SSE2)
#include <sys/types.h>
#include <sys/simd.h>
@ -629,4 +629,4 @@ const raidz_impl_ops_t vdev_raidz_sse2_impl = {
.name = "sse2"
};
#endif /* defined(__x86_64) && defined(HAVE_SSE2) */
#endif /* defined(__x86_64) && HAVE_SIMD(SSE2) */

View File

@ -25,7 +25,7 @@
#include <sys/isa_defs.h>
#if defined(__x86_64) && defined(HAVE_SSSE3)
#if defined(__x86_64) && HAVE_SIMD(SSSE3)
#include <sys/types.h>
#include <sys/simd.h>
@ -415,11 +415,11 @@ const raidz_impl_ops_t vdev_raidz_ssse3_impl = {
.name = "ssse3"
};
#endif /* defined(__x86_64) && defined(HAVE_SSSE3) */
#endif /* defined(__x86_64) && HAVE_SIMD(SSSE3) */
#if defined(__x86_64)
#if defined(HAVE_SSSE3) || defined(HAVE_AVX2) || defined(HAVE_AVX512BW)
#if HAVE_SIMD(SSSE3) || HAVE_SIMD(AVX2) || HAVE_SIMD(AVX512BW)
/* BEGIN CSTYLED */
const uint8_t
__attribute__((aligned(256))) gf_clmul_mod_lt[4*256][16] =
@ -2474,5 +2474,5 @@ __attribute__((aligned(256))) gf_clmul_mod_lt[4*256][16] =
0xf8, 0x07, 0x06, 0xf9, 0x04, 0xfb, 0xfa, 0x05 }
};
/* END CSTYLED */
#endif /* defined(HAVE_SSSE3) || defined(HAVE_AVX2) || defined(HAVE_AVX512BW) */
#endif /* HAVE_SIMD(SSSE3) || HAVE_SIMD(AVX2) || HAVE_SIMD(AVX512BW) */
#endif /* defined(__x86_64) */