Mirror of https://git.proxmox.com/git/mirror_zfs.git (synced 2024-12-26 19:19:32 +03:00)
Micro-optimize fletcher4 calculations
When processing abds, we execute 1 `kfpu_begin()`/`kfpu_end()` pair on every page in the abd. This is wasteful and slows down checksum performance versus what the benchmark claimed. We correct this by moving those calls to the init and fini functions.

Also, we always check the buffer length against 0 before calling the non-scalar checksum functions. This means that we do not need to execute the loop condition for the first loop iteration. That allows us to micro-optimize the checksum calculations by switching to do-while loops.

Note that we do not apply that micro-optimization to the scalar implementation because there is no check in `fletcher_4_incremental_native()`/`fletcher_4_incremental_byteswap()` against 0 sized buffers being passed.

Reviewed-by: Alexander Motin <mav@FreeBSD.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Closes #14247
commit 59493b63c1
parent 7b9a423076
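Before the diff itself, a minimal sketch of the loop transformation the message describes. The `ip`/`ipend` pointer names match the patched functions, but the wrapper functions here are illustrative, not from the patch:

/* Before: the condition is tested even on the first iteration. */
static void
fletcher_4_loop_before(const uint32_t *ip, const uint32_t *ipend)
{
	for (; ip < ipend; ip += 2) {
		/* ... accumulate two input words ... */
	}
}

/*
 * After: callers of the non-scalar implementations already check the
 * buffer length against 0, so ip < ipend holds on entry and the first
 * condition test can be elided.
 */
static void
fletcher_4_loop_after(const uint32_t *ip, const uint32_t *ipend)
{
	do {
		/* ... accumulate two input words ... */
	} while ((ip += 2) < ipend);
}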
--- a/module/zcommon/zfs_fletcher_aarch64_neon.c
+++ b/module/zcommon/zfs_fletcher_aarch64_neon.c
@@ -52,6 +52,7 @@ ZFS_NO_SANITIZE_UNDEFINED
 static void
 fletcher_4_aarch64_neon_init(fletcher_4_ctx_t *ctx)
 {
+	kfpu_begin();
 	memset(ctx->aarch64_neon, 0, 4 * sizeof (zfs_fletcher_aarch64_neon_t));
 }
 
@@ -69,6 +70,7 @@ fletcher_4_aarch64_neon_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
 	    8 * ctx->aarch64_neon[3].v[1] - 8 * ctx->aarch64_neon[2].v[1] +
 	    ctx->aarch64_neon[1].v[1];
 	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
+	kfpu_end();
 }
 
 #define NEON_INIT_LOOP() \
@@ -146,17 +148,13 @@ unsigned char TMP2 __attribute__((vector_size(16)));
 unsigned char SRC __attribute__((vector_size(16)));
 #endif
 
-	kfpu_begin();
-
 	NEON_INIT_LOOP();
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		NEON_MAIN_LOOP(NEON_DONT_REVERSE);
-	}
+	} while ((ip += 2) < ipend);
 
 	NEON_FINI_LOOP();
-
-	kfpu_end();
 }
 
 static void
@@ -185,17 +183,13 @@ unsigned char TMP2 __attribute__((vector_size(16)));
 unsigned char SRC __attribute__((vector_size(16)));
 #endif
 
-	kfpu_begin();
-
 	NEON_INIT_LOOP();
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		NEON_MAIN_LOOP(NEON_DO_REVERSE);
-	}
+	} while ((ip += 2) < ipend);
 
 	NEON_FINI_LOOP();
-
-	kfpu_end();
 }
 
 static boolean_t fletcher_4_aarch64_neon_valid(void)
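The aarch64 NEON hunks above show the pattern repeated in every file below: `kfpu_begin()` moves into the init function, `kfpu_end()` into the fini function, and the per-buffer compute paths lose their per-call pair. A sketch of the call pattern this enables when checksumming an abd that spans several pages; `abd_next_chunk()` is a hypothetical stand-in for the real abd iteration:

fletcher_4_ctx_t ctx;
zio_cksum_t zcp;
const void *buf;
uint64_t len;

fletcher_4_aarch64_neon_init(&ctx);		/* kfpu_begin() runs once, here */
while (abd_next_chunk(&buf, &len))		/* one page-sized chunk at a time */
	fletcher_4_aarch64_neon_native(&ctx, buf, len);
fletcher_4_aarch64_neon_fini(&ctx, &zcp);	/* kfpu_end() runs once, here */

Previously every `..._native()` call executed its own `kfpu_begin()`/`kfpu_end()`, i.e. one pair per page of the abd.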
--- a/module/zcommon/zfs_fletcher_avx512.c
+++ b/module/zcommon/zfs_fletcher_avx512.c
@@ -39,6 +39,7 @@ ZFS_NO_SANITIZE_UNDEFINED
 static void
 fletcher_4_avx512f_init(fletcher_4_ctx_t *ctx)
 {
+	kfpu_begin();
 	memset(ctx->avx512, 0, 4 * sizeof (zfs_fletcher_avx512_t));
 }
 
@@ -72,6 +73,7 @@ fletcher_4_avx512f_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
 	}
 
 	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
+	kfpu_end();
 }
 
 #define FLETCHER_4_AVX512_RESTORE_CTX(ctx) \
@@ -96,21 +98,17 @@ fletcher_4_avx512f_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 	const uint32_t *ip = buf;
 	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_AVX512_RESTORE_CTX(ctx);
 
-	for (; ip < ipend; ip += 8) {
+	do {
 		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
 		__asm("vpaddq %zmm4, %zmm0, %zmm0");
 		__asm("vpaddq %zmm0, %zmm1, %zmm1");
 		__asm("vpaddq %zmm1, %zmm2, %zmm2");
 		__asm("vpaddq %zmm2, %zmm3, %zmm3");
-	}
+	} while ((ip += 8) < ipend);
 
 	FLETCHER_4_AVX512_SAVE_CTX(ctx);
-
-	kfpu_end();
 }
 STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_native);
 
@@ -122,8 +120,6 @@ fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
 	const uint32_t *ip = buf;
 	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_AVX512_RESTORE_CTX(ctx);
 
 	__asm("vpbroadcastq %0, %%zmm8" :: "r" (byteswap_mask));
@@ -131,7 +127,7 @@ fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
 	__asm("vpsllq $16, %zmm8, %zmm10");
 	__asm("vpsllq $24, %zmm8, %zmm11");
 
-	for (; ip < ipend; ip += 8) {
+	do {
 		__asm("vpmovzxdq %0, %%zmm5"::"m" (*ip));
 
 		__asm("vpsrlq $24, %zmm5, %zmm6");
@@ -150,11 +146,9 @@ fletcher_4_avx512f_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
 		__asm("vpaddq %zmm0, %zmm1, %zmm1");
 		__asm("vpaddq %zmm1, %zmm2, %zmm2");
 		__asm("vpaddq %zmm2, %zmm3, %zmm3");
-	}
+	} while ((ip += 8) < ipend);
 
 	FLETCHER_4_AVX512_SAVE_CTX(ctx)
-
-	kfpu_end();
 }
 STACK_FRAME_NON_STANDARD(fletcher_4_avx512f_byteswap);
 
@@ -189,13 +183,11 @@ fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
 	const uint32_t *ip = buf;
 	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_AVX512_RESTORE_CTX(ctx);
 
 	__asm("vmovdqu64 %0, %%zmm5" :: "m" (mask));
 
-	for (; ip < ipend; ip += 8) {
+	do {
 		__asm("vpmovzxdq %0, %%zmm4"::"m" (*ip));
 
 		__asm("vpshufb %zmm5, %zmm4, %zmm4");
@@ -204,11 +196,9 @@ fletcher_4_avx512bw_byteswap(fletcher_4_ctx_t *ctx, const void *buf,
 		__asm("vpaddq %zmm0, %zmm1, %zmm1");
 		__asm("vpaddq %zmm1, %zmm2, %zmm2");
 		__asm("vpaddq %zmm2, %zmm3, %zmm3");
-	}
+	} while ((ip += 8) < ipend);
 
 	FLETCHER_4_AVX512_SAVE_CTX(ctx)
-
-	kfpu_end();
 }
 STACK_FRAME_NON_STANDARD(fletcher_4_avx512bw_byteswap);
 
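For reference, the `vpaddq` chain in each AVX-512 loop body is the Fletcher-4 recurrence applied lane-wise: zmm0 holds the running `a` sums, zmm1 `b`, zmm2 `c`, zmm3 `d`, and zmm4/zmm5 hold the zero-extended input words. A scalar sketch (the function name `fletcher_4_step` is illustrative, not from the source):

/* Scalar sketch of one Fletcher-4 step per 32-bit input word f. */
static inline void
fletcher_4_step(uint64_t *a, uint64_t *b, uint64_t *c, uint64_t *d, uint32_t f)
{
	*a += f;	/* vpaddq %zmm4, %zmm0, %zmm0 */
	*b += *a;	/* vpaddq %zmm0, %zmm1, %zmm1 */
	*c += *b;	/* vpaddq %zmm1, %zmm2, %zmm2 */
	*d += *c;	/* vpaddq %zmm2, %zmm3, %zmm3 */
}

Note that the `FLETCHER_4_AVX512_RESTORE_CTX()`/`SAVE_CTX()` pair already carries the zmm state through `ctx` between calls, which is what makes hoisting `kfpu_begin()`/`kfpu_end()` out of these per-buffer functions safe.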
--- a/module/zcommon/zfs_fletcher_intel.c
+++ b/module/zcommon/zfs_fletcher_intel.c
@@ -51,6 +51,7 @@ ZFS_NO_SANITIZE_UNDEFINED
 static void
 fletcher_4_avx2_init(fletcher_4_ctx_t *ctx)
 {
+	kfpu_begin();
 	memset(ctx->avx, 0, 4 * sizeof (zfs_fletcher_avx_t));
 }
 
@@ -81,6 +82,7 @@ fletcher_4_avx2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
 	    64 * ctx->avx[3].v[3];
 
 	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
+	kfpu_end();
 }
 
 #define FLETCHER_4_AVX2_RESTORE_CTX(ctx) \
@@ -106,22 +108,18 @@ fletcher_4_avx2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_AVX2_RESTORE_CTX(ctx);
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
 		asm volatile("vpaddq %ymm4, %ymm0, %ymm0");
 		asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
 		asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
 		asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
-	}
+	} while ((ip += 2) < ipend);
 
 	FLETCHER_4_AVX2_SAVE_CTX(ctx);
 	asm volatile("vzeroupper");
-
-	kfpu_end();
 }
 
 static void
@@ -134,13 +132,11 @@ fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_AVX2_RESTORE_CTX(ctx);
 
 	asm volatile("vmovdqu %0, %%ymm5" :: "m" (mask));
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		asm volatile("vpmovzxdq %0, %%ymm4"::"m" (*ip));
 		asm volatile("vpshufb %ymm5, %ymm4, %ymm4");
 
@@ -148,12 +144,10 @@ fletcher_4_avx2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 		asm volatile("vpaddq %ymm0, %ymm1, %ymm1");
 		asm volatile("vpaddq %ymm1, %ymm2, %ymm2");
 		asm volatile("vpaddq %ymm2, %ymm3, %ymm3");
-	}
+	} while ((ip += 2) < ipend);
 
 	FLETCHER_4_AVX2_SAVE_CTX(ctx);
 	asm volatile("vzeroupper");
-
-	kfpu_end();
 }
 
 static boolean_t fletcher_4_avx2_valid(void)
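Note that `vzeroupper` stays at the end of the AVX2 compute paths rather than migrating to the fini function along with `kfpu_end()`. It zeroes the upper halves of the ymm registers before the function returns; keeping it per-function is the usual way to avoid AVX-to-SSE transition penalties in whatever code runs between checksum calls, which seems to be the rationale here.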
--- a/module/zcommon/zfs_fletcher_sse.c
+++ b/module/zcommon/zfs_fletcher_sse.c
@@ -53,6 +53,7 @@ ZFS_NO_SANITIZE_UNDEFINED
 static void
 fletcher_4_sse2_init(fletcher_4_ctx_t *ctx)
 {
+	kfpu_begin();
 	memset(ctx->sse, 0, 4 * sizeof (zfs_fletcher_sse_t));
 }
 
@@ -80,6 +81,7 @@ fletcher_4_sse2_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
 	    8 * ctx->sse[2].v[1] + ctx->sse[1].v[1];
 
 	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
+	kfpu_end();
 }
 
 #define FLETCHER_4_SSE_RESTORE_CTX(ctx) \
@@ -104,13 +106,11 @@ fletcher_4_sse2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_SSE_RESTORE_CTX(ctx);
 
 	asm volatile("pxor %xmm4, %xmm4");
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		asm volatile("movdqu %0, %%xmm5" :: "m"(*ip));
 		asm volatile("movdqa %xmm5, %xmm6");
 		asm volatile("punpckldq %xmm4, %xmm5");
@@ -123,11 +123,9 @@ fletcher_4_sse2_native(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 		asm volatile("paddq %xmm0, %xmm1");
 		asm volatile("paddq %xmm1, %xmm2");
 		asm volatile("paddq %xmm2, %xmm3");
-	}
+	} while ((ip += 2) < ipend);
 
 	FLETCHER_4_SSE_SAVE_CTX(ctx);
-
-	kfpu_end();
 }
 
 static void
@@ -136,11 +134,9 @@ fletcher_4_sse2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 	const uint32_t *ip = buf;
 	const uint32_t *ipend = (uint32_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_SSE_RESTORE_CTX(ctx);
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		uint32_t scratch1 = BSWAP_32(ip[0]);
 		uint32_t scratch2 = BSWAP_32(ip[1]);
 		asm volatile("movd %0, %%xmm5" :: "r"(scratch1));
@@ -150,11 +146,9 @@ fletcher_4_sse2_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 		asm volatile("paddq %xmm0, %xmm1");
 		asm volatile("paddq %xmm1, %xmm2");
 		asm volatile("paddq %xmm2, %xmm3");
-	}
+	} while ((ip += 2) < ipend);
 
 	FLETCHER_4_SSE_SAVE_CTX(ctx);
-
-	kfpu_end();
 }
 
 static boolean_t fletcher_4_sse2_valid(void)
@@ -186,14 +180,12 @@ fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 	const uint64_t *ip = buf;
 	const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
 
-	kfpu_begin();
-
 	FLETCHER_4_SSE_RESTORE_CTX(ctx);
 
	asm volatile("movdqu %0, %%xmm7"::"m" (mask));
 	asm volatile("pxor %xmm4, %xmm4");
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		asm volatile("movdqu %0, %%xmm5"::"m" (*ip));
 		asm volatile("pshufb %xmm7, %xmm5");
 		asm volatile("movdqa %xmm5, %xmm6");
@@ -207,11 +199,9 @@ fletcher_4_ssse3_byteswap(fletcher_4_ctx_t *ctx, const void *buf, uint64_t size)
 		asm volatile("paddq %xmm0, %xmm1");
 		asm volatile("paddq %xmm1, %xmm2");
 		asm volatile("paddq %xmm2, %xmm3");
-	}
+	} while ((ip += 2) < ipend);
 
 	FLETCHER_4_SSE_SAVE_CTX(ctx);
-
-	kfpu_end();
 }
 
 static boolean_t fletcher_4_ssse3_valid(void)
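The two SSE byteswap variants above differ in how they reverse byte order: plain SSE2 has no byte shuffle, so `fletcher_4_sse2_byteswap()` swaps each word with scalar `BSWAP_32()` and inserts it via `movd`, while `fletcher_4_ssse3_byteswap()` loads a shuffle mask once and swaps whole vectors with `pshufb`. Both receive the same `do`/`while` rewrite, since the caller-side zero-length check covers every non-scalar path.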
--- a/module/zcommon/zfs_fletcher_superscalar.c
+++ b/module/zcommon/zfs_fletcher_superscalar.c
@@ -89,7 +89,7 @@ fletcher_4_superscalar_native(fletcher_4_ctx_t *ctx,
 	c2 = ctx->superscalar[2].v[1];
 	d2 = ctx->superscalar[3].v[1];
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		a += ip[0];
 		a2 += ip[1];
 		b += a;
@@ -98,7 +98,7 @@ fletcher_4_superscalar_native(fletcher_4_ctx_t *ctx,
 		c2 += b2;
 		d += c;
 		d2 += c2;
-	}
+	} while ((ip += 2) < ipend);
 
 	ctx->superscalar[0].v[0] = a;
 	ctx->superscalar[1].v[0] = b;
@@ -129,7 +129,7 @@ fletcher_4_superscalar_byteswap(fletcher_4_ctx_t *ctx,
 	c2 = ctx->superscalar[2].v[1];
 	d2 = ctx->superscalar[3].v[1];
 
-	for (; ip < ipend; ip += 2) {
+	do {
 		a += BSWAP_32(ip[0]);
 		a2 += BSWAP_32(ip[1]);
 		b += a;
@@ -138,7 +138,7 @@ fletcher_4_superscalar_byteswap(fletcher_4_ctx_t *ctx,
 		c2 += b2;
 		d += c;
 		d2 += c2;
-	}
+	} while ((ip += 2) < ipend);
 
 	ctx->superscalar[0].v[0] = a;
 	ctx->superscalar[1].v[0] = b;
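The superscalar variant keeps two independent accumulator streams (`a`..`d` over even words, `a2`..`d2` over odd words) to expose instruction-level parallelism, hence the stride of 2; the superscalar4 hunks below do the same with four streams and a stride of 4. A sketch of the full rewritten loop body, assembled from the two hunks above (the interior updates `b2 += a2;` and `c += b;` fall in the unshown lines between them):

do {
	a += ip[0];
	a2 += ip[1];
	b += a;
	b2 += a2;
	c += b;
	c2 += b2;
	d += c;
	d2 += c2;
} while ((ip += 2) < ipend);

The fini function then folds the even/odd streams back into single A/B/C/D values with fixed weights, analogous to the `8 * ctx->aarch64_neon[3].v[1] - 8 * ctx->aarch64_neon[2].v[1] + ...` expression visible in the NEON fini hunk.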
--- a/module/zcommon/zfs_fletcher_superscalar4.c
+++ b/module/zcommon/zfs_fletcher_superscalar4.c
@@ -113,7 +113,7 @@ fletcher_4_superscalar4_native(fletcher_4_ctx_t *ctx,
 	c4 = ctx->superscalar[2].v[3];
 	d4 = ctx->superscalar[3].v[3];
 
-	for (; ip < ipend; ip += 4) {
+	do {
 		a += ip[0];
 		a2 += ip[1];
 		a3 += ip[2];
@@ -130,7 +130,7 @@ fletcher_4_superscalar4_native(fletcher_4_ctx_t *ctx,
 		d2 += c2;
 		d3 += c3;
 		d4 += c4;
-	}
+	} while ((ip += 4) < ipend);
 
 	ctx->superscalar[0].v[0] = a;
 	ctx->superscalar[1].v[0] = b;
@@ -179,7 +179,7 @@ fletcher_4_superscalar4_byteswap(fletcher_4_ctx_t *ctx,
 	c4 = ctx->superscalar[2].v[3];
 	d4 = ctx->superscalar[3].v[3];
 
-	for (; ip < ipend; ip += 4) {
+	do {
 		a += BSWAP_32(ip[0]);
 		a2 += BSWAP_32(ip[1]);
 		a3 += BSWAP_32(ip[2]);
@@ -196,7 +196,7 @@ fletcher_4_superscalar4_byteswap(fletcher_4_ctx_t *ctx,
 		d2 += c2;
 		d3 += c3;
 		d4 += c4;
-	}
+	} while ((ip += 4) < ipend);
 
 	ctx->superscalar[0].v[0] = a;
 	ctx->superscalar[1].v[0] = b;