Avoid saving/restoring AMX registers to work around an SPR erratum

Intel SPR erratum SPR4 says that if you trip into a vmexit while
doing an FPU save/restore, your AMX register state might misbehave...
and by misbehave, I mean the save can incorrectly write all zeroes,
leading to explosions if you later restore from it.

Since we're not using AMX for anything, the simple way to avoid
this is to just never save/restore the AMX registers, which is safe
because we disable any sort of preemption across our save/restores.

If we ever decide to use AMX, it's not clear that we have any
way to mitigate this on Linux... but I am not an expert.
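
For the record, masking works because the xsave family takes a
requested-feature bitmap in EDX:EAX; the CPU only touches state
components whose bits are set. A minimal sketch of the idea follows
(example_xsave is a hypothetical stand-in for the kfpu_do_xsave
macro this commit touches, not its actual definition):

#include <stdint.h>

/* AMX tile state lives in XSAVE components 17 (XTILECFG) and 18 (XTILEDATA). */
#define	XFEATURE_MASK_XTILE	0x60000

/*
 * Hypothetical masked save: with the XTILE bits clear in EDX:EAX,
 * xsave never reads (or incorrectly zeroes) the AMX tiles.
 */
static inline void
example_xsave(void *buf, uint64_t mask)
{
	uint32_t low = (uint32_t)mask;		/* EAX: mask bits 31..0 */
	uint32_t high = (uint32_t)(mask >> 32);	/* EDX: mask bits 63..32 */

	__asm__ __volatile__("xsave %0"
	    : "+m" (*(char *)buf)
	    : "a" (low), "d" (high)
	    : "memory");
}

/* Usage: example_xsave(state, ~XFEATURE_MASK_XTILE); */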

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Rich Ercolani <rincebrain@gmail.com>
Closes #14989
Closes #15168
commit c65aaa8387
parent e99e684b33
Author:    Rich Ercolani <rincebrain@gmail.com>
Date:      2023-08-26 14:25:46 -04:00
Committer: Brian Behlendorf <behlendorf1@llnl.gov>


@@ -147,6 +147,15 @@
 #error "Toolchain needs to support the XSAVE assembler instruction"
 #endif
+#ifndef XFEATURE_MASK_XTILE
+/*
+ * For kernels where this doesn't exist yet, we still don't want to break
+ * by save/restoring this broken nonsense.
+ * See issue #14989 or Intel errata SPR4 for why
+ */
+#define	XFEATURE_MASK_XTILE	0x60000
+#endif
+
 #include <linux/mm.h>
 #include <linux/slab.h>
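
For reference, the fallback value above matches how the upstream
kernel composes this mask: AMX spans two XSAVE state components, so
the mask covers both bits.

#define	XFEATURE_XTILE_CFG	17	/* tile configuration (TILECFG) */
#define	XFEATURE_XTILE_DATA	18	/* tile data (TMM0..TMM7) */
/* (1ULL << 17) | (1ULL << 18) == 0x20000 | 0x40000 == 0x60000 */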
@@ -315,18 +324,18 @@ kfpu_begin(void)
 	uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
 #if defined(HAVE_XSAVES)
 	if (static_cpu_has(X86_FEATURE_XSAVES)) {
-		kfpu_do_xsave("xsaves", state, ~0);
+		kfpu_do_xsave("xsaves", state, ~XFEATURE_MASK_XTILE);
 		return;
 	}
 #endif
 #if defined(HAVE_XSAVEOPT)
 	if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
-		kfpu_do_xsave("xsaveopt", state, ~0);
+		kfpu_do_xsave("xsaveopt", state, ~XFEATURE_MASK_XTILE);
 		return;
 	}
 #endif
 	if (static_cpu_has(X86_FEATURE_XSAVE)) {
-		kfpu_do_xsave("xsave", state, ~0);
+		kfpu_do_xsave("xsave", state, ~XFEATURE_MASK_XTILE);
 	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
 		kfpu_save_fxsr(state);
 	} else {
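
Context for why skipping the tiles is safe here: every consumer
brackets its SIMD work with this pair, and preemption is disabled in
between (as the commit message notes), so nothing can dirty AMX state
while the saved copy is live. A hypothetical caller:

static void
example_simd_user(void)
{
	kfpu_begin();	/* masked save, preemption disabled */
	/* ... vectorized checksum/parity work, no AMX ... */
	kfpu_end();	/* masked restore, preemption re-enabled */
}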
@@ -376,12 +385,12 @@ kfpu_end(void)
 	uint8_t *state = zfs_kfpu_fpregs[smp_processor_id()];
 #if defined(HAVE_XSAVES)
 	if (static_cpu_has(X86_FEATURE_XSAVES)) {
-		kfpu_do_xrstor("xrstors", state, ~0);
+		kfpu_do_xrstor("xrstors", state, ~XFEATURE_MASK_XTILE);
 		goto out;
 	}
 #endif
 	if (static_cpu_has(X86_FEATURE_XSAVE)) {
-		kfpu_do_xrstor("xrstor", state, ~0);
+		kfpu_do_xrstor("xrstor", state, ~XFEATURE_MASK_XTILE);
 	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
 		kfpu_restore_fxsr(state);
 	} else {
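
The restore side mirrors the save; a hypothetical sketch of what a
masked xrstor boils down to (example_xrstor stands in for the
kfpu_do_xrstor macro, not its actual definition):

/*
 * xrstor also takes the requested-feature bitmap in EDX:EAX; with the
 * XTILE bits clear it never loads the (possibly zeroed) tile image,
 * so the erratum's bad data can't make it back into the registers.
 */
static inline void
example_xrstor(const void *buf, uint64_t mask)
{
	uint32_t low = (uint32_t)mask;
	uint32_t high = (uint32_t)(mask >> 32);

	__asm__ __volatile__("xrstor %0"
	    : /* no outputs */
	    : "m" (*(const char *)buf), "a" (low), "d" (high)
	    : "memory");
}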