From 08e179ff5cdbf33c856276155d63e37abc52dbc6 Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht
Date: Tue, 25 Jul 2023 16:56:04 +0200
Subject: [PATCH] backport Zenbleed stop-gap fix CVE-2023-20593

The actual fix is the microcode update, but this is a stop-gap (with a
performance penalty): it sets a chicken bit on affected CPUs that do not
have a new enough microcode revision loaded, disabling some features.
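
As a minimal sketch for verifying the stop-gap on a running system (not part
of the backported patches; assumes the msr kernel module is loaded and root
privileges), read MSR_AMD64_DE_CFG (0xc0011029) and test bit 9, which the
patches below name MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT:

  /* check whether the Zenbleed chicken bit is set on CPU 0 */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          uint64_t de_cfg;
          int fd = open("/dev/cpu/0/msr", O_RDONLY);

          /* the msr device returns the MSR whose index equals the file offset */
          if (fd < 0 || pread(fd, &de_cfg, sizeof(de_cfg), 0xc0011029) != sizeof(de_cfg)) {
                  perror("read MSR_AMD64_DE_CFG");
                  return 1;
          }
          printf("DE_CFG=%#llx, chicken bit %s\n", (unsigned long long)de_cfg,
                 (de_cfg & (1ULL << 9)) ? "set" : "clear");
          close(fd);
          return 0;
  }

The same register can also be dumped with "rdmsr -a 0xc0011029" from msr-tools.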
Signed-off-by: Thomas Lamprecht
---
 ...-the-errata-checking-functionality-u.patch | 182 ++++++++++++++++++
 .../0012-x86-cpu-amd-Add-a-Zenbleed-fix.patch | 170 ++++++++++++++++
 2 files changed, 352 insertions(+)
 create mode 100644 patches/kernel/0011-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch
 create mode 100644 patches/kernel/0012-x86-cpu-amd-Add-a-Zenbleed-fix.patch

diff --git a/patches/kernel/0011-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch b/patches/kernel/0011-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch
new file mode 100644
index 0000000..3fd3ede
--- /dev/null
+++ b/patches/kernel/0011-x86-cpu-amd-Move-the-errata-checking-functionality-u.patch
@@ -0,0 +1,182 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Sat, 15 Jul 2023 13:31:32 +0200
+Subject: [PATCH] x86/cpu/amd: Move the errata checking functionality up
+
+Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a
+
+Avoid new and remove old forward declarations.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov (AMD)
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Thomas Lamprecht
+---
+ arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++--------------------
+ 1 file changed, 67 insertions(+), 72 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 06f2ede1544f..57181b9c0474 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -27,11 +27,6 @@
+ 
+ #include "cpu.h"
+ 
+-static const int amd_erratum_383[];
+-static const int amd_erratum_400[];
+-static const int amd_erratum_1054[];
+-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+-
+ /*
+  * nodes_per_socket: Stores the number of nodes per socket.
+  * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
+@@ -39,6 +34,73 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+  */
+ static u32 nodes_per_socket = 1;
+ 
++/*
++ * AMD errata checking
++ *
++ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
++ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
++ * have an OSVW id assigned, which it takes as first argument. Both take a
++ * variable number of family-specific model-stepping ranges created by
++ * AMD_MODEL_RANGE().
++ *
++ * Example:
++ *
++ * const int amd_erratum_319[] =
++ *      AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
++ *                         AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
++ *                         AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
++ */
++
++#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
++#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
++        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
++#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
++#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
++#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
++
++static const int amd_erratum_400[] =
++        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
++                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
++
++static const int amd_erratum_383[] =
++        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
++
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
++static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
++{
++        int osvw_id = *erratum++;
++        u32 range;
++        u32 ms;
++
++        if (osvw_id >= 0 && osvw_id < 65536 &&
++            cpu_has(cpu, X86_FEATURE_OSVW)) {
++                u64 osvw_len;
++
++                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
++                if (osvw_id < osvw_len) {
++                        u64 osvw_bits;
++
++                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
++                               osvw_bits);
++                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
++                }
++        }
++
++        /* OSVW unavailable or ID unknown, match family-model-stepping range */
++        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
++        while ((range = *erratum++))
++                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
++                    (ms >= AMD_MODEL_RANGE_START(range)) &&
++                    (ms <= AMD_MODEL_RANGE_END(range)))
++                        return true;
++
++        return false;
++}
++
+ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+ {
+         u32 gprs[8] = { 0 };
+@@ -1100,73 +1162,6 @@ static const struct cpu_dev amd_cpu_dev = {
+ 
+ cpu_dev_register(amd_cpu_dev);
+ 
+-/*
+- * AMD errata checking
+- *
+- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+- * have an OSVW id assigned, which it takes as first argument. Both take a
+- * variable number of family-specific model-stepping ranges created by
+- * AMD_MODEL_RANGE().
+- *
+- * Example:
+- *
+- * const int amd_erratum_319[] =
+- *      AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+- *                         AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+- *                         AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+- */
+-
+-#define AMD_LEGACY_ERRATUM(...)         { -1, __VA_ARGS__, 0 }
+-#define AMD_OSVW_ERRATUM(osvw_id, ...)  { osvw_id, __VA_ARGS__, 0 }
+-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+-        ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+-#define AMD_MODEL_RANGE_FAMILY(range)   (((range) >> 24) & 0xff)
+-#define AMD_MODEL_RANGE_START(range)    (((range) >> 12) & 0xfff)
+-#define AMD_MODEL_RANGE_END(range)      ((range) & 0xfff)
+-
+-static const int amd_erratum_400[] =
+-        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+-                            AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+-
+-static const int amd_erratum_383[] =
+-        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+-
+-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+-static const int amd_erratum_1054[] =
+-        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+-
+-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+-{
+-        int osvw_id = *erratum++;
+-        u32 range;
+-        u32 ms;
+-
+-        if (osvw_id >= 0 && osvw_id < 65536 &&
+-            cpu_has(cpu, X86_FEATURE_OSVW)) {
+-                u64 osvw_len;
+-
+-                rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+-                if (osvw_id < osvw_len) {
+-                        u64 osvw_bits;
+-
+-                        rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+-                               osvw_bits);
+-                        return osvw_bits & (1ULL << (osvw_id & 0x3f));
+-                }
+-        }
+-
+-        /* OSVW unavailable or ID unknown, match family-model-stepping range */
+-        ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+-        while ((range = *erratum++))
+-                if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+-                    (ms >= AMD_MODEL_RANGE_START(range)) &&
+-                    (ms <= AMD_MODEL_RANGE_END(range)))
+-                        return true;
+-
+-        return false;
+-}
+-
+ void set_dr_addr_mask(unsigned long mask, int dr)
+ {
+         if (!boot_cpu_has(X86_FEATURE_BPEXT))
diff --git a/patches/kernel/0012-x86-cpu-amd-Add-a-Zenbleed-fix.patch b/patches/kernel/0012-x86-cpu-amd-Add-a-Zenbleed-fix.patch
new file mode 100644
index 0000000..8508da7
--- /dev/null
+++ b/patches/kernel/0012-x86-cpu-amd-Add-a-Zenbleed-fix.patch
@@ -0,0 +1,170 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Sat, 15 Jul 2023 13:41:28 +0200
+Subject: [PATCH] x86/cpu/amd: Add a Zenbleed fix
+
+Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98
+
+Add a fix for the Zen2 VZEROUPPER data corruption bug where under
+certain circumstances executing VZEROUPPER can cause register
+corruption or leak data.
+
+The optimal fix is through microcode but in the case the proper
+microcode revision has not been applied, enable a fallback fix using
+a chicken bit.
+
+Signed-off-by: Borislav Petkov (AMD)
+Signed-off-by: Greg Kroah-Hartman
+Signed-off-by: Thomas Lamprecht
+---
+ arch/x86/include/asm/microcode.h     |  1 +
+ arch/x86/include/asm/microcode_amd.h |  2 +
+ arch/x86/include/asm/msr-index.h     |  1 +
+ arch/x86/kernel/cpu/amd.c            | 60 ++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c         |  2 +
+ 5 files changed, 66 insertions(+)
+
+diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
+index 320566a0443d..66dbba181bd9 100644
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -5,6 +5,7 @@
+ #include <asm/cpu.h>
+ #include <linux/earlycpio.h>
+ #include <linux/initrd.h>
++#include <asm/microcode_amd.h>
+ 
+ struct ucode_patch {
+         struct list_head plist;
+diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
+index e6662adf3af4..9675c621c1ca 100644
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(unsigned int family);
+ extern void load_ucode_amd_ap(unsigned int family);
+ extern int __init save_microcode_in_initrd_amd(unsigned int family);
+ void reload_ucode_amd(unsigned int cpu);
++extern void amd_check_microcode(void);
+ #else
+ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
+ static inline void load_ucode_amd_ap(unsigned int family) {}
+ static inline int __init
+ save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+ static inline void reload_ucode_amd(unsigned int cpu) {}
++static inline void amd_check_microcode(void) {}
+ #endif
+ #endif /* _ASM_X86_MICROCODE_AMD_H */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index 978a3e203cdb..52a09dbc2c26 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -538,6 +538,7 @@
+ #define MSR_AMD64_DE_CFG                        0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT   1
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE       BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9
+ 
+ #define MSR_AMD64_BU_CFG2                       0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL                   0xc0011030
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 57181b9c0474..c03b066aaa54 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -70,6 +70,11 @@ static const int amd_erratum_383[] =
+ static const int amd_erratum_1054[] =
+         AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+ 
++static const int amd_zenbleed[] =
++        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
++                           AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
++                           AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
++
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+         int osvw_id = *erratum++;
+@@ -978,6 +983,47 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
+         }
+ }
+ 
++static bool cpu_has_zenbleed_microcode(void)
++{
++        u32 good_rev = 0;
++
++        switch (boot_cpu_data.x86_model) {
++        case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
++        case 0x60 ... 0x67: good_rev = 0x0860010b; break;
++        case 0x68 ... 0x6f: good_rev = 0x08608105; break;
++        case 0x70 ... 0x7f: good_rev = 0x08701032; break;
++        case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
++
++        default:
++                return false;
++                break;
++        }
++
++        if (boot_cpu_data.microcode < good_rev)
++                return false;
++
++        return true;
++}
++
++static void zenbleed_check(struct cpuinfo_x86 *c)
++{
++        if (!cpu_has_amd_erratum(c, amd_zenbleed))
++                return;
++
++        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++                return;
++
++        if (!cpu_has(c, X86_FEATURE_AVX))
++                return;
++
++        if (!cpu_has_zenbleed_microcode()) {
++                pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
++                msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
++        } else {
++                msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
++        }
++}
++
+ static void init_amd(struct cpuinfo_x86 *c)
+ {
+         early_init_amd(c);
+@@ -1067,6 +1113,8 @@ static void init_amd(struct cpuinfo_x86 *c)
+                 msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+ 
+         check_null_seg_clears_base(c);
++
++        zenbleed_check(c);
+ }
+ 
+ #ifdef CONFIG_X86_32
+@@ -1196,3 +1244,15 @@ u32 amd_get_highest_perf(void)
+         return 255;
+ }
+ EXPORT_SYMBOL_GPL(amd_get_highest_perf);
++
++static void zenbleed_check_cpu(void *unused)
++{
++        struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
++
++        zenbleed_check(c);
++}
++
++void amd_check_microcode(void)
++{
++        on_each_cpu(zenbleed_check_cpu, NULL, 1);
++}
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 6a25e93f2a87..2ac8ceae0ed1 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2337,6 +2337,8 @@ void microcode_check(struct cpuinfo_x86 *prev_info)
+ 
+         perf_check_microcode();
+ 
++        amd_check_microcode();
++
+         store_cpu_caps(&curr_info);
+ 
+         if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,