diff --git a/include/linux/simd_x86.h b/include/linux/simd_x86.h
index 69dbd5579..1bde1d7c9 100644
--- a/include/linux/simd_x86.h
+++ b/include/linux/simd_x86.h
@@ -144,6 +144,8 @@
  */
 #if defined(HAVE_KERNEL_FPU_INTERNAL)
 
+#include <linux/mm.h>
+
 extern union fpregs_state **zfs_kfpu_fpregs;
 
 /*
@@ -156,7 +158,8 @@ kfpu_fini(void)
 
 	for_each_possible_cpu(cpu) {
 		if (zfs_kfpu_fpregs[cpu] != NULL) {
-			kfree(zfs_kfpu_fpregs[cpu]);
+			free_pages((unsigned long)zfs_kfpu_fpregs[cpu],
+			    get_order(sizeof (union fpregs_state)));
 		}
 	}
 
@@ -166,20 +169,28 @@ kfpu_fini(void)
 static inline int
 kfpu_init(void)
 {
-	int cpu;
-
 	zfs_kfpu_fpregs = kzalloc(num_possible_cpus() *
 	    sizeof (union fpregs_state *), GFP_KERNEL);
 	if (zfs_kfpu_fpregs == NULL)
 		return (-ENOMEM);
 
+	/*
+	 * The fxsave and xsave operations require 16-/64-byte alignment of
+	 * the target memory. Since kmalloc() provides no alignment
+	 * guarantee instead use alloc_pages_node().
+	 */
+	unsigned int order = get_order(sizeof (union fpregs_state));
+	int cpu;
+
 	for_each_possible_cpu(cpu) {
-		zfs_kfpu_fpregs[cpu] = kmalloc_node(sizeof (union fpregs_state),
-		    GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
-		if (zfs_kfpu_fpregs[cpu] == NULL) {
+		struct page *page = alloc_pages_node(cpu_to_node(cpu),
+		    GFP_KERNEL | __GFP_ZERO, order);
+		if (page == NULL) {
 			kfpu_fini();
 			return (-ENOMEM);
 		}
+
+		zfs_kfpu_fpregs[cpu] = page_address(page);
 	}
 
 	return (0);