fix CVE-2017-12188: nested KVM stack overflow
This commit is contained in:
parent 2e38f6f987
commit 0e3176e76f

Makefile (1 line added)
@@ -238,6 +238,7 @@ ${KERNEL_SRC}/README: ${KERNEL_SRC_SUBMODULE} | submodule
 	cp -a ${KERNEL_SRC_SUBMODULE} ${KERNEL_SRC}
 	cat ${KERNEL_SRC}/debian.master/config/config.common.ubuntu ${KERNEL_SRC}/debian.master/config/${ARCH}/config.common.${ARCH} ${KERNEL_SRC}/debian.master/config/${ARCH}/config.flavour.generic > ${KERNEL_CFG_ORG}
 	cd ${KERNEL_SRC}; for patch in ../patches/kernel/*.patch; do patch --verbose -p1 < $${patch}; done
+	cd ${KERNEL_SRC}; for patch in ../patches/kernel-cves/*.patch; do patch --verbose -p1 < $${patch}; done
 	sed -i ${KERNEL_SRC}/Makefile -e 's/^EXTRAVERSION.*$$/EXTRAVERSION=${EXTRAVERSION}/'
 	touch $@

@@ -0,0 +1,38 @@
From 8da45fe7977a1866ea95d5d281485e6f139d75c8 Mon Sep 17 00:00:00 2001
From: Ladi Prosek <lprosek@redhat.com>
Date: Tue, 10 Oct 2017 17:30:58 +0200
Subject: [CVE-2017-12188 1/2] KVM: nVMX: update last_nonleaf_level when
 initializing nested EPT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The function updates context->root_level but didn't call
update_last_nonleaf_level so the previous and potentially wrong value
was used for page walks. For example, a zero value of last_nonleaf_level
would allow a potential out-of-bounds access in arch/x86/mmu/paging_tmpl.h's
walk_addr_generic function (CVE-2017-12188).

Fixes: 155a97a3d7c78b46cef6f1a973c831bc5a4f82bb
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/kvm/mmu.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 56e68dfac974..ca0112742343 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4459,6 +4459,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,

 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
+	update_last_nonleaf_level(vcpu, context);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
 	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
--
2.14.1

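For context on the patch above: kvm_init_shadow_ept_mmu() reprograms the MMU context for nested EPT, but before this fix it never refreshed last_nonleaf_level, so the field kept whatever value the previous context had left behind, possibly zero. The helper it now calls records the level at and above which a guest entry is never treated as a large page. In kernels of this vintage it looks roughly like the following sketch, paraphrased from arch/x86/kvm/mmu.c rather than quoted from this diff:

/*
 * Editor's sketch (not part of the commit): approximate shape of the helper
 * the patch wires in; see arch/x86/kvm/mmu.c of the affected kernel for the
 * authoritative version.
 */
static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	unsigned root_level = mmu->root_level;

	/*
	 * Entries at or above last_nonleaf_level are never treated as large
	 * pages; by default only the root-level entry is excluded.
	 */
	mmu->last_nonleaf_level = root_level;

	/*
	 * With 32-bit PSE paging the root-level (page directory) entry can
	 * itself map a 4 MB page, so raise the threshold by one level.
	 */
	if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
		mmu->last_nonleaf_level++;
}

Skipping this call after context->root_level changes is what could leave a stale, even zero, last_nonleaf_level behind; the second patch below defends against exactly that case.
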
@@ -0,0 +1,88 @@
From ebe182a7c6221878cbb5d03e1eafa8002494f8cb Mon Sep 17 00:00:00 2001
From: Ladi Prosek <lprosek@redhat.com>
Date: Tue, 10 Oct 2017 17:30:59 +0200
Subject: [CVE-2017-12188 2/2] KVM: MMU: always terminate page walks at level 1
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

is_last_gpte() is not equivalent to the pseudo-code given in commit
6bb69c9b69c31 ("KVM: MMU: simplify last_pte_bitmap") because an incorrect
value of last_nonleaf_level may override the result even if level == 1.

It is critical for is_last_gpte() to return true on level == 1 to
terminate page walks. Otherwise memory corruption may occur as level
is used as an index to various data structures throughout the page
walking code. Even though the actual bug would be wherever the MMU is
initialized (as in the previous patch), be defensive and ensure here
that is_last_gpte() returns the correct value.

This patch is also enough to fix CVE-2017-12188, and suggested for
stable and distro kernels.

Fixes: 6bb69c9b69c315200ddc2bc79aee14c0184cf5b2
Cc: stable@vger.kernel.org
Cc: Andy Honig <ahonig@google.com>
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
[Panic if walk_addr_generic gets an incorrect level; this is a serious
 bug and it's not worth a WARN_ON where the recovery path might hide
 further exploitable issues; suggested by Andrew Honig. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/kvm/paging_tmpl.h |  3 ++-
 arch/x86/kvm/mmu.c         | 14 +++++++-------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b0454c7e4cff..da06dc8c4fc4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 		--walker->level;

 		index = PT_INDEX(addr, walker->level);
-
 		table_gfn = gpte_to_gfn(pte);
 		offset = index * sizeof(pt_element_t);
 		pte_gpa = gfn_to_gpa(table_gfn) + offset;
+
+		BUG_ON(walker->level < 1);
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ca0112742343..2e4a6732aaa9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3934,13 +3934,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 static inline bool is_last_gpte(struct kvm_mmu *mmu,
 				unsigned level, unsigned gpte)
 {
-	/*
-	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
-	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-	 */
-	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
 	/*
 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
 	 * If it is clear, there are no large pages at this level, so clear
@@ -3948,6 +3941,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
 	 */
 	gpte &= level - mmu->last_nonleaf_level;

+	/*
+	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+	 */
+	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
 	return gpte & PT_PAGE_SIZE_MASK;
 }

--
2.14.1

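To make the ordering argument in the second patch concrete, here is a small stand-alone user-space sketch. The helper names old_is_last_gpte and new_is_last_gpte are invented for illustration; the constants mirror the kernel's PT_PAGE_TABLE_LEVEL == 1 and the bit-7 PT_PAGE_SIZE_MASK. It replays the broken case from the first patch: a stale last_nonleaf_level of 0 while the walker is at level 1.

/* Editor's sketch (not kernel code): models the is_last_gpte() reordering. */
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1u          /* last level of a guest page walk  */
#define PT_PAGE_SIZE_MASK   (1u << 7)   /* "large page" bit in a guest PTE  */

/* Old ordering: the AND against last_nonleaf_level runs last and can undo
 * the bit that is supposed to force termination at level 1. */
static unsigned old_is_last_gpte(unsigned last_nonleaf_level,
				 unsigned level, unsigned gpte)
{
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;  /* sets bit 7 when level == 1 */
	gpte &= level - last_nonleaf_level;       /* ...but may clear it again  */
	return gpte & PT_PAGE_SIZE_MASK;
}

/* New ordering (as in the patch): the OR runs last, so level == 1 always
 * reports "last", whatever last_nonleaf_level contains. */
static unsigned new_is_last_gpte(unsigned last_nonleaf_level,
				 unsigned level, unsigned gpte)
{
	gpte &= level - last_nonleaf_level;
	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
	return gpte & PT_PAGE_SIZE_MASK;
}

int main(void)
{
	/* Broken case: stale last_nonleaf_level of 0, walker at level 1,
	 * guest PTE without the page-size bit set. */
	unsigned level = 1, last_nonleaf_level = 0, gpte = 0;

	printf("old: %u\n", old_is_last_gpte(last_nonleaf_level, level, gpte));
	printf("new: %u\n", new_is_last_gpte(last_nonleaf_level, level, gpte));
	return 0;
}

Compiled and run, the old ordering prints 0: the bit that should force termination at level 1 is masked away, so walk_addr_generic() would decrement walker->level past 1 and index walker->table_gfn[] and walker->pte_gpa[] with -1, corrupting the on-stack guest_walker (the "nested KVM stack overflow" of the commit title). The new ordering prints a non-zero value (bit 7 set), and the added BUG_ON(walker->level < 1) turns any remaining misconfiguration into an immediate panic instead of silent corruption.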