From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky
Date: Fri, 31 Jan 2020 08:06:41 -0300
Subject: [PATCH] x86/kvm: Introduce kvm_(un)map_gfn()

CVE-2019-3016
CVE-2020-3016

kvm_vcpu_(un)map operates on gfns from any current address space.
In certain cases we want to make sure we are not mapping SMRAM
and for that we can use kvm_(un)map_gfn() that we are introducing
in this patch.

This is part of CVE-2019-3016.

Signed-off-by: Paolo Bonzini
Signed-off-by: Boris Ostrovsky
Reviewed-by: Joao Martins
Signed-off-by: Thadeu Lima de Souza Cascardo
Signed-off-by: Thomas Lamprecht
---
 include/linux/kvm_host.h |  2 ++
 virt/kvm/kvm_main.c      | 29 ++++++++++++++++++++++++-----
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d41c521a39da..df4cc0ead363 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -758,8 +758,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 91e56a9b0661..6614e030ae75 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
 			 struct kvm_host_map *map)
 {
 	kvm_pfn_t pfn;
 	void *hva = NULL;
 	struct page *page = KVM_UNMAPPED_PAGE;
+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
 
 	if (!map)
 		return -EINVAL;
@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
 	return 0;
 }
 
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+{
+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
+}
+EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
-	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-		    bool dirty)
+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+			struct kvm_host_map *map, bool dirty)
 {
 	if (!map)
 		return;
@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 #endif
 
 	if (dirty) {
-		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
+		mark_page_dirty_in_slot(memslot, map->gfn);
 		kvm_release_pfn_dirty(map->pfn);
 	} else {
 		kvm_release_pfn_clean(map->pfn);
@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 	map->hva = NULL;
 	map->page = NULL;
 }
+
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+{
+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
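
A rough sketch of how a caller would use the new helpers follows. It is
modeled on the later record_steal_time() conversion in
arch/x86/kvm/x86.c from the same CVE-2019-3016 series; vcpu->arch.st.msr_val
and KVM_STEAL_VALID_BITS come from that file, while the function name and
the elided update are illustrative only, not part of this patch:

/*
 * Sketch (not part of this patch): keep the steal-time page mapped
 * while updating it. kvm_map_gfn() resolves the gfn through
 * kvm_memslots(), i.e. the default address space, so a gfn that is
 * only backed in SMRAM cannot be mapped by accident.
 */
static void example_update_steal_time(struct kvm_vcpu *vcpu)
{
	struct kvm_host_map map;
	struct kvm_steal_time *st;

	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map))
		return;

	st = map.hva + offset_in_page(vcpu->arch.st.msr_val &
				      KVM_STEAL_VALID_BITS);

	/* ... update *st (accumulated steal time, flags, ...) ... */

	/* dirty == true: mark_page_dirty_in_slot() runs on the memslot. */
	kvm_unmap_gfn(vcpu, &map, true);
}

Note that kvm_unmap_gfn() likewise looks the slot up with
gfn_to_memslot() on the default address space, which is why it can mark
the page dirty without consulting the vCPU's current (possibly SMM)
address space.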