rebase patches on top of Ubuntu-5.3.0-41.33
(generated with debian/scripts/import-upstream-tag)

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent d6bd2e83bb
commit 17f964317d
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
  2 files changed, 111 insertions(+)
 
 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index a0e45b4c7a08..d22b0fac61c0 100644
+index 12305bfe9ac3..9b41dd8b5503 100644
 --- a/Documentation/admin-guide/kernel-parameters.txt
 +++ b/Documentation/admin-guide/kernel-parameters.txt
 @@ -3395,6 +3395,15 @@
@@ -75,7 +75,7 @@ index a0e45b4c7a08..d22b0fac61c0 100644
  		Safety option to keep boot IRQs enabled. This
  		should never be necessary.
 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index 66cd9678c672..7f589736a49e 100644
+index 93ce2912a00b..94b06fe4ab4c 100644
 --- a/drivers/pci/quirks.c
 +++ b/drivers/pci/quirks.c
 @@ -193,6 +193,106 @@ static int __init pci_apply_final_quirks(void)
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 49ef54267061..91e56a9b0661 100644
+index 669475b59456..f05e5b5c30e8 100644
 --- a/virt/kvm/kvm_main.c
 +++ b/virt/kvm/kvm_main.c
 @@ -76,7 +76,7 @@ module_param(halt_poll_ns, uint, 0644);
@@ -1,38 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:40 -0300
-Subject: [PATCH] x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
-
-CVE-2019-3016
-CVE-2020-3016
-
-kvm_steal_time_set_preempted() may accidentally clear KVM_VCPU_FLUSH_TLB
-bit if it is called more than once while VCPU is preempted.
-
-This is part of CVE-2019-3016.
-
-(This bug was also independently discovered by Jim Mattson
-<jmattson@google.com>)
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/x86.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 9c45e6ca30fd..80e860bd39d5 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -3399,6 +3399,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- 		return;
- 
-+	if (vcpu->arch.st.steal.preempted)
-+		return;
-+
- 	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
- 
- 	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
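
The patch removed above (its fix is presumably already part of the Ubuntu-5.3.0-41.33 base) guards kvm_steal_time_set_preempted() against running twice while the vCPU stays preempted: without the guard, the second run rewrites the guest-visible preempted byte and wipes out a KVM_VCPU_FLUSH_TLB request the guest may have set in between. A stand-alone C model of the two variants (helper names are hypothetical; the flag values merely mimic the PV steal-time ABI):

#include <stdint.h>
#include <stdio.h>

#define KVM_VCPU_PREEMPTED  (1 << 0)
#define KVM_VCPU_FLUSH_TLB  (1 << 1)

static uint8_t shared_preempted; /* guest-visible kvm_steal_time.preempted */
static uint8_t host_copy;        /* host-local vcpu->arch.st.steal.preempted */

/* Buggy variant: unconditionally overwrites guest-set bits. */
static void set_preempted_buggy(void)
{
	host_copy = KVM_VCPU_PREEMPTED;
	shared_preempted = host_copy;  /* models kvm_write_guest_offset_cached() */
}

/* Fixed variant: a repeat call while still preempted is a no-op. */
static void set_preempted_fixed(void)
{
	if (host_copy)                 /* models the added early return */
		return;
	host_copy = KVM_VCPU_PREEMPTED;
	shared_preempted = host_copy;
}

int main(void)
{
	set_preempted_fixed();                  /* vCPU preempted once */
	shared_preempted |= KVM_VCPU_FLUSH_TLB; /* guest requests a TLB flush */
	set_preempted_fixed();                  /* preempted again before running */
	printf("fixed: FLUSH_TLB %s\n",
	       shared_preempted & KVM_VCPU_FLUSH_TLB ? "kept" : "lost");

	shared_preempted = host_copy = 0;
	set_preempted_buggy();
	shared_preempted |= KVM_VCPU_FLUSH_TLB;
	set_preempted_buggy();                  /* clobbers the flush request */
	printf("buggy: FLUSH_TLB %s\n",
	       shared_preempted & KVM_VCPU_FLUSH_TLB ? "kept" : "lost");
	return 0;
}
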
@@ -1,111 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:41 -0300
-Subject: [PATCH] x86/kvm: Introduce kvm_(un)map_gfn()
-
-CVE-2019-3016
-CVE-2020-3016
-
-kvm_vcpu_(un)map operates on gfns from any current address space.
-In certain cases we want to make sure we are not mapping SMRAM
-and for that we can use kvm_(un)map_gfn() that we are introducing
-in this patch.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- include/linux/kvm_host.h |  2 ++
- virt/kvm/kvm_main.c      | 29 ++++++++++++++++++++++++-----
- 2 files changed, 26 insertions(+), 5 deletions(-)
-
-diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index d41c521a39da..df4cc0ead363 100644
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -758,8 +758,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
- kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
- kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
- struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
- void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
- unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
- unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
- int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 91e56a9b0661..6614e030ae75 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
- }
- EXPORT_SYMBOL_GPL(gfn_to_page);
- 
--static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
-+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
- 			 struct kvm_host_map *map)
- {
- 	kvm_pfn_t pfn;
- 	void *hva = NULL;
- 	struct page *page = KVM_UNMAPPED_PAGE;
-+	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
- 
- 	if (!map)
- 		return -EINVAL;
-@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
- 	return 0;
- }
- 
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-+{
-+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
-+}
-+EXPORT_SYMBOL_GPL(kvm_map_gfn);
-+
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
- {
--	return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
-+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_map);
- 
--void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
--		    bool dirty)
-+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-+			    struct kvm_host_map *map, bool dirty)
- {
- 	if (!map)
- 		return;
-@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- #endif
- 
- 	if (dirty) {
--		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
-+		mark_page_dirty_in_slot(memslot, map->gfn);
- 		kvm_release_pfn_dirty(map->pfn);
- 	} else {
- 		kvm_release_pfn_clean(map->pfn);
-@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- 	map->hva = NULL;
- 	map->page = NULL;
- }
-+
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-+{
-+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
-+	return 0;
-+}
-+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-+
-+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-+{
-+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
-+}
- EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
- 
- struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
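
The kvm_map_gfn()/kvm_unmap_gfn() helpers this dropped patch introduced differ from kvm_vcpu_map()/kvm_vcpu_unmap() only in the memslot set consulted: kvm_memslots(vcpu->kvm), the default address space, rather than kvm_vcpu_memslots(vcpu), which can resolve gfns into SMRAM while the vCPU is in SMM. Going only by the signatures visible in the hunks above, a caller that must never touch SMRAM would look roughly like this sketch (not a verbatim kernel excerpt):

	struct kvm_host_map map;

	if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map))
		return;                         /* mapping failed */
	/* ... read or write the page through map.hva ... */
	kvm_unmap_gfn(vcpu, &map, true);        /* true: page was dirtied */
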
@@ -1,292 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:42 -0300
-Subject: [PATCH] x86/kvm: Cache gfn to pfn translation
-
-CVE-2019-3016
-CVE-2020-3016
-
-__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
-* relatively expensive
-* in certain cases (such as when done from atomic context) cannot be called
-
-Stashing gfn-to-pfn mapping should help with both cases.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/include/asm/kvm_host.h |  1 +
- arch/x86/kvm/x86.c              | 10 ++++
- include/linux/kvm_host.h        |  7 ++-
- include/linux/kvm_types.h       |  9 ++-
- virt/kvm/kvm_main.c             | 98 ++++++++++++++++++++++++-------
- 5 files changed, 103 insertions(+), 22 deletions(-)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index f68e174f452f..7c06343614a4 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
- 		u64 last_steal;
- 		struct gfn_to_hva_cache stime;
- 		struct kvm_steal_time steal;
-+		struct gfn_to_pfn_cache cache;
- 	} st;
- 
- 	u64 tsc_offset;
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 80e860bd39d5..cb18560b07bc 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -8945,6 +8945,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
- void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
- {
- 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
-+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
-+
-+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
- 
- 	kvmclock_reset(vcpu);
- 
-@@ -9611,11 +9614,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- 
- void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
- {
-+	struct kvm_vcpu *vcpu;
-+	int i;
-+
- 	/*
- 	 * memslots->generation has been incremented.
- 	 * mmio generation may have reached its maximum value.
- 	 */
- 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
-+
-+	/* Force re-initialization of steal_time cache */
-+	kvm_for_each_vcpu(i, vcpu, kvm)
-+		kvm_vcpu_kick(vcpu);
- }
- 
- int kvm_arch_prepare_memory_region(struct kvm *kvm,
-diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index df4cc0ead363..abfc2fbde957 100644
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
- void kvm_set_pfn_accessed(kvm_pfn_t pfn);
- void kvm_get_pfn(kvm_pfn_t pfn);
- 
-+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
- int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
- 			int len);
- int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
- kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
- kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
--int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-+		struct gfn_to_pfn_cache *cache, bool atomic);
- struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
- void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
--int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
- unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
- unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
- int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
-diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
-index bde5374ae021..2382cb58969d 100644
---- a/include/linux/kvm_types.h
-+++ b/include/linux/kvm_types.h
-@@ -18,7 +18,7 @@ struct kvm_memslots;
- 
- enum kvm_mr_change;
- 
--#include <asm/types.h>
-+#include <linux/types.h>
- 
- /*
-  * Address types:
-@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
- 	struct kvm_memory_slot *memslot;
- };
- 
-+struct gfn_to_pfn_cache {
-+	u64 generation;
-+	gfn_t gfn;
-+	kvm_pfn_t pfn;
-+	bool dirty;
-+};
-+
- #endif /* __KVM_TYPES_H__ */
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 6614e030ae75..f05e5b5c30e8 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
- }
- EXPORT_SYMBOL_GPL(gfn_to_page);
- 
-+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
-+{
-+	if (pfn == 0)
-+		return;
-+
-+	if (cache)
-+		cache->pfn = cache->gfn = 0;
-+
-+	if (dirty)
-+		kvm_release_pfn_dirty(pfn);
-+	else
-+		kvm_release_pfn_clean(pfn);
-+}
-+
-+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-+				 struct gfn_to_pfn_cache *cache, u64 gen)
-+{
-+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
-+
-+	cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-+	cache->gfn = gfn;
-+	cache->dirty = false;
-+	cache->generation = gen;
-+}
-+
- static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
--			 struct kvm_host_map *map)
-+			 struct kvm_host_map *map,
-+			 struct gfn_to_pfn_cache *cache,
-+			 bool atomic)
- {
- 	kvm_pfn_t pfn;
- 	void *hva = NULL;
- 	struct page *page = KVM_UNMAPPED_PAGE;
- 	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-+	u64 gen = slots->generation;
- 
- 	if (!map)
- 		return -EINVAL;
- 
--	pfn = gfn_to_pfn_memslot(slot, gfn);
-+	if (cache) {
-+		if (!cache->pfn || cache->gfn != gfn ||
-+		    cache->generation != gen) {
-+			if (atomic)
-+				return -EAGAIN;
-+			kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-+		}
-+		pfn = cache->pfn;
-+	} else {
-+		if (atomic)
-+			return -EAGAIN;
-+		pfn = gfn_to_pfn_memslot(slot, gfn);
-+	}
- 	if (is_error_noslot_pfn(pfn))
- 		return -EINVAL;
- 
- 	if (pfn_valid(pfn)) {
- 		page = pfn_to_page(pfn);
--		hva = kmap(page);
-+		if (atomic)
-+			hva = kmap_atomic(page);
-+		else
-+			hva = kmap(page);
- #ifdef CONFIG_HAS_IOMEM
--	} else {
-+	} else if (!atomic) {
- 		hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
-+	} else {
-+		return -EINVAL;
- #endif
- 	}
- 
-@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
- 	return 0;
- }
- 
--int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-+		struct gfn_to_pfn_cache *cache, bool atomic)
- {
--	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
-+	return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-+			cache, atomic);
- }
- EXPORT_SYMBOL_GPL(kvm_map_gfn);
- 
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
- {
--	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
-+	return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-+			NULL, false);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_map);
- 
- static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
--			struct kvm_host_map *map, bool dirty)
-+			struct kvm_host_map *map,
-+			struct gfn_to_pfn_cache *cache,
-+			bool dirty, bool atomic)
- {
- 	if (!map)
- 		return;
-@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
- 	if (!map->hva)
- 		return;
- 
--	if (map->page != KVM_UNMAPPED_PAGE)
--		kunmap(map->page);
-+	if (map->page != KVM_UNMAPPED_PAGE) {
-+		if (atomic)
-+			kunmap_atomic(map->hva);
-+		else
-+			kunmap(map->page);
-+	}
- #ifdef CONFIG_HAS_IOMEM
--	else
-+	else if (!atomic)
- 		memunmap(map->hva);
-+	else
-+		WARN_ONCE(1, "Unexpected unmapping in atomic context");
- #endif
- 
--	if (dirty) {
-+	if (dirty)
- 		mark_page_dirty_in_slot(memslot, map->gfn);
--		kvm_release_pfn_dirty(map->pfn);
--	} else {
--		kvm_release_pfn_clean(map->pfn);
--	}
-+
-+	if (cache)
-+		cache->dirty |= dirty;
-+	else
-+		kvm_release_pfn(map->pfn, dirty, NULL);
- 
- 	map->hva = NULL;
- 	map->page = NULL;
- }
- 
--int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-+		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
- {
--	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
-+	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
-+			cache, dirty, atomic);
- 	return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
- 
- void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
- {
--	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
-+	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
-+			dirty, false);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
- 
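
The caching scheme this dropped patch added to __kvm_map_gfn() reduces to three rules: reuse the stashed pfn when both the gfn and the memslot generation match; otherwise refill the stash via the expensive gfn_to_pfn_memslot(); and in atomic context, where refilling is not allowed, fail with -EAGAIN instead. A stand-alone C model of that decision logic (the types and the translate() stand-in are illustrative, not kernel definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

struct pfn_cache {                    /* models struct gfn_to_pfn_cache */
	uint64_t generation;
	gfn_t gfn;
	pfn_t pfn;
};

static uint64_t slots_generation = 1; /* bumped on memslot updates */

static pfn_t translate(gfn_t gfn)     /* stands in for gfn_to_pfn_memslot() */
{
	return gfn + 0x1000;          /* arbitrary fake translation */
}

static int map_gfn(struct pfn_cache *c, gfn_t gfn, bool atomic, pfn_t *pfn)
{
	if (!c->pfn || c->gfn != gfn || c->generation != slots_generation) {
		if (atomic)
			return -1;    /* models -EAGAIN: no refill allowed */
		c->pfn = translate(gfn);
		c->gfn = gfn;
		c->generation = slots_generation;
	}
	*pfn = c->pfn;
	return 0;
}

int main(void)
{
	struct pfn_cache c = {0};
	pfn_t pfn = 0;

	printf("cold, atomic:    %d\n", map_gfn(&c, 42, true, &pfn));
	printf("cold, sleepable: %d\n", map_gfn(&c, 42, false, &pfn));
	printf("warm, atomic:    %d (pfn=0x%llx)\n",
	       map_gfn(&c, 42, true, &pfn), (unsigned long long)pfn);
	slots_generation++;           /* memslots changed: cache goes stale */
	printf("stale, atomic:   %d\n", map_gfn(&c, 42, true, &pfn));
	return 0;
}
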
@@ -1,125 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:43 -0300
-Subject: [PATCH] x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed
-
-CVE-2019-3016
-CVE-2020-3016
-
-There is a potential race in record_steal_time() between setting
-host-local vcpu->arch.st.steal.preempted to zero (i.e. clearing
-KVM_VCPU_PREEMPTED) and propagating this value to the guest with
-kvm_write_guest_cached(). Between those two events the guest may
-still see KVM_VCPU_PREEMPTED in its copy of kvm_steal_time, set
-KVM_VCPU_FLUSH_TLB and assume that hypervisor will do the right
-thing. Which it won't.
-
-Instead of copying, we should map kvm_steal_time and that will
-guarantee atomicity of accesses to @preempted.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++-------------------
- 1 file changed, 29 insertions(+), 20 deletions(-)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index cb18560b07bc..f63fa5846f08 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -2488,43 +2488,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
- 
- static void record_steal_time(struct kvm_vcpu *vcpu)
- {
-+	struct kvm_host_map map;
-+	struct kvm_steal_time *st;
-+
- 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- 		return;
- 
--	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
--		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
-+	/* -EAGAIN is returned in atomic context so we can just return. */
-+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
-+			&map, &vcpu->arch.st.cache, false))
- 		return;
- 
-+	st = map.hva +
-+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
-+
- 	/*
- 	 * Doing a TLB flush here, on the guest's behalf, can avoid
- 	 * expensive IPIs.
- 	 */
--	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
-+	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
- 		kvm_vcpu_flush_tlb(vcpu, false);
- 
--	if (vcpu->arch.st.steal.version & 1)
--		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
-+	vcpu->arch.st.steal.preempted = 0;
- 
--	vcpu->arch.st.steal.version += 1;
-+	if (st->version & 1)
-+		st->version += 1;  /* first time write, random junk */
- 
--	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
--		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-+	st->version += 1;
- 
- 	smp_wmb();
- 
--	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
-+	st->steal += current->sched_info.run_delay -
- 		vcpu->arch.st.last_steal;
- 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
- 
--	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
--		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
--
- 	smp_wmb();
- 
--	vcpu->arch.st.steal.version += 1;
-+	st->version += 1;
- 
--	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
--		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
- }
- 
- int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-@@ -3396,18 +3398,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
- 
- static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- {
-+	struct kvm_host_map map;
-+	struct kvm_steal_time *st;
-+
- 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- 		return;
- 
- 	if (vcpu->arch.st.steal.preempted)
- 		return;
- 
--	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
-+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-+			&vcpu->arch.st.cache, true))
-+		return;
-+
-+	st = map.hva +
-+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
-+
-+	st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
- 
--	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
--		&vcpu->arch.st.steal.preempted,
--		offsetof(struct kvm_steal_time, preempted),
--		sizeof(vcpu->arch.st.steal.preempted));
-+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
- }
- 
- void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
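
The race this dropped patch closes is a classic lost update: the old code cleared preempted in a host-local copy and only later pushed the whole structure back with kvm_write_guest_cached(), so a KVM_VCPU_FLUSH_TLB bit the guest set in the shared copy inside that window was silently overwritten; an xchg() on the directly mapped structure leaves no such window. A minimal single-threaded C illustration of the two access patterns (the problematic interleaving is staged by hand, and the flag value is only assumed):

#include <stdint.h>
#include <stdio.h>

#define KVM_VCPU_FLUSH_TLB (1 << 1)

int main(void)
{
	uint8_t shared, host_copy, old;

	/* Copy-based host (old code): read, modify privately, write back. */
	shared = 0;
	host_copy = shared;             /* kvm_read_guest_cached()   */
	host_copy = 0;                  /* clear preempted locally   */
	shared |= KVM_VCPU_FLUSH_TLB;   /* guest sets flag meanwhile */
	shared = host_copy;             /* kvm_write_guest_cached()  */
	printf("copy-based: flush request %s\n",
	       shared & KVM_VCPU_FLUSH_TLB ? "seen" : "lost");

	/* Map-based host (new code): one swap on the shared copy; the
	 * real code uses xchg(), so no guest update can slip between
	 * the read of the old value and the store of zero. */
	shared = 0;
	shared |= KVM_VCPU_FLUSH_TLB;   /* guest sets flag */
	old = shared;                   /* xchg(&st->preempted, 0) ...   */
	shared = 0;                     /* ... returns old, stores zero  */
	printf("map-based:  flush request %s\n",
	       old & KVM_VCPU_FLUSH_TLB ? "seen" : "lost");
	return 0;
}
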
@@ -1,82 +0,0 @@
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:44 -0300
-Subject: [PATCH] x86/KVM: Clean up host's steal time structure
-
-CVE-2019-3016
-CVE-2020-3016
-
-Now that we are mapping kvm_steal_time from the guest directly we
-don't need keep a copy of it in kvm_vcpu_arch.st. The same is true
-for the stime field.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/include/asm/kvm_host.h |  3 +--
- arch/x86/kvm/x86.c              | 11 +++--------
- 2 files changed, 4 insertions(+), 10 deletions(-)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 7c06343614a4..f62f4ff5f4f4 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -674,10 +674,9 @@ struct kvm_vcpu_arch {
- 	bool pvclock_set_guest_stopped_request;
- 
- 	struct {
-+		u8 preempted;
- 		u64 msr_val;
- 		u64 last_steal;
--		struct gfn_to_hva_cache stime;
--		struct kvm_steal_time steal;
- 		struct gfn_to_pfn_cache cache;
- 	} st;
- 
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index f63fa5846f08..6ce9ace8a801 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -2509,7 +2509,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
- 	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
- 		kvm_vcpu_flush_tlb(vcpu, false);
- 
--	vcpu->arch.st.steal.preempted = 0;
-+	vcpu->arch.st.preempted = 0;
- 
- 	if (st->version & 1)
- 		st->version += 1;  /* first time write, random junk */
-@@ -2682,11 +2682,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
- 		if (data & KVM_STEAL_RESERVED_MASK)
- 			return 1;
- 
--		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
--						data & KVM_STEAL_VALID_BITS,
--						sizeof(struct kvm_steal_time)))
--			return 1;
--
- 		vcpu->arch.st.msr_val = data;
- 
- 		if (!(data & KVM_MSR_ENABLED))
-@@ -3404,7 +3399,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- 		return;
- 
--	if (vcpu->arch.st.steal.preempted)
-+	if (vcpu->arch.st.preempted)
- 		return;
- 
- 	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-@@ -3414,7 +3409,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- 	st = map.hva +
- 		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
- 
--	st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
-+	st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
- 
- 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
- }