KVM: x86: handle invalid root_hpa everywhere
Author:     Marcelo Tosatti <mtosatti@redhat.com>
AuthorDate: Fri, 3 Jan 2014 19:09:32 +0000 (17:09 -0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Mon, 31 Mar 2014 16:58:14 +0000 (09:58 -0700)
commit 37f6a4e237303549c8676dfe1fd1991ceab512eb upstream.

Rom Freiman <rom@stratoscale.com> notes other code paths vulnerable to
the bug fixed by commit 989c6b34f6a9480e397b.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Josh Boyer <jwboyer@fedoraproject.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
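
For context: every hunk below adds the same guard. VALID_PAGE() is a
plain sentinel test in the x86 KVM headers of this era, and root_hpa is
reset to that sentinel when the MMU roots are freed (e.g. via
kvm_mmu_unload()), so a shadow-table walker that starts from an invalid
root would dereference a bogus page-table address. A minimal sketch of
the definitions as recalled from the 3.x-era headers (illustrative, not
verbatim):

    typedef u64 hpa_t;                          /* host physical address */

    #define INVALID_PAGE  (~(hpa_t)0)           /* sentinel: no root loaded */
    #define VALID_PAGE(x) ((x) != INVALID_PAGE) /* the guard added below */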

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9a4605454afa321bc42c78454129d9e5abb48cbd..711c649f80b7eef961b910f20d98b6a5a2f34736 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2751,6 +2751,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
        bool ret = false;
        u64 spte = 0ull;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return false;
+
        if (!page_fault_can_be_fast(vcpu, error_code))
                return false;
 
@@ -3142,6 +3145,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
        struct kvm_shadow_walk_iterator iterator;
        u64 spte = 0ull;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return spte;
+
        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
                if (!is_shadow_present_pte(spte))
@@ -4332,6 +4338,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
        u64 spte;
        int nr_sptes = 0;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               return nr_sptes;
+
        walk_shadow_page_lockless_begin(vcpu);
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
                sptes[iterator.level-1] = spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index da20860b457a4c33bc7c17d6cece102b8b8f7216..7e6090e13237326513c07e45e7f038bd4a53bf6f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -423,6 +423,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+               goto out_gpte_changed;
+
        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
@@ -671,6 +674,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
         */
        mmu_topup_memory_caches(vcpu);
 
+       if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+               WARN_ON(1);
+               return;
+       }
+
        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
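
Taken together, the hunks apply a single pattern: check root_hpa before
walking the shadow page tables. The lockless read paths quietly return a
neutral value when the root is gone, while FNAME(invlpg) additionally
warns, presumably because reaching it with an invalid root is
unexpected. A self-contained sketch of the two guard shapes in plain C
(hypothetical function names; not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t hpa_t;

    #define INVALID_PAGE  (~(hpa_t)0)
    #define VALID_PAGE(x) ((x) != INVALID_PAGE)

    /* Stand-in for the kernel's WARN_ON(): report and continue. */
    #define WARN_ON(cond) \
            do { if (cond) fprintf(stderr, "WARNING at %s:%d\n", \
                                   __FILE__, __LINE__); } while (0)

    /* Shape used by the lockless walkers: quietly return a neutral value. */
    static uint64_t walk_guarded(hpa_t root_hpa)
    {
            uint64_t spte = 0;

            if (!VALID_PAGE(root_hpa))
                    return spte;
            /* ... walk the shadow page tables rooted at root_hpa ... */
            return spte;
    }

    /* Shape used by FNAME(invlpg): warn loudly, then bail out. */
    static void invlpg_guarded(hpa_t root_hpa)
    {
            if (!VALID_PAGE(root_hpa)) {
                    WARN_ON(1);
                    return;
            }
            /* ... find and zap the spte for the given gva ... */
    }

    int main(void)
    {
            walk_guarded(INVALID_PAGE);   /* benign: returns 0 */
            invlpg_guarded(INVALID_PAGE); /* warns: unexpected state */
            return 0;
    }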