KVM: x86/mmu: Fold rmap_recycle into rmap_add
Author:     David Matlack <dmatlack@google.com>
AuthorDate: Fri, 13 Aug 2021 20:35:00 +0000 (20:35 +0000)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 28 Sep 2022 09:11:54 +0000 (11:11 +0200)
[ Upstream commit 68be1306caea8948738cab04014ca4506b590d38 ]

Consolidate rmap_recycle and rmap_add into a single function since they
are only ever called together (and only from one place). This has a nice
side effect of eliminating an extra kvm_vcpu_gfn_to_memslot(). In
addition it makes mmu_set_spte(), which is a very long function, a
little shorter.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210813203504.2742757-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Stable-dep-of: 604f533262ae ("KVM: x86/mmu: add missing update to max_mmu_rmap_size")
Signed-off-by: Sasha Levin <sashal@kernel.org>
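
The shape of the change, reduced to a compilable toy model (every name and
type below is an illustrative stand-in, not the kernel's actual definition):
before the patch, rmap_add() returned the pte-list count and the caller,
mmu_set_spte(), compared it against RMAP_RECYCLE_THRESHOLD and called
rmap_recycle(); after the patch, the check and the zap-and-flush live inside
rmap_add() itself.

  /* Toy model of the refactor; all names here are illustrative
   * stand-ins, not the kernel's actual definitions. */
  #include <stdio.h>

  #define RMAP_RECYCLE_THRESHOLD 1000

  static int nr_sptes;    /* stands in for the pte list length */

  /* Before: rmap_add() returned the count; the caller recycled. */
  static int rmap_add_old(void)
  {
          return ++nr_sptes;
  }

  static void rmap_recycle_old(void)
  {
          printf("zap rmap, flush TLBs\n");
          nr_sptes = 0;   /* zapping empties the pte list */
  }

  /* After: the threshold check is folded into rmap_add() itself. */
  static void rmap_add_new(void)
  {
          if (++nr_sptes > RMAP_RECYCLE_THRESHOLD) {
                  printf("zap rmap, flush TLBs\n");
                  nr_sptes = 0;
          }
  }

  int main(void)
  {
          /* Old mmu_set_spte() pattern: two calls, one return value. */
          if (rmap_add_old() > RMAP_RECYCLE_THRESHOLD)
                  rmap_recycle_old();

          /* New pattern: a single call, nothing to check. */
          rmap_add_new();
          return 0;
  }

Folding the check inward removes the only reason rmap_add() had to return a
value, which is why the final hunk below also drops the rmap_count local from
mmu_set_spte().
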
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f267cca9fe09421f2c284540fd4b8662b806cc62..ba1749a770eb1ef1e8a7a65a97ce6dc7e8e0596e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
        return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
-{
-       struct kvm_memory_slot *slot;
-       struct kvm_mmu_page *sp;
-       struct kvm_rmap_head *rmap_head;
-
-       sp = sptep_to_sp(spte);
-       kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
-       return pte_list_add(vcpu, spte, rmap_head);
-}
-
-
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
        struct kvm_memslots *slots;
@@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
 
        /*
-        * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
-        * context of a vCPU so have to determine which memslots to use based
-        * on context information in sp->role.
+        * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
+        * so we have to determine which memslots to use based on context
+        * information in sp->role.
         */
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
 
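A note on the comment reworded above: rmap_remove() can run from zap paths
with no vCPU at hand, so it cannot use kvm_vcpu_gfn_to_memslot(), which
resolves memslots through the vCPU's current address space; it derives the
memslot set from the shadow page's role instead. In the x86 KVM code of this
era the helper boils down to picking the SMM vs. normal address space from
role.smm, roughly as follows (a paraphrase from memory of the x86 KVM
headers, not verbatim source):

  /*
   * Sketch of the helper named above: x86 has two memslot address
   * spaces (normal and SMM), and the page role records which one
   * the shadow page belongs to.
   */
  static inline struct kvm_memslots *
  kvm_memslots_for_spte_role(struct kvm *kvm, union kvm_mmu_page_role role)
  {
          return __kvm_memslots(kvm, role.smm);
  }
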
@@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 #define RMAP_RECYCLE_THRESHOLD 1000
 
-static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
        struct kvm_memory_slot *slot;
-       struct kvm_rmap_head *rmap_head;
        struct kvm_mmu_page *sp;
+       struct kvm_rmap_head *rmap_head;
+       int rmap_count;
 
        sp = sptep_to_sp(spte);
+       kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_count = pte_list_add(vcpu, spte, rmap_head);
 
-       kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
-       kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
-                       KVM_PAGES_PER_HPAGE(sp->role.level));
+       if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
+               kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+               kvm_flush_remote_tlbs_with_address(
+                               vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+       }
 }
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
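
On the flush range in the folded code above: KVM_PAGES_PER_HPAGE(sp->role.level)
covers the whole (possibly huge) page backing the mapping. Assuming the usual
x86 definitions (simplified but equivalent to the macros in
arch/x86/include/asm/kvm_host.h, with 9 gfn bits per page-table level), the
range grows 512x per level, as this standalone snippet works out:

  #include <stdio.h>

  /* Assumed x86 definitions, paraphrased from
   * arch/x86/include/asm/kvm_host.h (9 gfn bits per level). */
  #define PG_LEVEL_4K             1
  #define KVM_HPAGE_GFN_SHIFT(x)  (((x) - PG_LEVEL_4K) * 9)
  #define KVM_PAGES_PER_HPAGE(x)  (1UL << KVM_HPAGE_GFN_SHIFT(x))

  int main(void)
  {
          for (int level = PG_LEVEL_4K; level <= 3; level++)
                  printf("level %d: flush covers %lu pages\n",
                         level, KVM_PAGES_PER_HPAGE(level));
          return 0;
  }

So recycling a level-3 rmap flushes on the order of 262144 pages' worth of TLB
entries, which is presumably one reason the zap is gated behind a high
threshold rather than done eagerly.
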
@@ -2718,7 +2709,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        bool host_writable)
 {
        int was_rmapped = 0;
-       int rmap_count;
        int set_spte_ret;
        int ret = RET_PF_FIXED;
        bool flush = false;
@@ -2778,9 +2768,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
        if (!was_rmapped) {
                kvm_update_page_stats(vcpu->kvm, level, 1);
-               rmap_count = rmap_add(vcpu, sptep, gfn);
-               if (rmap_count > RMAP_RECYCLE_THRESHOLD)
-                       rmap_recycle(vcpu, sptep, gfn);
+               rmap_add(vcpu, sptep, gfn);
        }
 
        return ret;