arch/x86/kvm/mmu.c +7 −10

@@ -2388,16 +2388,15 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
-			 unsigned pt_access, unsigned pte_access,
-			 int write_fault, int *emulate, int level, gfn_t gfn,
-			 pfn_t pfn, bool speculative, bool host_writable)
+			 unsigned pte_access, int write_fault, int *emulate,
+			 int level, gfn_t gfn, pfn_t pfn, bool speculative,
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
 
-	pgprintk("%s: spte %llx access %x write_fault %d gfn %llx\n",
-		 __func__, *sptep, pt_access,
-		 write_fault, gfn);
+	pgprintk("%s: spte %llx write_fault %d gfn %llx\n",
+		 __func__, *sptep, write_fault, gfn);
 
 	if (is_rmap_spte(*sptep)) {
 		/*
@@ -2513,7 +2512,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
 		return -1;
 
 	for (i = 0; i < ret; i++, gfn++, start++)
-		mmu_set_spte(vcpu, start, ACC_ALL, access, 0, NULL,
+		mmu_set_spte(vcpu, start, access, 0, NULL,
 			     sp->role.level, gfn, page_to_pfn(pages[i]),
 			     true, true);
 
@@ -2574,9 +2573,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			unsigned pte_access = ACC_ALL;
-
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
 				     write, &emulate, level, gfn, pfn,
 				     prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);

arch/x86/kvm/paging_tmpl.h +4 −5

@@ -326,8 +326,8 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	 * we call mmu_set_spte() with host_writable = true because
 	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
 	 */
-	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, NULL,
-		     PT_PAGE_TABLE_LEVEL, gfn, pfn, true, true);
+	mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
+		     gfn, pfn, true, true);
 
 	return true;
 }
@@ -470,9 +470,8 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	}
 
 	clear_sp_write_flooding_count(it.sptep);
-	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
-		     write_fault, &emulate, it.level,
-		     gw->gfn, pfn, prefault, map_writable);
+	mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
+		     it.level, gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return emulate;
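The change above is mechanical: mmu_set_spte() only ever read pt_access in its debug pgprintk, so the parameter is dropped from the signature and from every caller. A minimal standalone sketch of the same refactor follows; the types, the 0x7 access value, and the simplified body are stand-ins for illustration, not the kernel's real definitions.

#include <stdio.h>

typedef unsigned long long u64;

/*
 * After the refactor: the unused pt_access parameter is gone, and
 * pte_access is now the first access argument. The old body only
 * printed pt_access, so the debug line simply drops it.
 */
static void mmu_set_spte(u64 *sptep, unsigned pte_access, int write_fault)
{
	printf("spte %llx write_fault %d\n", *sptep, write_fault);
	*sptep = pte_access;	/* stand-in for the real SPTE update */
}

int main(void)
{
	u64 spte = 0;

	/* Each call site passes one argument fewer; nothing else changes. */
	mmu_set_spte(&spte, 0x7 /* stand-in for ACC_ALL */, 1);
	return 0;
}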