arch/x86/include/asm/kvm_host.h +1 −1

@@ -250,7 +250,7 @@ struct kvm_mmu {
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
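
The callback loses its bool clear_unsync argument: every implementation can read the same information from sp->unsync, which the callers now arrange before invoking the hook. A minimal userspace sketch of the new callback shape; kvm_vcpu_stub, kvm_mmu_page_stub, kvm_mmu_stub and model_sync_page are hypothetical stand-ins for illustration, not the kernel types:

	/* Userspace model of the narrowed sync_page callback. */
	#include <stdio.h>

	struct kvm_vcpu_stub;			/* opaque, used only by pointer */

	struct kvm_mmu_page_stub {
		unsigned unsync : 1;		/* mirrors kvm_mmu_page's unsync bit */
	};

	struct kvm_mmu_stub {
		/* new shape: no bool clear_unsync; callees read sp->unsync */
		int (*sync_page)(struct kvm_vcpu_stub *vcpu,
				 struct kvm_mmu_page_stub *sp);
	};

	static int model_sync_page(struct kvm_vcpu_stub *vcpu,
				   struct kvm_mmu_page_stub *sp)
	{
		/* the information the dropped flag used to carry now lives in sp */
		return sp->unsync ? 0 : 1;
	}

	int main(void)
	{
		struct kvm_mmu_stub mmu = { .sync_page = model_sync_page };
		struct kvm_mmu_page_stub sp = { .unsync = 1 };

		printf("sync_page -> %d\n", mmu.sync_page(NULL, &sp));
		return 0;
	}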
arch/x86/kvm/mmu.c +4 −4

@@ -1156,7 +1156,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 }
 
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp, bool clear_unsync)
+			       struct kvm_mmu_page *sp)
 {
 	return 1;
 }
@@ -1286,7 +1286,7 @@ static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	if (clear_unsync)
 		kvm_unlink_unsync_page(vcpu->kvm, sp);
 
-	if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
@@ -1327,12 +1327,12 @@ static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_unlink_unsync_page(vcpu->kvm, s);
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-			(vcpu->arch.mmu.sync_page(vcpu, s, true))) {
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
 			continue;
 		}
 
-		kvm_unlink_unsync_page(vcpu->kvm, s);
 		flush = true;
 	}
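
The two call sites stay behaviorally equivalent. In kvm_sync_pages(), kvm_unlink_unsync_page() is hoisted above the sync_page() call, so the callback now observes sp->unsync == 0, exactly what the old clear_unsync == true argument encoded; in __kvm_sync_page() with clear_unsync == false, the page stays linked and the callback sees sp->unsync == 1. A small model of that equivalence under those assumptions; all names here are illustrative, not the kernel's:

	/* Userspace model: the new sp->unsync read reproduces the old flag. */
	#include <assert.h>
	#include <stdbool.h>

	struct sp_model {
		bool unsync;
	};

	/* old contract: caller passed clear_unsync explicitly */
	static bool old_use_trap(struct sp_model *sp, bool clear_unsync)
	{
		(void)sp;
		return !clear_unsync;
	}

	/* new contract: callee reads sp->unsync after the caller has
	 * (or has not) unlinked the page */
	static bool new_use_trap(struct sp_model *sp)
	{
		return sp->unsync;
	}

	int main(void)
	{
		struct sp_model sp;

		/* kvm_sync_pages() path: old code passed clear_unsync = true;
		 * new code unlinks first, so the callback sees unsync = 0. */
		sp.unsync = false;		/* kvm_unlink_unsync_page() hoisted */
		assert(new_use_trap(&sp) == old_use_trap(&sp, true));

		/* __kvm_sync_page() with clear_unsync = false: page stays
		 * linked, so the callback still sees unsync = 1. */
		sp.unsync = true;
		assert(new_use_trap(&sp) == old_use_trap(&sp, false));

		return 0;
	}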
arch/x86/kvm/paging_tmpl.h +2 −3

@@ -740,8 +740,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    bool clear_unsync)
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
 	bool host_writable;
@@ -781,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			u64 nonpresent;
 
 			if (rsvd_bits_set || is_present_gpte(gpte) ||
-			      !clear_unsync)
+			      sp->unsync)
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
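
With the flag gone, FNAME(sync_page) installs shadow_trap_nonpresent_pte whenever reserved bits are set, the gpte is present, or the page is still marked unsync, and shadow_notrap_nonpresent_pte only when the gpte is known-not-present on a synced page, so the resulting #PF can be reflected to the guest without a re-walk. A sketch of that selection; the constants are placeholders for illustration, not the kernel's shadow pte encodings:

	/* Userspace sketch of the nonpresent-spte choice after the change. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SHADOW_TRAP_NONPRESENT   (UINT64_C(0xff) << 49)	/* placeholder */
	#define SHADOW_NOTRAP_NONPRESENT (UINT64_C(0xfe) << 49)	/* placeholder */

	/* trap: the fault exits to KVM, which re-walks the guest page
	 * table; notrap: KVM already knows the gpte is not present and
	 * can inject the #PF into the guest directly. */
	static uint64_t pick_nonpresent(bool rsvd_bits_set, bool gpte_present,
					bool sp_unsync)
	{
		if (rsvd_bits_set || gpte_present || sp_unsync)
			return SHADOW_TRAP_NONPRESENT;
		return SHADOW_NOTRAP_NONPRESENT;
	}

	int main(void)
	{
		/* synced page, gpte absent, no reserved bits: safe to
		 * reflect the fault straight to the guest */
		printf("%#llx\n",
		       (unsigned long long)pick_nonpresent(false, false, false));
		return 0;
	}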