arch/x86/include/asm/kvm_host.h (+1 −0)

@@ -54,6 +54,7 @@
 #define KVM_REQ_TRIPLE_FAULT	KVM_ARCH_REQ(2)
 #define KVM_REQ_MMU_SYNC	KVM_ARCH_REQ(3)
 #define KVM_REQ_CLOCK_UPDATE	KVM_ARCH_REQ(4)
+#define KVM_REQ_LOAD_CR3	KVM_ARCH_REQ(5)
 #define KVM_REQ_EVENT		KVM_ARCH_REQ(6)
 #define KVM_REQ_APF_HALT	KVM_ARCH_REQ(7)
 #define KVM_REQ_STEAL_UPDATE	KVM_ARCH_REQ(8)
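No caller of the new request appears in this diff; presumably a later patch in the series raises it from a path that switches the MMU to an already-built root. A minimal sketch of such a producer, using the existing kvm_make_request() helper (the function example_switch_root and its cached_root parameter are hypothetical, for illustration only):

	/* Hypothetical call site: point the MMU at a previously built root
	 * and defer the hardware CR3 write to the next guest entry, where
	 * vcpu_enter_guest() services KVM_REQ_LOAD_CR3. */
	static void example_switch_root(struct kvm_vcpu *vcpu, hpa_t cached_root)
	{
		vcpu->arch.mmu.root_hpa = cached_root;
		kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
	}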
arch/x86/kvm/mmu.c (+1 −2)

@@ -4847,8 +4847,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	kvm_mmu_sync_roots(vcpu);
 	if (r)
 		goto out;
-	/* set_cr3() should ensure TLB has been flushed */
-	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+	kvm_mmu_load_cr3(vcpu);
 out:
 	return r;
 }
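Note: behavior on this path should be unchanged. kvm_mmu_load() only reaches the call after root allocation succeeded (the "if (r) goto out;" above), so root_hpa is valid here and the VALID_PAGE() guard in the new helper passes; the comment about set_cr3() flushing the TLB simply moves into the helper.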
arch/x86/kvm/mmu.h (+7 −0)

@@ -85,6 +85,13 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 	return kvm_mmu_load(vcpu);
 }
 
+static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
+{
+	/* set_cr3() should ensure TLB has been flushed */
+	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
+		vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
+}
+
 /*
  * Currently, we have two sorts of write-protection, a) the first one
  * write-protects guest page to sync the guest modification, b) another one is
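The VALID_PAGE() guard makes the helper a no-op when the root has been torn down; in that case kvm_mmu_reload() above sees an invalid root_hpa, calls kvm_mmu_load(), and ends up back here with a freshly allocated root. For reference, the macros involved (as defined elsewhere in kvm_host.h around the time of this series):

	#define INVALID_PAGE	(~(hpa_t)0)
	#define VALID_PAGE(x)	((x) != INVALID_PAGE)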
arch/x86/kvm/x86.c (+2 −0)

@@ -7332,6 +7332,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		}
 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
 			kvm_mmu_sync_roots(vcpu);
+		if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu))
+			kvm_mmu_load_cr3(vcpu);
 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 			kvm_vcpu_flush_tlb(vcpu, true);
 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
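kvm_check_request() tests and clears the request bit, so a deferred CR3 load runs at most once, immediately before guest entry. The new check is ordered after KVM_REQ_MMU_SYNC and before KVM_REQ_TLB_FLUSH, so a pending root sync happens before the CR3 write and an explicitly requested TLB flush still follows it; per the comment moved into kvm_mmu_load_cr3(), set_cr3() itself is expected to flush the TLB.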