arch/x86/include/asm/kvm_host.h (+1 −1)

```diff
@@ -611,7 +611,7 @@ struct kvm_vcpu_arch {
	 * Paging state of an L2 guest (used for nested npt)
	 *
	 * This context will save all necessary information to walk page tables
-	 * of the an L2 guest. This context is only initialized for page table
+	 * of an L2 guest. This context is only initialized for page table
	 * walking and not for faulting since we never handle l2 page faults on
	 * the host.
	 */
```
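For context, the comment being fixed describes a paging context that carries just enough state to walk an L2 guest's page tables: it is initialized for walking only, never for fault handling, since L2 page faults are not handled on the host. A minimal standalone C sketch of that "translate-only" shape follows; `walk_ctx`, `toy_gva_to_gpa`, and the toy translation are hypothetical stand-ins, not KVM's actual types.

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t; /* guest virtual address  */
typedef uint64_t gpa_t; /* guest physical address */

/*
 * A walk-only paging context: translation state and a translation hook,
 * and deliberately no fault handler, mirroring the comment's point that
 * the nested context is initialized for page table walking only.
 */
struct walk_ctx {
	gpa_t root; /* root of the (toy) L2 page tables */
	gpa_t (*gva_to_gpa)(const struct walk_ctx *ctx, gva_t gva);
};

/* Toy translation, just enough to make the sketch runnable. */
static gpa_t toy_gva_to_gpa(const struct walk_ctx *ctx, gva_t gva)
{
	return ctx->root + (gva & 0xfff);
}

int main(void)
{
	struct walk_ctx nested = {
		.root = 0x100000,
		.gva_to_gpa = toy_gva_to_gpa,
	};

	printf("L2 gva 0x123 -> gpa 0x%llx\n",
	       (unsigned long long)nested.gva_to_gpa(&nested, 0x123));
	return 0;
}
```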
arch/x86/kvm/vmx/vmx.c (+1 −1)

```diff
@@ -1914,7 +1914,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 }
 
 /*
- * Writes msr value into into the appropriate "register".
+ * Writes msr value into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
  */
```
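The corrected comment sits above vmx_set_msr (the hunk header names vmx_get_msr only because that function ends where the hunk starts) and states its contract: write the value into the matching "register", return 0 on success and non-0 otherwise. Below is a minimal sketch of that dispatch contract; `toy_vcpu` and `toy_set_msr` are hypothetical stand-ins, though the MSR indices are the real architectural ones.

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_EFER 0xc0000080u /* extended feature enable register */
#define MSR_STAR 0xc0000081u /* legacy syscall target             */

struct toy_vcpu {
	uint64_t efer;
	uint64_t star;
};

/*
 * Mirrors the documented contract: write the msr value into the
 * appropriate "register", return 0 on success, non-0 otherwise.
 */
static int toy_set_msr(struct toy_vcpu *vcpu, uint32_t index, uint64_t data)
{
	switch (index) {
	case MSR_EFER:
		vcpu->efer = data;
		return 0;
	case MSR_STAR:
		vcpu->star = data;
		return 0;
	default:
		return -EINVAL; /* unknown MSR: non-0, as promised */
	}
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	printf("set EFER:  %d\n", toy_set_msr(&vcpu, MSR_EFER, 0x500));
	printf("set bogus: %d\n", toy_set_msr(&vcpu, 0xdead, 1));
	return 0;
}
```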
virt/kvm/kvm_main.c (+1 −1)

```diff
@@ -1519,7 +1519,7 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 /*
  * The fast path to get the writable pfn which will be stored in @pfn,
  * true indicates success, otherwise false is returned. It's also the
- * only part that runs if we can are in atomic context.
+ * only part that runs if we can in atomic context.
  */
 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
 			    bool *writable, kvm_pfn_t *pfn)
```
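The fixed comment describes hva_to_pfn_fast's role: store the writable pfn in @pfn, return true on success, and serve as the only eligible path in atomic context, because the slow path may sleep. A compact standalone sketch of that fast/slow split follows; `toy_pfn_fast`, `toy_hva_to_pfn`, and the error value are hypothetical, not the kernel's implementation.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pfn_t;

/*
 * Fast path: must never sleep, so it is the only candidate in atomic
 * context. Returns true on success and stores the result in *pfn.
 */
static bool toy_pfn_fast(uint64_t addr, pfn_t *pfn)
{
	if (addr & 1) /* pretend odd addresses need the slow path */
		return false;
	*pfn = addr >> 12;
	return true;
}

/* Slow path: allowed to sleep, so it must be skipped in atomic context. */
static pfn_t toy_pfn_slow(uint64_t addr)
{
	return addr >> 12; /* stand-in for the blocking lookup */
}

static pfn_t toy_hva_to_pfn(uint64_t addr, bool atomic)
{
	pfn_t pfn;

	if (toy_pfn_fast(addr, &pfn))
		return pfn;
	if (atomic)            /* cannot block: the fast path was our only shot */
		return (pfn_t)-1; /* hypothetical error value */
	return toy_pfn_slow(addr);
}

int main(void)
{
	printf("fast ok:    %llu\n", (unsigned long long)toy_hva_to_pfn(0x2000, true));
	printf("atomic err: %lld\n", (long long)toy_hva_to_pfn(0x2001, true));
	printf("slow ok:    %llu\n", (unsigned long long)toy_hva_to_pfn(0x2001, false));
	return 0;
}
```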