Commit c8b88b33 authored by Peter Xu, committed by Paolo Bonzini
Browse files

kvm: Add interruptible flag to __gfn_to_pfn_memslot()



Add a new "interruptible" flag showing that the caller is willing to be
interrupted by signals during the __gfn_to_pfn_memslot() request.  Wire it
up with a FOLL_INTERRUPTIBLE flag that we've just introduced.

This prepares KVM to be able to respond to SIGUSR1 (for QEMU that's the
SIGIPI) even during e.g. handling a userfaultfd page fault.

No functional change intended.

Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221011195809.557016-4-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fe5ed56c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1239,7 +1239,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	 */
	smp_rmb();

	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
	pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
				   write_fault, &writable, NULL);
	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(hva, vma_shift);
+1 −1
Original line number Diff line number Diff line
@@ -598,7 +598,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
		write_ok = true;
	} else {
		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
					   writing, &write_ok, NULL);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
+1 −1
Original line number Diff line number Diff line
@@ -846,7 +846,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
					   writing, upgrade_p, NULL);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
+2 −2
Original line number Diff line number Diff line
@@ -4170,7 +4170,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
	}

	async = false;
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, &async,
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
					  fault->write, &fault->map_writable,
					  &fault->hva);
	if (!async)
@@ -4187,7 +4187,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
		}
	}

	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, NULL,
					  fault->write, &fault->map_writable,
					  &fault->hva);
	return RET_PF_CONTINUE;
+2 −2
Original line number Diff line number Diff line
@@ -1150,8 +1150,8 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);
			       bool atomic, bool interruptible, bool *async,
			       bool write_fault, bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
Loading