Commit 5656374b authored by Paolo Bonzini's avatar Paolo Bonzini
Browse files

Merge branch 'gpc-fixes' of git://git.infradead.org/users/dwmw2/linux into HEAD



Pull Xen-for-KVM changes from David Woodhouse:

* add support for 32-bit guests in SCHEDOP_poll

* the rest of the gfn-to-pfn cache API cleanup

"I still haven't reinstated the last of those patches to make gpc->len
immutable."

Signed-off-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
parents 74bee0ca 06e155c4
Loading
Loading
Loading
Loading
+8 −12
Original line number Diff line number Diff line
@@ -2311,13 +2311,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
	kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);

	/* we verify if the enable bit is set... */
	if (system_time & 1) {
		kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
				 KVM_HOST_USES_PFN, system_time & ~1ULL,
	if (system_time & 1)
		kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
				 sizeof(struct pvclock_vcpu_time_info));
	} else {
		kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
	}
	else
		kvm_gpc_deactivate(&vcpu->arch.pv_time);

	return;
}
@@ -3047,12 +3045,10 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
			      offset + sizeof(*guest_hv_clock))) {
	while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
				    offset + sizeof(*guest_hv_clock)))
		if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
@@ -3401,7 +3397,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
	kvm_gpc_deactivate(&vcpu->arch.pv_time);
	vcpu->arch.time = 0;
}

@@ -11559,7 +11555,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	kvm_gpc_init(&vcpu->arch.pv_time);
	kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);

	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+70 −54
Original line number Diff line number Diff line
@@ -42,13 +42,12 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gpc_deactivate(kvm, gpc);
		kvm_gpc_deactivate(gpc);
		goto out;
	}

	do {
		ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
				       PAGE_SIZE);
		ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
		if (ret)
			goto out;

@@ -273,14 +272,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
	 * gfn_to_pfn caches that cover the region.
	 */
	read_lock_irqsave(&gpc1->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
	while (!kvm_gpc_check(gpc1, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (atomic)
			return;

		if (kvm_gpc_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
		if (kvm_gpc_refresh(gpc1, user_len1))
			return;

		read_lock_irqsave(&gpc1->lock, flags);
@@ -309,7 +308,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
		 */
		read_lock(&gpc2->lock);

		if (!kvm_gpc_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
		if (!kvm_gpc_check(gpc2, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);

@@ -323,8 +322,8 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
			 * to the second page now because the guest changed to
			 * 64-bit mode, the second GPC won't have been set up.
			 */
			if (kvm_gpc_activate(v->kvm, gpc2, NULL, KVM_HOST_USES_PFN,
					     gpc1->gpa + user_len1, user_len2))
			if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1,
					     user_len2))
				return;

			/*
@@ -489,12 +488,10 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
			      sizeof(struct vcpu_info))) {
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
				    sizeof(struct vcpu_info)))
		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info)))
			return;

		read_lock_irqsave(&gpc->lock, flags);
@@ -554,8 +551,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
			      sizeof(struct vcpu_info))) {
	while (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
@@ -569,8 +565,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
		if (in_atomic() || !task_is_running(current))
			return 1;

		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
				    sizeof(struct vcpu_info))) {
		if (kvm_gpc_refresh(gpc, sizeof(struct vcpu_info))) {
			/*
			 * If this failed, userspace has screwed up the
			 * vcpu_info mapping. No interrupts for you.
@@ -711,15 +706,13 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_info_cache, NULL,
				     KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct vcpu_info));
		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache,
				     data->u.gpa, sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

@@ -727,15 +720,13 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.vcpu_time_info_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gpc_activate(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_time_info_cache,
				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
		r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache,
				     data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -751,10 +742,8 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
		if (data->u.gpa == GPA_INVALID) {
			r = 0;
		deactivate_out:
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.runstate2_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
			break;
		}

@@ -770,20 +759,18 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)

		/* How much fits in the (first) page? */
		sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK);
		r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
				     NULL, KVM_HOST_USES_PFN, data->u.gpa, sz1);
		r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache,
				     data->u.gpa, sz1);
		if (r)
			goto deactivate_out;

		/* Either map the second page, or deactivate the second GPC */
		if (sz1 >= sz) {
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.runstate2_cache);
			kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
		} else {
			sz2 = sz - sz1;
			BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK);
			r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate2_cache,
					     NULL, KVM_HOST_USES_PFN,
			r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache,
					     data->u.gpa + sz1, sz2);
			if (r)
				goto deactivate_out;
@@ -1167,7 +1154,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,

	idx = srcu_read_lock(&kvm->srcu);
	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	ret = false;
@@ -1201,19 +1188,44 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
	evtchn_port_t port, *ports;
	gpa_t gpa;

	if (!longmode || !lapic_in_kernel(vcpu) ||
	if (!lapic_in_kernel(vcpu) ||
	    !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
		return false;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	if (!gpa) {
		*r = -EFAULT;
		return true;
	}

	if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
		struct compat_sched_poll sp32;

	if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
		/* Sanity check that the compat struct definition is correct */
		BUILD_BUG_ON(sizeof(sp32) != 16);

		if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
			*r = -EFAULT;
			return true;
		}

		/*
		 * This is a 32-bit pointer to an array of evtchn_port_t which
		 * are uint32_t, so once it's converted no further compat
		 * handling is needed.
		 */
		sched_poll.ports = (void *)(unsigned long)(sp32.ports);
		sched_poll.nr_ports = sp32.nr_ports;
		sched_poll.timeout = sp32.timeout;
	} else {
		if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
					sizeof(sched_poll))) {
			*r = -EFAULT;
			return true;
		}
	}

	if (unlikely(sched_poll.nr_ports > 1)) {
		/* Xen (unofficially) limits number of pollers to 128 */
@@ -1564,7 +1576,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
	if (!kvm_gpc_check(gpc, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1598,7 +1610,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
		if (!kvm_gpc_check(gpc, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
@@ -1696,7 +1708,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
			break;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvm_gpc_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
		rc = kvm_gpc_refresh(gpc, PAGE_SIZE);
		srcu_read_unlock(&kvm->srcu, idx);
	} while(!rc);

@@ -2026,10 +2038,14 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);
	kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm, NULL,
		     KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@@ -2037,10 +2053,10 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}
@@ -2048,7 +2064,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
@@ -2056,7 +2072,7 @@ void kvm_xen_destroy_vm(struct kvm *kvm)
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);
	kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)
+7 −0
Original line number Diff line number Diff line
@@ -207,4 +207,11 @@ struct compat_vcpu_runstate_info {
    uint64_t time[4];
} __attribute__((packed));

/*
 * 32-bit (compat) layout of Xen's struct sched_poll, used when servicing
 * SCHEDOP_poll for a 32-bit guest.  The caller sanity-checks that this
 * struct is exactly 16 bytes before reading it from guest memory.
 */
struct compat_sched_poll {
	/* This is actually a guest virtual address which points to ports. */
	uint32_t ports;
	unsigned int nr_ports;
	uint64_t timeout;
};

#endif /* __ARCH_X86_KVM_XEN_H__ */
+22 −41
Original line number Diff line number Diff line
@@ -1260,18 +1260,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
 * kvm_gpc_init - initialize gfn_to_pfn_cache.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This sets up a gfn_to_pfn_cache by initializing locks.  Note, the cache must
 * be zero-allocated (or zeroed by the caller before init).
 */
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);

/**
 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
 *                    physical address.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
 *		   invalidation.
 * @usage:	   indicates if the resulting host physical PFN is used while
@@ -1280,6 +1269,19 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
 *		   changes!---will also force @vcpu to exit the guest and
 *		   refresh the cache); and/or if the PFN used directly
 *		   by KVM (and thus needs a kernel virtual mapping).
 *
 * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
 * immutable attributes.  Note, the cache must be zero-allocated (or zeroed by
 * the caller before init).
 */
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage);

/**
 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
 *                    physical address.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit a single page.
 *
@@ -1287,20 +1289,16 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
 * invalidations to be processed.  Callers are required to use kvm_gpc_check()
 * to ensure that the cache is valid before accessing the target page.
 */
int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
		     gpa_t gpa, unsigned long len);
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);

/**
 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   current guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit a single page.
 *
 * @return:	   %true if the cache is still valid and the address matches.
@@ -1313,15 +1311,12 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 * Callers in IN_GUEST_MODE may do so without locking, although they should
 * still hold a read lock on kvm->srcu for the memslot checks.
 */
bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
		   unsigned long len);
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);

/**
 * kvm_gpc_refresh - update a previously initialized cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   updated guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit a single page.
 *
 * @return:	   0 for success.
@@ -1329,36 +1324,22 @@ bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
 * returm from this function does not mean the page can be immediately
 * return from this function does not mean the page can be immediately
 * accessed because it may have raced with an invalidation. Callers must
 * still lock and check the cache status, as this function does not return
 * with the lock still held to permit access.
 */
int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
		    unsigned long len);

/**
 * kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This unmaps the referenced page. The cache is left in the invalid state
 * but at least the mapping from GPA to userspace HVA will remain cached
 * and can be reused on a subsequent refresh.
 */
void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);

/**
 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This removes a cache from the @kvm's list to be processed on MMU notifier
 * This removes a cache from the VM's list to be processed on MMU notifier
 * invocation.
 */
void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+1 −0
Original line number Diff line number Diff line
@@ -67,6 +67,7 @@ struct gfn_to_pfn_cache {
	gpa_t gpa;
	unsigned long uhva;
	struct kvm_memory_slot *memslot;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	struct list_head list;
	rwlock_t lock;
Loading