Commit 0564eeb7 authored by Paolo Bonzini
Browse files

Merge branch 'kvm-bugfixes' into HEAD

Merge bugfixes from 5.17 before merging more tricky work.
Parents: b652de1e, 8d25b7be
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1394,7 +1394,7 @@ documentation when it pops into existence).
-------------------

:Capability: KVM_CAP_ENABLE_CAP
:Architectures: mips, ppc, s390
:Architectures: mips, ppc, s390, x86
:Type: vcpu ioctl
:Parameters: struct kvm_enable_cap (in)
:Returns: 0 on success; -1 on error
+1 −2
Original line number Diff line number Diff line
@@ -46,8 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
	 * specification (ARM DEN 0022A). This means all suspend states
	 * for KVM will preserve the register state.
	 */
	kvm_vcpu_halt(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	kvm_vcpu_wfi(vcpu);

	return PSCI_RET_SUCCESS;
}
+0 −1
Original line number Diff line number Diff line
@@ -704,7 +704,6 @@ struct kvm_vcpu_arch {
	struct fpu_guest guest_fpu;

	u64 xcr0;
	u64 guest_supported_xcr0;

	struct kvm_pio_request pio;
	void *pio_data;
+4 −1
Original line number Diff line number Diff line
@@ -1558,7 +1558,10 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
		fpregs_restore_userregs();

	newfps->xfeatures = curfps->xfeatures | xfeatures;

	if (!guest_fpu)
		newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;

	newfps->xfd = curfps->xfd & ~xfeatures;

	/* Do the final updates within the locked region */
+9 −4
Original line number Diff line number Diff line
@@ -462,19 +462,24 @@ static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
	return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
	       (num_possible_cpus() != 1));
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
	    kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
	    !boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (num_possible_cpus() != 1));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)
@@ -619,7 +624,7 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
Loading