Commit 193015ad authored by Cathy Avery, committed by Paolo Bonzini
Browse files

KVM: nSVM: Track the ASID generation of the vmcb vmrun through the vmcb



This patch moves the asid_generation from the vcpu to the vmcb
in order to track the ASID generation that was active the last
time the vmcb was run. If sd->asid_generation changes between
two runs, the old ASID is invalid and must be changed.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Cathy Avery <cavery@redhat.com>
Message-Id: <20210112164313.4204-3-cavery@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent af18fa77
Loading
Loading
Loading
Loading
+7 −14
Original line number Diff line number Diff line
@@ -1229,7 +1229,7 @@ static void init_vmcb(struct vcpu_svm *svm)
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;
	svm->current_vmcb->asid_generation = 0;
	svm->asid = 0;

	svm->nested.vmcb12_gpa = 0;
@@ -1311,13 +1311,6 @@ void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
	svm->vmcb = target_vmcb->ptr;
	svm->vmcb_pa = target_vmcb->pa;

	/*
	* Workaround: we don't yet track the ASID generation
	* that was active the last time target_vmcb was run.
	*/

	svm->asid_generation = 0;

	/*
	* Track the physical CPU the target_vmcb is running on
	* in order to mark the VMCB dirty if the cpu changes at
@@ -1384,7 +1377,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
	if (vmsa_page)
		svm->vmsa = page_address(vmsa_page);

	svm->asid_generation = 0;
	svm->guest_state_loaded = false;

	svm_switch_vmcb(svm, &svm->vmcb01);
@@ -1866,7 +1858,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
		vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
	}

	svm->asid_generation = sd->asid_generation;
	svm->current_vmcb->asid_generation = sd->asid_generation;
	svm->asid = sd->next_asid++;
}

@@ -3434,10 +3426,11 @@ static void pre_svm_run(struct vcpu_svm *svm)
	/*
	 * If the previous vmrun of the vmcb occurred on
	 * a different physical cpu then we must mark the vmcb dirty.
	 * and assign a new asid.
	*/

        if (unlikely(svm->current_vmcb->cpu != svm->vcpu.cpu)) {
		svm->asid_generation = 0;
		svm->current_vmcb->asid_generation = 0;
		vmcb_mark_all_dirty(svm->vmcb);
		svm->current_vmcb->cpu = svm->vcpu.cpu;
        }
@@ -3446,7 +3439,7 @@ static void pre_svm_run(struct vcpu_svm *svm)
		return pre_sev_run(svm, svm->vcpu.cpu);

	/* FIXME: handle wraparound of asid_generation */
	if (svm->asid_generation != sd->asid_generation)
	if (svm->current_vmcb->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}

@@ -3670,7 +3663,7 @@ void svm_flush_tlb(struct kvm_vcpu *vcpu)
	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;
		svm->current_vmcb->asid_generation--;
}

static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
+1 −1
Original line number Diff line number Diff line
@@ -85,6 +85,7 @@ struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};

struct svm_nested_state {
@@ -114,7 +115,6 @@ struct vcpu_svm {
	struct kvm_vmcb_info *current_vmcb;
	struct svm_cpu_data *svm_data;
	u32 asid;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;