Commit c62e2e94 authored by Babu Moger; committed by Paolo Bonzini
Browse files

KVM: SVM: Modify 64 bit intercept field to two 32 bit vectors



Convert all the intercepts to one array of 32-bit vectors in
vmcb_control_area. This makes it easier to add intercept vectors in
the future. Also update the trace functions.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Message-Id: <159985250813.11252.5736581193881040525.stgit@bmoger-ubuntu>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9780d51d
Loading
Loading
Loading
Loading
+7 −7
Original line number Diff line number Diff line
@@ -14,6 +14,8 @@ enum intercept_words {
	INTERCEPT_CR = 0,
	INTERCEPT_DR,
	INTERCEPT_EXCEPTION,
	INTERCEPT_WORD3,
	INTERCEPT_WORD4,
	MAX_INTERCEPT,
};

@@ -46,10 +48,8 @@ enum {
	INTERCEPT_DR7_WRITE,
	/* Byte offset 008h (word 2) */
	INTERCEPT_EXCEPTION_OFFSET = 64,
};

enum {
	INTERCEPT_INTR,
	/* Byte offset 00Ch (word 3) */
	INTERCEPT_INTR = 96,
	INTERCEPT_NMI,
	INTERCEPT_SMI,
	INTERCEPT_INIT,
@@ -81,7 +81,8 @@ enum {
	INTERCEPT_TASK_SWITCH,
	INTERCEPT_FERR_FREEZE,
	INTERCEPT_SHUTDOWN,
	INTERCEPT_VMRUN,
	/* Byte offset 010h (word 4) */
	INTERCEPT_VMRUN = 128,
	INTERCEPT_VMMCALL,
	INTERCEPT_VMLOAD,
	INTERCEPT_VMSAVE,
@@ -101,8 +102,7 @@ enum {

struct __attribute__ ((__packed__)) vmcb_control_area {
	u32 intercepts[MAX_INTERCEPT];
	u64 intercept;
	u8 reserved_1[40];
	u32 reserved_1[15 - MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
+10 −15
Original line number Diff line number Diff line
@@ -112,8 +112,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] = h->intercepts[i];

	c->intercept = h->intercept;

	if (g->int_ctl & V_INTR_MASKING_MASK) {
		/* We only want the cr8 intercept bits of L1 */
		vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
@@ -124,16 +122,14 @@ void recalc_intercepts(struct vcpu_svm *svm)
		 * affect any interrupt we may want to inject; therefore,
		 * interrupt window vmexits are irrelevant to L0.
		 */
		c->intercept &= ~(1ULL << INTERCEPT_VINTR);
		vmcb_clr_intercept(c, INTERCEPT_VINTR);
	}

	/* We don't want to see VMMCALLs from a nested guest */
	c->intercept &= ~(1ULL << INTERCEPT_VMMCALL);
	vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

	for (i = 0; i < MAX_INTERCEPT; i++)
		c->intercepts[i] |= g->intercepts[i];

	c->intercept |= g->intercept;
}

static void copy_vmcb_control_area(struct vmcb_control_area *dst,
@@ -144,7 +140,6 @@ static void copy_vmcb_control_area(struct vmcb_control_area *dst,
	for (i = 0; i < MAX_INTERCEPT; i++)
		dst->intercepts[i] = from->intercepts[i];

	dst->intercept            = from->intercept;
	dst->iopm_base_pa         = from->iopm_base_pa;
	dst->msrpm_base_pa        = from->msrpm_base_pa;
	dst->tsc_offset           = from->tsc_offset;
@@ -177,7 +172,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
	 */
	int i;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return true;

	for (i = 0; i < MSRPM_OFFSETS; i++) {
@@ -203,7 +198,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)

static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
	if ((control->intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
	if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
		return false;

	if (control->asid == 0)
@@ -489,7 +484,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
	trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
				    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
				    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
				    vmcb12->control.intercept);
				    vmcb12->control.intercepts[INTERCEPT_WORD3],
				    vmcb12->control.intercepts[INTERCEPT_WORD4]);

	/* Clear internal status */
	kvm_clear_exception_queue(&svm->vcpu);
@@ -708,7 +704,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
	u32 offset, msr, value;
	int write, mask;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_MSR_PROT)))
	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
		return NESTED_EXIT_HOST;

	msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
@@ -735,7 +731,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
	u8 start_bit;
	u64 gpa;

	if (!(svm->nested.ctl.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
	if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
		return NESTED_EXIT_HOST;

	port = svm->vmcb->control.exit_info_1 >> 16;
@@ -789,8 +785,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		if (svm->nested.ctl.intercept & exit_bits)
		if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
			vmexit = NESTED_EXIT_DONE;
	}
	}
@@ -898,7 +893,7 @@ static void nested_svm_intr(struct vcpu_svm *svm)

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INIT));
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
+7 −10
Original line number Diff line number Diff line
@@ -2217,12 +2217,9 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
{
	unsigned long cr0 = svm->vcpu.arch.cr0;
	bool ret = false;
	u64 intercept;

	intercept = svm->nested.ctl.intercept;

	if (!is_guest_mode(&svm->vcpu) ||
	    (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
	    (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
		return false;

	cr0 &= ~SVM_CR0_SELECTIVE_MASK;
@@ -2817,7 +2814,9 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
	pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
	pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
	pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
	pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
	pr_err("%-20s%08x %08x\n", "intercepts:",
              control->intercepts[INTERCEPT_WORD3],
	       control->intercepts[INTERCEPT_WORD4]);
	pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
	pr_err("%-20s%d\n", "pause filter threshold:",
	       control->pause_filter_thresh);
@@ -3734,7 +3733,6 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
		break;
	case SVM_EXIT_WRITE_CR0: {
		unsigned long cr0, val;
		u64 intercept;

		if (info->intercept == x86_intercept_cr_write)
			icpt_info.exit_code += info->modrm_reg;
@@ -3743,9 +3741,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
		    info->intercept == x86_intercept_clts)
			break;

		intercept = svm->nested.ctl.intercept;

		if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
		if (!(vmcb_is_intercept(&svm->nested.ctl,
					INTERCEPT_SELECTIVE_CR0)))
			break;

		cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
@@ -4013,7 +4010,7 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
	 * if an INIT signal is pending.
	 */
	return !gif_set(svm) ||
		   (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
		   (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
}

static void svm_vm_destroy(struct kvm *kvm)
+6 −6
Original line number Diff line number Diff line
@@ -313,7 +313,7 @@ static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);
	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
@@ -322,14 +322,14 @@ static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);
	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return (svm->vmcb->control.intercept & (1ULL << bit)) != 0;
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
@@ -393,17 +393,17 @@ static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_SMI));
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_INTR));
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+11 −7
Original line number Diff line number Diff line
@@ -544,26 +544,30 @@ TRACE_EVENT(kvm_nested_vmrun,
);

TRACE_EVENT(kvm_nested_intercepts,
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u64 intercept),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept),
	    TP_PROTO(__u16 cr_read, __u16 cr_write, __u32 exceptions, __u32 intercept1,
		     __u32 intercept2),
	    TP_ARGS(cr_read, cr_write, exceptions, intercept1, intercept2),

	TP_STRUCT__entry(
		__field(	__u16,		cr_read		)
		__field(	__u16,		cr_write	)
		__field(	__u32,		exceptions	)
		__field(	__u64,		intercept	)
		__field(	__u32,		intercept1	)
		__field(	__u32,		intercept2	)
	),

	TP_fast_assign(
		__entry->cr_read	= cr_read;
		__entry->cr_write	= cr_write;
		__entry->exceptions	= exceptions;
		__entry->intercept	= intercept;
		__entry->intercept1	= intercept1;
		__entry->intercept2	= intercept2;
	),

	TP_printk("cr_read: %04x cr_write: %04x excp: %08x intercept: %016llx",
	TP_printk("cr_read: %04x cr_write: %04x excp: %08x "
		  "intercepts: %08x %08x",
		  __entry->cr_read, __entry->cr_write, __entry->exceptions,
		__entry->intercept)
		  __entry->intercept1, __entry->intercept2)
);
/*
 * Tracepoint for #VMEXIT while nested