Commit d4e36166 authored by Jinrong Liang's avatar Jinrong Liang Committed by Sean Christopherson
Browse files

KVM: selftests: Test if event filter meets expectations on fixed counters



Add tests to cover that the PMU event_filter works as expected when it's
applied to fixed performance counters, even if no fixed counters exist
(e.g. Intel guest PMU version=1 or AMD guest).

Signed-off-by: default avatarJinrong Liang <cloudliang@tencent.com>
Reviewed-by: default avatarIsaku Yamahata <isaku.yamahata@intel.com>
Link: https://lore.kernel.org/r/20230810090945.16053-6-cloudliang@tencent.com


Signed-off-by: default avatarSean Christopherson <seanjc@google.com>
parent 86ab6af8
Loading
Loading
Loading
Loading
+80 −0
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
#define ARCH_PERFMON_BRANCHES_RETIRED		5

#define NUM_BRANCHES 42
#define INTEL_PMC_IDX_FIXED		32

/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
#define MAX_FILTER_EVENTS		300
@@ -808,6 +809,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
	TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
}

/*
 * Guest code: endlessly program and exercise one Intel fixed counter.
 *
 * Each iteration resets fixed counter @fixed_ctr_idx, enables it for
 * kernel-mode (CPL 0) counting only, retires a fixed number of branches
 * via the "loop" instruction, then reports the counter value to the host
 * through GUEST_SYNC.  The host side asserts whether the count is zero or
 * non-zero depending on the active PMU event filter.
 */
static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
{
	for (;;) {
		/* Disable all counters and zero the target fixed counter. */
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);

		/* Only OS_EN bit is enabled for fixed counter[idx]. */
		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
		      BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
		/* Retire NUM_BRANCHES branches ("loop ." decrements ECX). */
		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);

		/* Hand the final counter value back to the host. */
		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
	}
}

static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
					       uint32_t action, uint32_t bitmap)
{
	struct __kvm_pmu_event_filter f = {
		.action = action,
		.fixed_counter_bitmap = bitmap,
	};
	set_pmu_event_filter(vcpu, &f);

	return run_vcpu_to_sync(vcpu);
}

/*
 * Verify that fixed counter @idx obeys the event filter's
 * fixed_counter_bitmap for every possible bitmap value.
 *
 * With an ALLOW filter the counter must count iff its bit is set in the
 * bitmap; with a DENY filter the polarity is inverted.
 */
static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
					uint8_t nr_fixed_counters)
{
	unsigned int i;
	uint32_t bitmap;
	uint64_t count;

	/* BIT(nr_fixed_counters) below must not overflow the bitmap type. */
	TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
		    "Invalid nr_fixed_counters");

	/*
	 * Check the fixed performance counter can count normally when KVM
	 * userspace doesn't set any pmu filter.
	 */
	count = run_vcpu_to_sync(vcpu);
	TEST_ASSERT(count, "Unexpected count value: %lu\n", count);

	/*
	 * Iterate over every possible fixed counter bitmap, i.e. all subsets
	 * of the nr_fixed_counters available counters.  Note, "bitmap = i"
	 * (not BIT(i)): the loop bound is 2^nr_fixed_counters precisely so
	 * that multi-counter combinations are covered, and BIT(i) would be
	 * undefined behavior for i >= the width of the shifted type.
	 */
	for (i = 0; i < BIT(nr_fixed_counters); i++) {
		bitmap = i;
		count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
						       bitmap);
		TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));

		count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
						       bitmap);
		TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
	}
}

/*
 * For each fixed counter supported by KVM, spin up a fresh VM whose guest
 * exercises that counter and verify the PMU event filter's
 * fixed_counter_bitmap gates it as expected.
 */
static void test_fixed_counter_bitmap(void)
{
	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint8_t idx;

	/*
	 * Note, if the CPU has no fixed counters (e.g. Intel PMU version 1
	 * or AMD), the loop simply doesn't execute and nothing is tested.
	 */
	for (idx = 0; idx < nr_fixed_counters; idx++) {
		vm = vm_create_with_one_vcpu(&vcpu,
					     intel_run_fixed_counter_guest_code);
		vcpu_args_set(vcpu, 1, idx);
		__test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
		kvm_vm_free(vm);
	}
}

int main(int argc, char *argv[])
{
	void (*guest_code)(void);
@@ -851,6 +930,7 @@ int main(int argc, char *argv[])
	kvm_vm_free(vm);

	test_pmu_config_disable(guest_code);
	test_fixed_counter_bitmap();

	return 0;
}