Commit 81af89e1 authored by Marco Elver, committed by Ingo Molnar
Browse files

kcsan: Add kcsan_set_access_mask() support



When setting up an access mask with kcsan_set_access_mask(), KCSAN will
only report races if concurrent changes to bits set in access_mask are
observed. Conveying access_mask via a separate call avoids introducing
overhead in the common-case fast-path.

Acked-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b738f616
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -68,6 +68,16 @@ void kcsan_flat_atomic_end(void);
 */
void kcsan_atomic_next(int n);

/**
 * kcsan_set_access_mask - set access mask
 *
 * Set the access mask for all accesses for the current context if non-zero.
 * Only value changes to bits set in the mask will be reported.
 *
 * @mask: bitmask
 */
void kcsan_set_access_mask(unsigned long mask);

#else /* CONFIG_KCSAN */

static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
@@ -78,6 +88,7 @@ static inline void kcsan_nestable_atomic_end(void) { }
/* No-op stubs used when CONFIG_KCSAN is disabled. */
static inline void kcsan_flat_atomic_begin(void)	{ }
static inline void kcsan_flat_atomic_end(void)		{ }
static inline void kcsan_atomic_next(int n)		{ }
static inline void kcsan_set_access_mask(unsigned long mask) { }

#endif /* CONFIG_KCSAN */

+5 −0
Original line number Diff line number Diff line
@@ -35,6 +35,11 @@ struct kcsan_ctx {
	 */
	int atomic_nest_count;
	bool in_flat_atomic;

	/*
	 * Access mask for all accesses if non-zero.
	 */
	unsigned long access_mask;
};

/**
+1 −0
Original line number Diff line number Diff line
@@ -167,6 +167,7 @@ struct task_struct init_task
		.atomic_next		= 0,
		.atomic_nest_count	= 0,
		.in_flat_atomic		= false,
		.access_mask		= 0,
	},
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
+39 −4
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	.atomic_next		= 0,
	.atomic_nest_count	= 0,
	.in_flat_atomic		= false,
	.access_mask		= 0,
};

/*
@@ -298,6 +299,15 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,

	if (!kcsan_is_enabled())
		return;

	/*
	 * The access_mask check relies on value-change comparison. To avoid
	 * reporting a race where e.g. the writer set up the watchpoint, but the
	 * reader has access_mask!=0, we have to ignore the found watchpoint.
	 */
	if (get_ctx()->access_mask != 0)
		return;

	/*
	 * Consume the watchpoint as soon as possible, to minimize the chances
	 * of !consumed. Consuming the watchpoint must always be guarded by
@@ -341,6 +351,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
		u32 _4;
		u64 _8;
	} expect_value;
	unsigned long access_mask;
	enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;
@@ -435,18 +446,27 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
	 * Re-read value, and check if it is as expected; if not, we infer a
	 * racy access.
	 */
	access_mask = get_ctx()->access_mask;
	switch (size) {
	case 1:
		expect_value._1 ^= READ_ONCE(*(const u8 *)ptr);
		if (access_mask)
			expect_value._1 &= (u8)access_mask;
		break;
	case 2:
		expect_value._2 ^= READ_ONCE(*(const u16 *)ptr);
		if (access_mask)
			expect_value._2 &= (u16)access_mask;
		break;
	case 4:
		expect_value._4 ^= READ_ONCE(*(const u32 *)ptr);
		if (access_mask)
			expect_value._4 &= (u32)access_mask;
		break;
	case 8:
		expect_value._8 ^= READ_ONCE(*(const u64 *)ptr);
		if (access_mask)
			expect_value._8 &= (u64)access_mask;
		break;
	default:
		break; /* ignore; we do not diff the values */
@@ -460,12 +480,21 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
	if (!remove_watchpoint(watchpoint)) {
		/*
		 * Depending on the access type, map a value_change of MAYBE to
		 * TRUE (require reporting).
		 * TRUE (always report) or FALSE (never report).
		 */
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE && (size > 8 || is_assert)) {
		if (value_change == KCSAN_VALUE_CHANGE_MAYBE) {
			if (access_mask != 0) {
				/*
				 * For access with access_mask, we require a
				 * value-change, as it is likely that races on
				 * ~access_mask bits are expected.
				 */
				value_change = KCSAN_VALUE_CHANGE_FALSE;
			} else if (size > 8 || is_assert) {
				/* Always assume a value-change. */
				value_change = KCSAN_VALUE_CHANGE_TRUE;
			}
		}

		/*
		 * No need to increment 'data_races' counter, as the racing
@@ -622,6 +651,12 @@ void kcsan_atomic_next(int n)
}
EXPORT_SYMBOL(kcsan_atomic_next);

void kcsan_set_access_mask(unsigned long mask)
{
	get_ctx()->access_mask = mask;
}
EXPORT_SYMBOL(kcsan_set_access_mask);

void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{
	check_access(ptr, size, type);
+5 −0
Original line number Diff line number Diff line
@@ -98,6 +98,11 @@ enum kcsan_value_change {
	 */
	KCSAN_VALUE_CHANGE_MAYBE,

	/*
	 * Did not observe a value-change, and it is invalid to report the race.
	 */
	KCSAN_VALUE_CHANGE_FALSE,

	/*
	 * The value was observed to change, and the race should be reported.
	 */
Loading