Commit b0fb2938 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'x86_cache_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 resource control updates from Borislav Petkov:
 "Avoid IPI-ing a task in certain cases and prevent load/store tearing
  when accessing a task's resctrl fields concurrently"

* tag 'x86_cache_for_v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/resctrl: Apply READ_ONCE/WRITE_ONCE to task_struct.{rmid,closid}
  x86/resctrl: Use task_curr() instead of task_struct->on_cpu to prevent unnecessary IPI
  x86/resctrl: Add printf attribute to log function
parents 0570b693 6d3b47dd
Loading
Loading
Loading
Loading
+7 −4
Original line number Diff line number Diff line
@@ -56,19 +56,22 @@ static void __resctrl_sched_in(void)
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
	u32 closid = state->default_closid;
	u32 rmid = state->default_rmid;
+	u32 tmp;

	/*
	 * If this task has a closid/rmid assigned, use it.
	 * Else use the closid/rmid assigned to this cpu.
	 */
	if (static_branch_likely(&rdt_alloc_enable_key)) {
-		if (current->closid)
-			closid = current->closid;
+		tmp = READ_ONCE(current->closid);
+		if (tmp)
+			closid = tmp;
	}

	if (static_branch_likely(&rdt_mon_enable_key)) {
-		if (current->rmid)
-			rmid = current->rmid;
+		tmp = READ_ONCE(current->rmid);
+		if (tmp)
+			rmid = tmp;
	}

	if (closid != state->cur_closid || rmid != state->cur_rmid) {
+1 −0
Original line number Diff line number Diff line
@@ -572,6 +572,7 @@ union cpuid_0x10_x_edx {

void rdt_last_cmd_clear(void);
void rdt_last_cmd_puts(const char *s);
+__printf(1, 2)
void rdt_last_cmd_printf(const char *fmt, ...);

void rdt_ctrl_update(void *arg);
+10 −14
Original line number Diff line number Diff line
@@ -563,11 +563,11 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
	 */

	if (rdtgrp->type == RDTCTRL_GROUP) {
-		tsk->closid = rdtgrp->closid;
-		tsk->rmid = rdtgrp->mon.rmid;
+		WRITE_ONCE(tsk->closid, rdtgrp->closid);
+		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
-			tsk->rmid = rdtgrp->mon.rmid;
+			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
@@ -2310,22 +2310,18 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
-			t->closid = to->closid;
-			t->rmid = to->mon.rmid;
+			WRITE_ONCE(t->closid, to->closid);
+			WRITE_ONCE(t->rmid, to->mon.rmid);

-#ifdef CONFIG_SMP
			/*
-			 * This is safe on x86 w/o barriers as the ordering
-			 * of writing to task_cpu() and t->on_cpu is
-			 * reverse to the reading here. The detection is
-			 * inaccurate as tasks might move or schedule
-			 * before the smp function call takes place. In
-			 * such a case the function call is pointless, but
+			 * If the task is on a CPU, set the CPU in the mask.
+			 * The detection is inaccurate as tasks might move or
+			 * schedule before the smp function call takes place.
+			 * In such a case the function call is pointless, but
			 * there is no other side effect.
			 */
-			if (mask && t->on_cpu)
+			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
				cpumask_set_cpu(task_cpu(t), mask);
-#endif
		}
	}
	read_unlock(&tasklist_lock);