Commit 2b18593e authored by Linus Torvalds
Browse files

Merge tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fix from Borislav Petkov:

 - A single data race fix on the perf event cleanup path to avoid
   endless loops due to insufficient locking

* tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
parents 59c80f05 68e3c698
Loading
Loading
Loading
Loading
+31 −14
Original line number Diff line number Diff line
@@ -6253,10 +6253,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)

		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
			/*
			 * Raced against perf_mmap_close() through
			 * perf_event_set_output(). Try again, hope for better
			 * luck.
			 * Raced against perf_mmap_close(); remove the
			 * event and try again.
			 */
			ring_buffer_attach(event, NULL);
			mutex_unlock(&event->mmap_mutex);
			goto again;
		}
@@ -11825,14 +11825,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
	goto out;
}

/*
 * Acquire two mutexes of the same lock class in a deadlock-free way:
 * order the acquisitions by object address so that any two tasks
 * locking the same pair always take them in the same order (avoids
 * ABBA deadlock). The second lock uses mutex_lock_nested() with
 * SINGLE_DEPTH_NESTING to tell lockdep this same-class nesting is
 * intentional.
 */
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
	/* Canonical order: always lock the lower address first. */
	if (b < a)
		swap(a, b);

	mutex_lock(a);
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}

static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct perf_buffer *rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
	if (!output_event) {
		mutex_lock(&event->mmap_mutex);
		goto set;
	}

	/* don't allow circular references */
	if (event == output_event)
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
	    event->pmu != output_event->pmu)
		goto out;

	/*
	 * Hold both mmap_mutex to serialize against perf_mmap_close().  Since
	 * output_event is already on rb->event_list, and the list iteration
	 * restarts after every removal, it is guaranteed this new event is
	 * observed *OR* if output_event is already removed, it's guaranteed we
	 * observe !rb->mmap_count.
	 */
	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;
@@ -11881,6 +11899,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;

		/* did we race against perf_mmap_close() */
		if (!atomic_read(&rb->mmap_count)) {
			ring_buffer_put(rb);
			goto unlock;
		}
	}

	ring_buffer_attach(event, rb);
@@ -11888,20 +11912,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);
	if (output_event)
		mutex_unlock(&output_event->mmap_mutex);

out:
	return ret;
}

static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
	if (b < a)
		swap(a, b);

	mutex_lock(a);
	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}

static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{
	bool nmi_safe = false;