Commit a765a932 authored by Peter Zijlstra, committed by Luo Gengkun
Browse files

perf/aux: Fix AUX buffer serialization

stable inclusion
from stable-v6.6.51
commit c4b69bee3f4ef76809288fe6827bc14d4ae788ef
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAR9C1
CVE: CVE-2024-46713

Reference: https://git.kernel.org/stable/c/c4b69bee3f4ef76809288fe6827bc14d4ae788ef



--------------------------------

commit 2ab9d830262c132ab5db2f571003d80850d56b2a upstream.

Ole reported that event->mmap_mutex is strictly insufficient to
serialize the AUX buffer, add a per RB mutex to fully serialize it.

Note that in the lock order comment the perf_event::mmap_mutex order
was already wrong, that is, it nesting under mmap_lock is not new with
this patch.

Fixes: 45bfb2e5 ("perf: Add AUX area to ring buffer for raw data streams")
Reported-by: Ole <ole@binarygecko.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Luo Gengkun <luogengkun2@huawei.com>
parent cce00301
Loading
Loading
Loading
Loading
+12 −6
Original line number Diff line number Diff line
@@ -1259,8 +1259,9 @@ static void put_ctx(struct perf_event_context *ctx)
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_lock
 *	      perf_event::mmap_mutex
 *	        perf_buffer::aux_mutex
 *	      perf_addr_filters_head::lock
 *
 *    cpu_hotplug_lock
@@ -6360,12 +6361,11 @@ static void perf_mmap_close(struct vm_area_struct *vma)
		event->pmu->event_unmapped(event, vma->vm_mm);

	/*
	 * rb->aux_mmap_count will always drop before rb->mmap_count and
	 * event->mmap_count, so it is ok to use event->mmap_mutex to
	 * serialize with perf_mmap here.
	 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
	 * to avoid complications.
	 */
	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
		/*
		 * Stop all AUX events that are writing to this buffer,
		 * so that we can free its AUX pages and corresponding PMU
@@ -6382,7 +6382,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
		rb_free_aux(rb);
		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));

		mutex_unlock(&event->mmap_mutex);
		mutex_unlock(&rb->aux_mutex);
	}

	if (atomic_dec_and_test(&rb->mmap_count))
@@ -6470,6 +6470,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	struct mutex *aux_mutex = NULL;
	struct perf_buffer *rb = NULL;
	unsigned long locked, lock_limit;
	unsigned long vma_size;
@@ -6518,6 +6519,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
		if (!rb)
			goto aux_unlock;

		aux_mutex = &rb->aux_mutex;
		mutex_lock(aux_mutex);

		aux_offset = READ_ONCE(rb->user_page->aux_offset);
		aux_size = READ_ONCE(rb->user_page->aux_size);

@@ -6668,6 +6672,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
		atomic_dec(&rb->mmap_count);
	}
aux_unlock:
	if (aux_mutex)
		mutex_unlock(aux_mutex);
	mutex_unlock(&event->mmap_mutex);

	/*
+1 −0
Original line number Diff line number Diff line
@@ -40,6 +40,7 @@ struct perf_buffer {
	struct user_struct		*mmap_user;

	/* AUX area */
	struct mutex			aux_mutex;
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
+2 −0
Original line number Diff line number Diff line
@@ -333,6 +333,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
	 */
	if (!rb->nr_pages)
		rb->paused = 1;

	mutex_init(&rb->aux_mutex);
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)