Unverified commit a53c4046, authored by openeuler-ci-bot, committed by Gitee
Browse files

!13370 CVE-2024-46713

Merge Pull Request from: @ci-robot 
 
PR sync from: Luo Gengkun <luogengkun2@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/ENPVJTUSZZ55XSKWTDDDDHDZHUE5UEOV/ 
Luo Gengkun (1):
  perf: Fix kabi problem by put mutex in front of perf_buffer

Peter Zijlstra (1):
  perf/aux: Fix AUX buffer serialization


-- 
2.34.1
 
https://gitee.com/src-openeuler/kernel/issues/IAR9C1 
 
Link: https://gitee.com/openeuler/kernel/pulls/13370

 

Reviewed-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 67e5d5d2 f16864a7
Loading
Loading
Loading
Loading
+15 −6
Original line number Diff line number Diff line
@@ -1259,8 +1259,9 @@ static void put_ctx(struct perf_event_context *ctx)
 *	  perf_event_context::mutex
 *	    perf_event::child_mutex;
 *	      perf_event_context::lock
 *	    perf_event::mmap_mutex
 *	    mmap_lock
 *	      perf_event::mmap_mutex
 *	        perf_buffer::aux_mutex
 *	      perf_addr_filters_head::lock
 *
 *    cpu_hotplug_lock
@@ -6355,17 +6356,17 @@ static void perf_mmap_close(struct vm_area_struct *vma)
	int mmap_locked = rb->mmap_locked;
	unsigned long size = perf_data_size(rb);
	bool detach_rest = false;
	struct perf_buffer_ext *rb_ext = container_of(rb, struct perf_buffer_ext, perf_buffer);

	if (event->pmu->event_unmapped)
		event->pmu->event_unmapped(event, vma->vm_mm);

	/*
	 * rb->aux_mmap_count will always drop before rb->mmap_count and
	 * event->mmap_count, so it is ok to use event->mmap_mutex to
	 * serialize with perf_mmap here.
	 * The AUX buffer is strictly a sub-buffer, serialize using aux_mutex
	 * to avoid complications.
	 */
	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb_ext->aux_mutex)) {
		/*
		 * Stop all AUX events that are writing to this buffer,
		 * so that we can free its AUX pages and corresponding PMU
@@ -6382,7 +6383,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
		rb_free_aux(rb);
		WARN_ON_ONCE(refcount_read(&rb->aux_refcount));

		mutex_unlock(&event->mmap_mutex);
		mutex_unlock(&rb_ext->aux_mutex);
	}

	if (atomic_dec_and_test(&rb->mmap_count))
@@ -6470,7 +6471,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
	struct perf_event *event = file->private_data;
	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
	struct mutex *aux_mutex = NULL;
	struct perf_buffer *rb = NULL;
	struct perf_buffer_ext *rb_ext = NULL;
	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
@@ -6518,6 +6521,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
		if (!rb)
			goto aux_unlock;

		rb_ext = container_of(rb, struct perf_buffer_ext, perf_buffer);
		aux_mutex = &rb_ext->aux_mutex;
		mutex_lock(aux_mutex);

		aux_offset = READ_ONCE(rb->user_page->aux_offset);
		aux_size = READ_ONCE(rb->user_page->aux_size);

@@ -6668,6 +6675,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
		atomic_dec(&rb->mmap_count);
	}
aux_unlock:
	if (aux_mutex)
		mutex_unlock(aux_mutex);
	mutex_unlock(&event->mmap_mutex);

	/*
+5 −0
Original line number Diff line number Diff line
@@ -58,6 +58,11 @@ struct perf_buffer {
	void				*data_pages[];
};

/*
 * Wrapper carrying the AUX-buffer serialization lock introduced for
 * CVE-2024-46713 ("perf/aux: Fix AUX buffer serialization").
 *
 * aux_mutex is placed *in front of* the embedded perf_buffer rather
 * than inside it so that the layout of struct perf_buffer itself is
 * unchanged, preserving kABI (see the backport patch title: "perf:
 * Fix kabi problem by put mutex in front of perf_buffer").
 *
 * Callers holding a struct perf_buffer * recover the wrapper with
 * container_of(rb, struct perf_buffer_ext, perf_buffer); allocation
 * sites size/allocate the wrapper and hand out &rb_ext->perf_buffer.
 */
struct perf_buffer_ext {
	struct mutex		aux_mutex;	/* serializes AUX mmap/unmap against perf_mmap() */
	struct perf_buffer	perf_buffer;	/* embedded buffer; layout must stay kABI-stable */
};

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
+20 −10
Original line number Diff line number Diff line
@@ -310,6 +310,7 @@ static void
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);
	struct perf_buffer_ext *rb_ext = container_of(rb, struct perf_buffer_ext, perf_buffer);

	if (watermark)
		rb->watermark = min(max_size, watermark);
@@ -333,6 +334,8 @@ ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
	 */
	if (!rb->nr_pages)
		rb->paused = 1;

	mutex_init(&rb_ext->aux_mutex);
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
@@ -816,21 +819,23 @@ static void perf_mmap_free_page(void *addr)

struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer_ext *rb_ext;
	struct perf_buffer *rb;
	unsigned long size;
	int i, node;

	size = sizeof(struct perf_buffer);
	size = sizeof(struct perf_buffer_ext);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) > PAGE_SHIFT+MAX_ORDER)
		goto fail;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
	rb_ext = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb_ext)
		goto fail;

	rb = &rb_ext->perf_buffer;
	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;
@@ -854,7 +859,7 @@ struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
	perf_mmap_free_page(rb->user_page);

fail_user_page:
	kfree(rb);
	kfree(rb_ext);

fail:
	return NULL;
@@ -863,11 +868,12 @@ struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
void rb_free(struct perf_buffer *rb)
{
	int i;
	struct perf_buffer_ext *rb_ext = container_of(rb, struct perf_buffer_ext, perf_buffer);

	perf_mmap_free_page(rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page(rb->data_pages[i]);
	kfree(rb);
	kfree(rb_ext);
}

#else
@@ -890,6 +896,7 @@ static void perf_mmap_unmark_page(void *addr)

static void rb_free_work(struct work_struct *work)
{
	struct perf_buffer_ext *rb_ext;
	struct perf_buffer *rb;
	void *base;
	int i, nr;
@@ -903,7 +910,8 @@ static void rb_free_work(struct work_struct *work)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
	rb_ext = container_of(rb, struct perf_buffer_ext, perf_buffer);
	kfree(rb_ext);
}

void rb_free(struct perf_buffer *rb)
@@ -913,19 +921,21 @@ void rb_free(struct perf_buffer *rb)

struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer_ext *rb_ext;
	struct perf_buffer *rb;
	unsigned long size;
	void *all_buf;
	int node;

	size = sizeof(struct perf_buffer);
	size = sizeof(struct perf_buffer_ext);
	size += sizeof(void *);

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
	rb_ext = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb_ext)
		goto fail;

	rb = &rb_ext->perf_buffer;
	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
@@ -944,7 +954,7 @@ struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
	return rb;

fail_all_buf:
	kfree(rb);
	kfree(rb_ext);

fail:
	return NULL;