Commit cb085634 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'mm-hotfixes-stable-2023-04-19-16-36' of...

Merge tag 'mm-hotfixes-stable-2023-04-19-16-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "22 hotfixes.

  19 are cc:stable and the remainder address issues which were
  introduced during this merge cycle, or aren't considered suitable for
  -stable backporting.

  19 are for MM and the remainder are for other subsystems"

* tag 'mm-hotfixes-stable-2023-04-19-16-36' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (22 commits)
  nilfs2: initialize unused bytes in segment summary blocks
  mm: page_alloc: skip regions with hugetlbfs pages when allocating 1G pages
  mm/mmap: regression fix for unmapped_area{_topdown}
  maple_tree: fix mas_empty_area() search
  maple_tree: make maple state reusable after mas_empty_area_rev()
  mm: kmsan: handle alloc failures in kmsan_ioremap_page_range()
  mm: kmsan: handle alloc failures in kmsan_vmap_pages_range_noflush()
  tools/Makefile: do missed s/vm/mm/
  mm: fix memory leak on mm_init error handling
  mm/page_alloc: fix potential deadlock on zonelist_update_seq seqlock
  kernel/sys.c: fix and improve control flow in __sys_setres[ug]id()
  Revert "userfaultfd: don't fail on unrecognized features"
  writeback, cgroup: fix null-ptr-deref write in bdi_split_work_to_wbs
  maple_tree: fix a potential memory leak, OOB access, or other unpredictable bug
  tools/mm/page_owner_sort.c: fix TGID output when cull=tg is used
  mailmap: update jtoppins' entry to reference correct email
  mm/mempolicy: fix use-after-free of VMA iterator
  mm/huge_memory.c: warn with pr_warn_ratelimited instead of VM_WARN_ON_ONCE_FOLIO
  mm/mprotect: fix do_mprotect_pkey() return on error
  mm/khugepaged: check again on anon uffd-wp during isolation
  ...
parents 23990b1a ef832747
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -232,6 +232,8 @@ Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
<jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
<jon.toppins+linux@gmail.com> <jtoppins@redhat.com>
Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
<josh@joshtriplett.org> <josh@freedesktop.org>
<josh@joshtriplett.org> <josh@kernel.org>
+10 −7
Original line number Diff line number Diff line
@@ -978,6 +978,16 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
			continue;
		}

		/*
		 * If wb_tryget fails, the wb has been shutdown, skip it.
		 *
		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
		 * continuing iteration from @wb after dropping and
		 * regrabbing rcu read lock.
		 */
		if (!wb_tryget(wb))
			continue;

		/* alloc failed, execute synchronously using on-stack fallback */
		work = &fallback_work;
		*work = *base_work;
@@ -986,13 +996,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
		work->done = &fallback_work_done;

		wb_queue_work(wb, work);

		/*
		 * Pin @wb so that it stays on @bdi->wb_list.  This allows
		 * continuing iteration from @wb after dropping and
		 * regrabbing rcu read lock.
		 */
		wb_get(wb);
		last_wb = wb;

		rcu_read_unlock();
+20 −0
Original line number Diff line number Diff line
@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 *
 * Per this commit ("nilfs2: initialize unused bytes in segment summary
 * blocks"), the zero fill prevents uninitialized buffer bytes past the
 * last written entry from being carried along in the summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	/*
	 * Pick the pointer tracking the current end of written summary data:
	 * sc_binfo_ptr while block entries are being accumulated
	 * (sc_blk_cnt > 0), otherwise sc_finfo_ptr.
	 * NOTE(review): the exact meaning of sc_blk_cnt is not visible in
	 * this fragment — presumed "blocks collected for the current file".
	 */
	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;

	/* Zero everything from the current write offset to the end of the
	 * buffer; no-op when the block is already exactly full.
	 */
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}
@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
@@ -1533,6 +1552,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

+4 −2
Original line number Diff line number Diff line
@@ -1955,8 +1955,10 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
	ret = -EFAULT;
	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
		goto out;
	/* Ignore unsupported features (userspace built against newer kernel) */
	features = uffdio_api.features & UFFD_API_FEATURES;
	features = uffdio_api.features;
	ret = -EINVAL;
	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
		goto err_out;
	ret = -EPERM;
	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
		goto err_out;
+21 −18
Original line number Diff line number Diff line
@@ -134,9 +134,10 @@ void kmsan_kfree_large(const void *ptr);
 * @page_shift:	page_shift passed to vmap_range_noflush().
 *
 * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
 * vmalloc metadata address range.
 * vmalloc metadata address range. Returns 0 on success, callers must check
 * for non-zero return value.
 */
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift);

@@ -159,9 +160,10 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
 * @page_shift:	page_shift argument passed to vmap_range_noflush().
 *
 * KMSAN creates new metadata pages for the physical pages mapped into the
 * virtual memory.
 * virtual memory. Returns 0 on success, callers must check for non-zero return
 * value.
 */
void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift);

@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
{
}

static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
						 unsigned long end,
						 pgprot_t prot,
						 struct page **pages,
						 unsigned int page_shift)
{
	return 0;
}

static inline void kmsan_vunmap_range_noflush(unsigned long start,
@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
{
}

static inline void kmsan_ioremap_page_range(unsigned long start,
static inline int kmsan_ioremap_page_range(unsigned long start,
					   unsigned long end,
					    phys_addr_t phys_addr,
					    pgprot_t prot,
					   phys_addr_t phys_addr, pgprot_t prot,
					   unsigned int page_shift)
{
	return 0;
}

static inline void kmsan_iounmap_page_range(unsigned long start,
Loading