Commit 9917ff5f authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "5 patches.

  Subsystems affected by this patch series: binfmt, procfs, and mm
  (vmscan, memcg, and kfence)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kfence: make test case compatible with run time set sample interval
  mm: memcg: synchronize objcg lists with a dedicated spinlock
  mm: vmscan: remove deadlock due to throttling failing to make progress
  fs/proc: task_mmu.c: don't read mapcount for migration entry
  fs/binfmt_elf: fix PT_LOAD p_align values for loaders
parents 83e39664 8913c610
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1117,7 +1117,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
			 * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
			 */
			alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
			if (alignment > ELF_MIN_ALIGN) {
			if (interpreter || alignment > ELF_MIN_ALIGN) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
+31 −9
Original line number Diff line number Diff line
@@ -440,7 +440,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked)
		bool compound, bool young, bool dirty, bool locked,
		bool migration)
{
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;
@@ -467,8 +468,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page mapped with PTE it would elevate
	 * page_count().
	 *
	 * The page_mapcount() is called to get a snapshot of the mapcount.
	 * Without holding the page lock this snapshot can be slightly wrong as
	 * we cannot always read the mapcount atomically.  It is not safe to
	 * call page_mapcount() even with PTL held if the page is not mapped,
	 * especially for migration entries.  Treat regular migration entries
	 * as mapcount == 1.
	 */
	if (page_count(page) == 1) {
	if ((page_count(page) == 1) || migration) {
		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
			locked, true);
		return;
@@ -517,6 +525,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool migration = false;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
@@ -536,8 +545,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_pfn_swap_entry(swpent))
		} else if (is_pfn_swap_entry(swpent)) {
			if (is_migration_entry(swpent))
				migration = true;
			page = pfn_swap_entry_to_page(swpent);
		}
	} else {
		smaps_pte_hole_lookup(addr, walk);
		return;
@@ -546,7 +558,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
		      locked, migration);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -557,6 +570,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool migration = false;

	if (pmd_present(*pmd)) {
		/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -564,9 +578,11 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_migration_entry(entry))
		if (is_migration_entry(entry)) {
			migration = true;
			page = pfn_swap_entry_to_page(entry);
		}
	}
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
@@ -577,7 +593,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);

	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
		      locked, migration);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -1378,6 +1396,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;
	bool migration = false;

	if (pte_present(pte)) {
		if (pm->show_pfn)
@@ -1399,13 +1418,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
			frame = swp_type(entry) |
				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		migration = is_migration_entry(entry);
		if (is_pfn_swap_entry(entry))
			page = pfn_swap_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
	if (page && !migration && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;
@@ -1421,8 +1441,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bool migration = false;

	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		u64 flags = 0, frame = 0;
@@ -1461,11 +1482,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			if (pmd_swp_uffd_wp(pmd))
				flags |= PM_UFFD_WP;
			VM_BUG_ON(!is_pmd_migration_entry(pmd));
			migration = is_migration_entry(entry);
			page = pfn_swap_entry_to_page(entry);
		}
#endif

		if (page && page_mapcount(page) == 1)
		if (page && !migration && page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		for (; addr != end; addr += PAGE_SIZE) {
+2 −0
Original line number Diff line number Diff line
@@ -17,6 +17,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>

extern unsigned long kfence_sample_interval;

/*
 * We allocate an even number of pages, as it simplifies calculations to map
 * address to metadata indices; effectively, the very first page serves as an
+3 −2
Original line number Diff line number Diff line
@@ -219,7 +219,7 @@ struct obj_cgroup {
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct list_head list; /* protected by objcg_lock */
		struct rcu_head rcu;
	};
};
@@ -315,7 +315,8 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list; /* list of inherited objcgs */
	/* list of inherited objcgs, protected by objcg_lock */
	struct list_head objcg_list;
#endif

	MEMCG_PADDING(_pad2_);
+2 −1
Original line number Diff line number Diff line
@@ -47,7 +47,8 @@

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
Loading