Commit b45e2da6 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "10 patches.

  Subsystems affected by this patch series: MAINTAINERS and mm (slub,
  pagealloc, memcg, kasan, vmalloc, migration, hugetlb, memory-failure,
  and process_vm_access)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/process_vm_access.c: include compat.h
  mm,hwpoison: fix printing of page flags
  MAINTAINERS: add Vlastimil as slab allocators maintainer
  mm/hugetlb: fix potential missing huge page size info
  mm: migrate: initialize err in do_migrate_pages
  mm/vmalloc.c: fix potential memory leak
  arm/kasan: fix the array size of kasan_early_shadow_pte[]
  mm/memcontrol: fix warning in mem_cgroup_page_lruvec()
  mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint
  mm, slub: consider rest of partial list if acquire_slab() fails
parents 8cbe71e7 eb351d75
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -16313,6 +16313,7 @@ M: Pekka Enberg <penberg@kernel.org>
M:	David Rientjes <rientjes@google.com>
M:	Joonsoo Kim <iamjoonsoo.kim@lge.com>
M:	Andrew Morton <akpm@linux-foundation.org>
M:	Vlastimil Babka <vbabka@suse.cz>
L:	linux-mm@kvack.org
S:	Maintained
F:	include/linux/sl?b*.h
+5 −1
Original line number Diff line number Diff line
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
+1 −1
Original line number Diff line number Diff line
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
{
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

+1 −1
Original line number Diff line number Diff line
@@ -4371,7 +4371,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
		 * So we need to block hugepage fault by PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON |
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}
+2 −1
Original line number Diff line number Diff line
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
	return false;
}
#endif
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
	__page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
Loading