Commit aeb542a1 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "7 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch/Kconfig: update HAVE_RELIABLE_STACKTRACE description
  mm, hotplug: fix page online with DEBUG_PAGEALLOC compiled but not enabled
  mm/z3fold.c: do not include rwlock.h directly
  fat: fix uninit-memory access for partial initialized inode
  mm: avoid data corruption on CoW fault into PFN-mapped VMA
  mm: fix possible PMD dirty bit lost in set_pmd_migration_entry()
  mm, numa: fix bad pmd by atomically check for pmd_trans_huge when marking page tables prot_numa
parents b0b8a945 140d7e88
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -738,8 +738,9 @@ config HAVE_STACK_VALIDATION
config HAVE_RELIABLE_STACKTRACE
	bool
	help
	  Architecture has a save_stack_trace_tsk_reliable() function which
	  only returns a stack trace if it can guarantee the trace is reliable.
	  Architecture has either save_stack_trace_tsk_reliable() or
	  arch_stack_walk_reliable() function which only returns a stack trace
	  if it can guarantee the trace is reliable.

config HAVE_ARCH_HASH
	bool
+7 −12
Original line number Diff line number Diff line
@@ -750,6 +750,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
		return NULL;

	init_rwsem(&ei->truncate_lock);
	/* Zeroing to allow iput() even on a partially initialized inode. */
	ei->mmu_private = 0;
	ei->i_start = 0;
	ei->i_logstart = 0;
	ei->i_attrs = 0;
	ei->i_pos = 0;

	return &ei->vfs_inode;
}

@@ -1374,16 +1381,6 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
	return 0;
}

/*
 * fat_dummy_inode_init - zero out the FAT-specific fields of an inode.
 * @inode: freshly allocated inode (from new_inode()) used internally by
 *         the FAT driver as a dummy (fat_inode / fsinfo_inode in
 *         fat_fill_super()).
 *
 * Clears every MSDOS-private field so the dummy inode carries no stale
 * FAT state and behaves as a no-op placeholder.
 */
static void fat_dummy_inode_init(struct inode *inode)
{
	/* Initialize this dummy inode to work as no-op. */
	MSDOS_I(inode)->mmu_private = 0;
	MSDOS_I(inode)->i_start = 0;
	MSDOS_I(inode)->i_logstart = 0;
	MSDOS_I(inode)->i_attrs = 0;
	MSDOS_I(inode)->i_pos = 0;
}

static int fat_read_root(struct inode *inode)
{
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1844,13 +1841,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
	fat_inode = new_inode(sb);
	if (!fat_inode)
		goto out_fail;
	fat_dummy_inode_init(fat_inode);
	sbi->fat_inode = fat_inode;

	fsinfo_inode = new_inode(sb);
	if (!fsinfo_inode)
		goto out_fail;
	fat_dummy_inode_init(fsinfo_inode);
	fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
	sbi->fsinfo_inode = fsinfo_inode;
	insert_inode_hash(fsinfo_inode);
+4 −0
Original line number Diff line number Diff line
@@ -2715,6 +2715,10 @@ static inline bool debug_pagealloc_enabled_static(void)
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);

/*
 * When called in DEBUG_PAGEALLOC context, the call should most likely be
 * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
 */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
+1 −2
Original line number Diff line number Diff line
@@ -3043,8 +3043,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		return;

	flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
	pmdval = *pvmw->pmd;
	pmdp_invalidate(vma, address, pvmw->pmd);
	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
	if (pmd_dirty(pmdval))
		set_page_dirty(page);
	entry = make_migration_entry(page, pmd_write(pmdval));
+27 −8
Original line number Diff line number Diff line
@@ -2257,7 +2257,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
	bool ret;
	void *kaddr;
	void __user *uaddr;
	bool force_mkyoung;
	bool locked = false;
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = vmf->address;
@@ -2282,11 +2282,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
	 * On architectures with software "accessed" bits, we would
	 * take a double page fault, so mark it accessed here.
	 */
	force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
	if (force_mkyoung) {
	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
		pte_t entry;

		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
		locked = true;
		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			/*
			 * Other thread has already handled the fault
@@ -2309,19 +2309,38 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
	 * in which case we just give up and fill the result with
	 * zeroes.
	 */
	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
		if (locked)
			goto warn;

		/* Re-validate under PTL if the page is still mapped */
		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
		locked = true;
		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
			/* The PTE changed under us. Retry page fault. */
			ret = false;
			goto pte_unlock;
		}

		/*
		 * The same page may have been mapped back since the last copy
		 * attempt. Try to copy again under the PTL.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
			/*
			 * Give a warn in case there can be some obscure
			 * use-case
			 */
warn:
			WARN_ON_ONCE(1);
			clear_page(kaddr);
		}
	}

	ret = true;

pte_unlock:
	if (force_mkyoung)
	if (locked)
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	kunmap_atomic(kaddr);
	flush_dcache_page(dst);
Loading