Commit e5220dd1 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: mips, mm (kfence, debug,
  pagealloc, memory-hotplug, hugetlb, and kasan), init, proc,
  lib, ocfs2, and mailmap"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mailmap: use private address for Michel Lespinasse
  ocfs2: fix data corruption by fallocate
  lib: crc64: fix kernel-doc warning
  mm, hugetlb: fix simple resv_huge_pages underflow on UFFDIO_COPY
  mm/kasan/init.c: fix doc warning
  proc: add .gitignore for proc-subset-pid selftest
  hugetlb: pass head page to remove_hugetlb_page()
  drivers/base/memory: fix trying offlining memory blocks with memory holes on aarch64
  mm/page_alloc: fix counting of free pages after take off from buddy
  mm/debug_vm_pgtable: fix alignment for pmd/pud_advanced_tests()
  pid: take a reference when initializing `cad_pid`
  kfence: use TASK_IDLE when awaiting allocation
  Revert "MIPS: make userspace mapping young by default"
parents af8d9eb8 2eff0573
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -243,6 +243,9 @@ Maxime Ripard <mripard@kernel.org> <maxime.ripard@free-electrons.com>
Mayuresh Janorkar <mayur@ti.com>
Michael Buesch <m@bues.ch>
Michel Dänzer <michel@tungstengraphics.com>
Michel Lespinasse <michel@lespinasse.org>
Michel Lespinasse <michel@lespinasse.org> <walken@google.com>
Michel Lespinasse <michel@lespinasse.org> <walken@zoy.org>
Miguel Ojeda <ojeda@kernel.org> <miguel.ojeda.sandonis@gmail.com>
Mike Rapoport <rppt@kernel.org> <mike@compulab.co.il>
Mike Rapoport <rppt@kernel.org> <mike.rapoport@gmail.com>
+14 −16
Original line number Diff line number Diff line
@@ -158,31 +158,29 @@ unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)	__pgprot(_page_cachable_default | (p))
#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))

/*
 * Populate protection_map[] for userspace VMA protection bits.
 *
 * Index layout follows the generic mm convention: bits 0-2 encode
 * read/write/exec of the requested protection, bit 3 selects the
 * shared (8-15) vs. private (0-7) half of the table.
 *
 * NOTE(review): this is the plain PM() form (valid/accessed bits NOT
 * pre-set), i.e. the state after the revert of "MIPS: make userspace
 * mapping young by default" — pages start out young-less and take a
 * fault to set _PAGE_VALID/_PAGE_ACCESSED. The broken original text
 * interleaved the old PVA() lines with these, duplicating assignments
 * and splitting one statement across unrelated lines; only the PM()
 * variant is kept here.
 */
static inline void setup_protection_map(void)
{
	/* Private mappings (no VM_SHARED), indices 0-7. */
	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4]  = PM(_PAGE_PRESENT);
	protection_map[5]  = PM(_PAGE_PRESENT);
	protection_map[6]  = PM(_PAGE_PRESENT);
	protection_map[7]  = PM(_PAGE_PRESENT);

	/* Shared mappings (VM_SHARED), indices 8-15: writable ones get _PAGE_WRITE. */
	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}

#undef _PVA
#undef PM

void cpu_cache_init(void)
+3 −3
Original line number Diff line number Diff line
@@ -218,14 +218,14 @@ static int memory_block_offline(struct memory_block *mem)
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (nr_vmemmap_pages)
	if (nr_vmemmap_pages) {
		zone = page_zone(pfn_to_page(start_pfn));
		adjust_present_page_count(zone, -nr_vmemmap_pages);
	}

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages);
+50 −5
Original line number Diff line number Diff line
@@ -1855,6 +1855,45 @@ int ocfs2_remove_inode_range(struct inode *inode,
	return ret;
}

/*
 * zero out partial blocks of one cluster.
 *
 * start: file offset where zero starts, will be made upper block aligned.
 * len: it will be trimmed to the end of current cluster if "start + len"
 *      is bigger than it.
 */
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
					u64 start, u64 len)
{
	int ret;
	u64 start_block, end_block, nr_blocks;
	u64 p_block, offset;
	u32 cluster, p_cluster, nr_clusters;
	struct super_block *sb = inode->i_sb;
	/*
	 * Clamp the zeroing range to the end of the cluster containing
	 * 'start': at most one (partial) cluster is handled per call.
	 */
	u64 end = ocfs2_align_bytes_to_clusters(sb, start);

	if (start + len < end)
		end = start + len;

	/* Convert the byte range to filesystem blocks. */
	start_block = ocfs2_blocks_for_bytes(sb, start);
	end_block = ocfs2_blocks_for_bytes(sb, end);
	nr_blocks = end_block - start_block;
	/* Nothing spans a whole block — no on-disk zeroing needed. */
	if (!nr_blocks)
		return 0;

	/* Look up the physical cluster backing 'start'. */
	cluster = ocfs2_bytes_to_clusters(sb, start);
	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
				&nr_clusters, NULL);
	if (ret)
		return ret;
	/*
	 * No physical cluster mapped — presumably a hole/unwritten extent,
	 * so there is nothing on disk to zero. TODO(review): confirm that
	 * p_cluster == 0 can only mean "unallocated" here.
	 */
	if (!p_cluster)
		return 0;

	/*
	 * Translate the logical block range into physical blocks and zero
	 * it directly on the block device (bypassing the page cache).
	 */
	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
@@ -1865,7 +1904,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
{
	int ret;
	s64 llen;
	loff_t size;
	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
@@ -1896,6 +1935,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
		goto out_inode_unlock;
	}

	orig_isize = i_size_read(inode);
	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
@@ -1903,7 +1943,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		sr->l_start += orig_isize;
		break;
	default:
		ret = -EINVAL;
@@ -1957,6 +1997,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
	default:
		ret = -EINVAL;
	}

	/* zeroout eof blocks in the cluster. */
	if (!ret && change_size && orig_isize < size) {
		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
					size - orig_isize);
		if (!ret)
			i_size_write(inode, size);
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
@@ -1973,9 +2021,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
		goto out_inode_unlock;
	}

	if (change_size && i_size_read(inode) < size)
		i_size_write(inode, size);

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
+8 −0
Original line number Diff line number Diff line
@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 * To be differentiate with macro pte_mkyoung, this macro is used on platforms
 * where software maintains page access bit.
 */
#ifndef pte_sw_mkyoung
/*
 * Generic fallback: returns the PTE unchanged. Architectures that
 * maintain the page access bit in software override this (see the
 * comment above); the self-#define lets them detect the override.
 */
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung	pte_sw_mkyoung
#endif

#ifndef pte_savedwrite
#define pte_savedwrite pte_write
#endif
Loading