Commit e77a830c authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "5 patches.

  Subsystems affected by this patch series: coda, overlayfs, and
  mm (pagecache and memcg)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  tools/cgroup/slabinfo.py: updated to work on current kernel
  mm/filemap: fix mapping_seek_hole_data on THP & 32-bit
  mm/filemap: fix find_lock_entries hang on 32-bit THP
  ovl: fix reference counting in ovl_mmap error path
  coda: fix reference counting in coda_file_mmap error path
parents 95838bd9 1974c45d
(4 changed files: fs/coda/file.c, fs/overlayfs/file.c, mm/filemap.c, tools/cgroup/memcg_slabinfo.py — diffs below)
+3 −3
Original line number Diff line number Diff line
@@ -175,10 +175,10 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
	ret = call_mmap(vma->vm_file, vma);

	if (ret) {
		/* if call_mmap fails, our caller will put coda_file so we
		 * should drop the reference to the host_file that we got.
		/* if call_mmap fails, our caller will put host_file so we
		 * should drop the reference to the coda_file that we got.
		 */
		fput(host_file);
		fput(coda_file);
		kfree(cvm_ops);
	} else {
		/* here we add redirects for the open/close vm_operations */
+1 −10
Original line number Diff line number Diff line
@@ -430,20 +430,11 @@ static int ovl_mmap(struct file *file, struct vm_area_struct *vma)
	if (WARN_ON(file != vma->vm_file))
		return -EIO;

	vma->vm_file = get_file(realfile);
	vma_set_file(vma, realfile);

	old_cred = ovl_override_creds(file_inode(file)->i_sb);
	ret = call_mmap(vma->vm_file, vma);
	revert_creds(old_cred);

	if (ret) {
		/* Drop reference count from new vm_file value */
		fput(realfile);
	} else {
		/* Drop reference count from previous vm_file value */
		fput(file);
	}

	ovl_file_accessed(file);

	return ret;
+19 −12
Original line number Diff line number Diff line
@@ -1969,8 +1969,14 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
put:
		put_page(page);
next:
		if (!xa_is_value(page) && PageTransHuge(page))
			xas_set(&xas, page->index + thp_nr_pages(page));
		if (!xa_is_value(page) && PageTransHuge(page)) {
			unsigned int nr_pages = thp_nr_pages(page);

			/* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
			xas_set(&xas, page->index + nr_pages);
			if (xas.xa_index < nr_pages)
				break;
		}
	}
	rcu_read_unlock();

@@ -2672,7 +2678,7 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
		loff_t end, int whence)
{
	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
	pgoff_t max = (end - 1) / PAGE_SIZE;
	pgoff_t max = (end - 1) >> PAGE_SHIFT;
	bool seek_data = (whence == SEEK_DATA);
	struct page *page;

@@ -2681,7 +2687,8 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,

	rcu_read_lock();
	while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
		loff_t pos = xas.xa_index * PAGE_SIZE;
		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
		unsigned int seek_size;

		if (start < pos) {
			if (!seek_data)
@@ -2689,25 +2696,25 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
			start = pos;
		}

		pos += seek_page_size(&xas, page);
		seek_size = seek_page_size(&xas, page);
		pos = round_up(pos + 1, seek_size);
		start = page_seek_hole_data(&xas, mapping, page, start, pos,
				seek_data);
		if (start < pos)
			goto unlock;
		if (start >= end)
			break;
		if (seek_size > PAGE_SIZE)
			xas_set(&xas, pos >> PAGE_SHIFT);
		if (!xa_is_value(page))
			put_page(page);
	}
	rcu_read_unlock();

	if (seek_data)
		return -ENXIO;
	goto out;

		start = -ENXIO;
unlock:
	rcu_read_unlock();
	if (!xa_is_value(page))
	if (page && !xa_is_value(page))
		put_page(page);
out:
	if (start > end)
		return end;
	return start;
+4 −4
Original line number Diff line number Diff line
@@ -128,9 +128,9 @@ def detect_kernel_config():

    cfg['nr_nodes'] = prog['nr_online_nodes'].value_()

    if prog.type('struct kmem_cache').members[1][1] == 'flags':
    if prog.type('struct kmem_cache').members[1].name == 'flags':
        cfg['allocator'] = 'SLUB'
    elif prog.type('struct kmem_cache').members[1][1] == 'batchcount':
    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
        cfg['allocator'] = 'SLAB'
    else:
        err('Can\'t determine the slab allocator')
@@ -193,7 +193,7 @@ def main():
        # look over all slab pages, belonging to non-root memcgs
        # and look for objects belonging to the given memory cgroup
        for page in for_each_slab_page(prog):
            objcg_vec_raw = page.obj_cgroups.value_()
            objcg_vec_raw = page.memcg_data.value_()
            if objcg_vec_raw == 0:
                continue
            cache = page.slab_cache
@@ -202,7 +202,7 @@ def main():
            addr = cache.value_()
            caches[addr] = cache
            # clear the lowest bit to get the true obj_cgroups
            objcg_vec = Object(prog, page.obj_cgroups.type_,
            objcg_vec = Object(prog, 'struct obj_cgroup **',
                               value=objcg_vec_raw & ~1)

            if addr not in stats: