Commit f8f238ff authored by Andrew Morton's avatar Andrew Morton
Browse files

sync mm-stable with mm-hotfixes-stable to pick up depended-upon upstream changes

parents e492cd61 ef832747
Loading
Loading
Loading
Loading
+20 −0
Original line number Diff line number Diff line
@@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	/* Pick whichever summary pointer is currently being written to. */
	struct nilfs_segsum_pointer *ssp = sci->sc_blk_cnt > 0 ?
		&sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	size_t remaining;

	if (ssp->offset >= ssp->bh->b_size)
		return;

	/* Clear everything from the write offset to the end of the block. */
	remaining = ssp->bh->b_size - ssp->offset;
	memset(ssp->bh->b_data + ssp->offset, 0, remaining);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
				* The current segment is filled up
				* (internal code)
				*/
	nilfs_segctor_zeropad_segsum(sci);
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}
@@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
		goto retry;
	}
	if (unlikely(required)) {
		nilfs_segctor_zeropad_segsum(sci);
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
@@ -1533,6 +1552,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

+21 −18
Original line number Diff line number Diff line
@@ -134,9 +134,10 @@ void kmsan_kfree_large(const void *ptr);
 * @page_shift:	page_shift passed to vmap_range_noflush().
 *
 * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
 * vmalloc metadata address range.
 * vmalloc metadata address range. Returns 0 on success, callers must check
 * for non-zero return value.
 */
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift);

@@ -159,9 +160,10 @@ void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
 * @page_shift:	page_shift argument passed to vmap_range_noflush().
 *
 * KMSAN creates new metadata pages for the physical pages mapped into the
 * virtual memory.
 * virtual memory. Returns 0 on success, callers must check for non-zero return
 * value.
 */
void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
int kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift);

@@ -281,12 +283,13 @@ static inline void kmsan_kfree_large(const void *ptr)
{
}

static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
static inline int kmsan_vmap_pages_range_noflush(unsigned long start,
						 unsigned long end,
						 pgprot_t prot,
						 struct page **pages,
						 unsigned int page_shift)
{
	/*
	 * KMSAN is disabled in this configuration: there is no shadow/origin
	 * metadata to map, so report success unconditionally.  Callers must
	 * still check the return value in the KMSAN-enabled build.
	 */
	return 0;
}

static inline void kmsan_vunmap_range_noflush(unsigned long start,
@@ -294,12 +297,12 @@ static inline void kmsan_vunmap_range_noflush(unsigned long start,
{
}

static inline void kmsan_ioremap_page_range(unsigned long start,
static inline int kmsan_ioremap_page_range(unsigned long start,
					   unsigned long end,
					    phys_addr_t phys_addr,
					    pgprot_t prot,
					   phys_addr_t phys_addr, pgprot_t prot,
					   unsigned int page_shift)
{
	return 0;
}

static inline void kmsan_iounmap_page_range(unsigned long start,
+1 −0
Original line number Diff line number Diff line
@@ -1308,6 +1308,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
fail_pcpu:
	while (i > 0)
		percpu_counter_destroy(&mm->rss_stat[--i]);
	destroy_context(mm);
fail_nocontext:
	mm_free_pgd(mm);
fail_nopgd:
+40 −29
Original line number Diff line number Diff line
@@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;
	bool ruid_new, euid_new, suid_new;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
@@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	old = current_cred();

	/* check for no-op */
	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
				    uid_eq(keuid, old->fsuid))) &&
	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
		return 0;

	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
	if ((ruid_new || euid_new || suid_new) &&
	    !ns_capable_setid(old->user_ns, CAP_SETUID))
		return -EPERM;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
@@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;
	bool rgid_new, egid_new, sgid_new;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
@@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	old = current_cred();

	/* check for no-op */
	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
				    gid_eq(kegid, old->fsgid))) &&
	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
		return 0;

	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
	if ((rgid_new || egid_new || sgid_new) &&
	    !ns_capable_setid(old->user_ns, CAP_SETGID))
		return -EPERM;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
+24 −23
Original line number Diff line number Diff line
@@ -4965,7 +4965,8 @@ static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
 * Return: True if found in a leaf, false otherwise.
 *
 */
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
		unsigned long *gap_min, unsigned long *gap_max)
{
	enum maple_type type = mte_node_type(mas->node);
	struct maple_node *node = mas_mn(mas);
@@ -5030,8 +5031,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)

	if (unlikely(ma_is_leaf(type))) {
		mas->offset = offset;
		mas->min = min;
		mas->max = min + gap - 1;
		*gap_min = min;
		*gap_max = min + gap - 1;
		return true;
	}

@@ -5055,10 +5056,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
	enum maple_type type = mte_node_type(mas->node);
	unsigned long pivot, min, gap = 0;
	unsigned char offset;
	unsigned long *gaps;
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
	void __rcu **slots = ma_slots(mas_mn(mas), type);
	unsigned char offset, data_end;
	unsigned long *gaps, *pivots;
	void __rcu **slots;
	struct maple_node *node;
	bool found = false;

	if (ma_is_dense(type)) {
@@ -5066,13 +5067,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
		return true;
	}

	gaps = ma_gaps(mte_to_node(mas->node), type);
	node = mas_mn(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	for (; offset < mt_slots[type]; offset++) {
		pivot = mas_safe_pivot(mas, pivots, offset, type);
		if (offset && !pivot)
			break;
	data_end = ma_data_end(node, type, pivots, mas->max);
	for (; offset <= data_end; offset++) {
		pivot = mas_logical_pivot(mas, pivots, offset, type);

		/* Not within lower bounds */
		if (mas->index > pivot)
@@ -5307,6 +5310,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
	unsigned long *pivots;
	enum maple_type mt;

	if (min >= max)
		return -EINVAL;

	if (mas_is_start(mas))
		mas_start(mas);
	else if (mas->offset >= 2)
@@ -5361,6 +5367,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
{
	struct maple_enode *last = mas->node;

	if (min >= max)
		return -EINVAL;

	if (mas_is_start(mas)) {
		mas_start(mas);
		mas->offset = mas_data_end(mas);
@@ -5380,7 +5389,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
	mas->index = min;
	mas->last = max;

	while (!mas_rev_awalk(mas, size)) {
	while (!mas_rev_awalk(mas, size, &min, &max)) {
		if (last == mas->node) {
			if (!mas_rewind_node(mas))
				return -EBUSY;
@@ -5395,17 +5404,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	/*
	 * mas_rev_awalk() has set mas->min and mas->max to the gap values.  If
	 * the maximum is outside the window we are searching, then use the last
	 * location in the search.
	 * mas->max and mas->min is the range of the gap.
	 * mas->index and mas->last are currently set to the search range.
	 */

	/* Trim the upper limit to the max. */
	if (mas->max <= mas->last)
		mas->last = mas->max;
	if (max <= mas->last)
		mas->last = max;

	mas->index = mas->last - size + 1;
	return 0;
Loading