Commit 15fb96a3 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'mm-stable-2023-05-03-16-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more MM updates from Andrew Morton:

 - Some DAMON cleanups from Kefeng Wang

 - Some KSM work from David Hildenbrand, to make the PR_SET_MEMORY_MERGE
   prctl's behavior more similar to KSM's behavior.

[ Andrew called these "final", but I suspect we'll have a series fixing
  up the fact that the last commit in the dmapools series in the
  previous pull seems to have unintentionally just reverted all the
  other commits in the same series..   - Linus ]

* tag 'mm-stable-2023-05-03-16-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: hwpoison: coredump: support recovery from dump_user_range()
  mm/page_alloc: add some comments to explain the possible hole in __pageblock_pfn_to_page()
  mm/ksm: move disabling KSM from s390/gmap code to KSM code
  selftests/ksm: ksm_functional_tests: add prctl unmerge test
  mm/ksm: unmerge and clear VM_MERGEABLE when setting PR_SET_MEMORY_MERGE=0
  mm/damon/paddr: fix missing folio_sz update in damon_pa_young()
  mm/damon/paddr: minor refactor of damon_pa_mark_accessed_or_deactivate()
  mm/damon/paddr: minor refactor of damon_pa_pageout()
parents 671e148d 245f0922
Loading
Loading
Loading
Loading
+1 −19
Original line number Diff line number Diff line
@@ -2585,30 +2585,12 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);

int gmap_mark_unmergeable(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int ret;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Make sure to disable KSM (if enabled for the whole process or
	 * individual VMAs). Note that nothing currently hinders user space
	 * from re-enabling it.
	 */
	clear_bit(MMF_VM_MERGE_ANY, &mm->flags);

	for_each_vma(vmi, vma) {
		/* Copy vm_flags to avoid partial modifications in ksm_madvise */
		vm_flags = vma->vm_flags;
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  MADV_UNMERGEABLE, &vm_flags);
		if (ret)
			return ret;
		vm_flags_reset(vma, vm_flags);
	}
	mm->def_flags &= ~VM_MERGEABLE;
	return 0;
	return ksm_disable(current->mm);
}
EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);

+1 −0
Original line number Diff line number Diff line
@@ -882,6 +882,7 @@ static int dump_emit_page(struct coredump_params *cprm, struct page *page)
	pos = file->f_pos;
	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
	iov_iter_set_copy_mc(&iter);
	n = __kernel_write_iter(cprm->file, &iter, &pos);
	if (n != PAGE_SIZE)
		return 0;
+7 −0
Original line number Diff line number Diff line
@@ -21,6 +21,8 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,

void ksm_add_vma(struct vm_area_struct *vma);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);

int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
@@ -79,6 +81,11 @@ static inline void ksm_add_vma(struct vm_area_struct *vma)
{
}

static inline int ksm_disable(struct mm_struct *mm)
{
	return 0;
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
+16 −0
Original line number Diff line number Diff line
@@ -42,6 +42,7 @@ struct iov_iter_state {

struct iov_iter {
	u8 iter_type;
	bool copy_mc;
	bool nofault;
	bool data_source;
	bool user_backed;
@@ -256,8 +257,22 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);

#ifdef CONFIG_ARCH_HAS_COPY_MC
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
static inline void iov_iter_set_copy_mc(struct iov_iter *i)
{
	i->copy_mc = true;
}

static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return i->copy_mc;
}
#else
#define _copy_mc_to_iter _copy_to_iter
static inline void iov_iter_set_copy_mc(struct iov_iter *i) { }
static inline bool iov_iter_is_copy_mc(const struct iov_iter *i)
{
	return false;
}
#endif

size_t iov_iter_zero(size_t bytes, struct iov_iter *);
@@ -380,6 +395,7 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_UBUF,
		.copy_mc = false,
		.user_backed = true,
		.data_source = direction,
		.ubuf = buf,
+3 −9
Original line number Diff line number Diff line
@@ -2695,16 +2695,10 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;

		if (arg2) {
		if (arg2)
			error = ksm_enable_merge_any(me->mm);
		} else {
			/*
			 * TODO: we might want disable KSM on all VMAs and
			 * trigger unsharing to completely disable KSM.
			 */
			clear_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
			error = 0;
		}
		else
			error = ksm_disable_merge_any(me->mm);
		mmap_write_unlock(me->mm);
		break;
	case PR_GET_MEMORY_MERGE:
Loading