Commit 6080d19f authored by xu xin's avatar xu xin Committed by Andrew Morton
Browse files

ksm: add ksm zero pages for each process

As the number of ksm zero pages is not included in ksm_merging_pages per
process when enabling use_zero_pages, it's unclear how many actual
pages are merged by KSM. To let users accurately estimate their memory
demands when unsharing KSM zero-pages, it's necessary to show KSM zero-
pages per process. In addition, it helps users know the actual KSM
profit, because KSM-placed zero pages also benefit from KSM.

Since accurate unsharing of zero pages placed by KSM has been achieved,
tracking the merging and unmerging of empty pages is no longer
difficult.

Since we already have /proc/<pid>/ksm_stat, just add the information of
'ksm_zero_pages' in it.

Link: https://lkml.kernel.org/r/20230613030938.185993-1-yang.yang29@zte.com.cn


Signed-off-by: default avatarxu xin <xu.xin16@zte.com.cn>
Acked-by: default avatarDavid Hildenbrand <david@redhat.com>
Reviewed-by: default avatarXiaokai Ran <ran.xiaokai@zte.com.cn>
Reviewed-by: default avatarYang Yang <yang.yang29@zte.com.cn>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Xuexin Jiang <jiang.xuexin@zte.com.cn>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent e2942062
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -3207,6 +3207,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
	mm = get_task_mm(task);
	if (mm) {
		seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
		seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
		mmput(mm);
+5 −3
Original line number Diff line number Diff line
@@ -35,10 +35,12 @@ void __ksm_exit(struct mm_struct *mm);

extern unsigned long ksm_zero_pages;

static inline void ksm_might_unmap_zero_page(pte_t pte)
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
	if (is_ksm_zero_pte(pte))
	if (is_ksm_zero_pte(pte)) {
		ksm_zero_pages--;
		mm->ksm_zero_pages--;
	}
}

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -109,7 +111,7 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline void ksm_might_unmap_zero_page(pte_t pte)
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
}

+7 −2
Original line number Diff line number Diff line
@@ -812,7 +812,7 @@ struct mm_struct {
#ifdef CONFIG_KSM
		/*
		 * Represent how many pages of this process are involved in KSM
		 * merging.
		 * merging (not including ksm_zero_pages).
		 */
		unsigned long ksm_merging_pages;
		/*
@@ -820,7 +820,12 @@ struct mm_struct {
		 * including merged and not merged.
		 */
		unsigned long ksm_rmap_items;
#endif
		/*
		 * Represent how many empty pages are merged with kernel zero
		 * pages when enabling KSM use_zero_pages.
		 */
		unsigned long ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN
		struct {
			/* this mm_struct is on lru_gen_mm_list */
+1 −1
Original line number Diff line number Diff line
@@ -710,7 +710,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
				spin_lock(ptl);
				ptep_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
				ksm_might_unmap_zero_page(pteval);
				ksm_might_unmap_zero_page(vma->vm_mm, pteval);
			}
		} else {
			src_page = pte_page(pteval);
+1 −0
Original line number Diff line number Diff line
@@ -1233,6 +1233,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
		 */
		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
		ksm_zero_pages++;
		mm->ksm_zero_pages++;
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting otherwise we
Loading