Commit d81e9624 authored by Peng Wu, committed by Wang Wensheng

proc: Count reliable memory usage of reliable tasks

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S


CVE: NA

--------------------------------

Count reliable memory allocated by reliable user tasks.

The policy for counting reliable memory usage follows the RSS statistics:
wherever an mm counter is updated, the reliable page counter must be
updated as well. A page that page_reliable() identifies as reliable
updates the per-mm counter through reliable_page_counter().

Whenever new code uses any of the following, a matching
reliable_page_counter() update should be added (see the sketch after
this list):
- add_mm_counter
- dec_mm_counter
- inc_mm_counter_fast
- dec_mm_counter_fast
- rss[mm_counter(page)]
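
A minimal sketch of the pairing rule, using a hypothetical fault path
(the function names here are illustrative, not taken from this patch):

	/* Hypothetical example: every RSS update gets a reliable twin. */
	static void account_new_anon_page(struct mm_struct *mm, struct page *page)
	{
		inc_mm_counter(mm, MM_ANONPAGES);    /* existing RSS accounting */
		reliable_page_counter(page, mm, 1);  /* counted only if page_reliable() */
	}

	static void unaccount_anon_page(struct mm_struct *mm, struct page *page)
	{
		dec_mm_counter(mm, MM_ANONPAGES);
		reliable_page_counter(page, mm, -1); /* same page, opposite delta */
	}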

Signed-off-by: Peng Wu <wupeng58@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
parent cb562ce3
+2 −0
@@ -195,6 +195,7 @@ read the file /proc/PID/status::
  VmPTE:        20 kb
  VmSwap:        0 kB
  HugetlbPages:          0 kB
  Reliable:         1608 kB
  CoreDumping:    0
  THP_enabled:	  1
  Threads:        1
@@ -275,6 +276,7 @@ It's slow but very precise.
 VmSwap                      amount of swap used by anonymous private data
                             (shmem swap usage is not included)
 HugetlbPages                size of hugetlb memory portions
 Reliable                    size of reliable memory used
 CoreDumping                 process's memory is currently being dumped
                             (killing the process may lead to a corrupted core)
 THP_enabled		     process is allowed to use THP (returns 0 when
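
With the patch applied, the new field can be read like any other line of
/proc/PID/status. A small userspace sketch (not part of the patch; the
field prefix is the only assumption):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/status", "r");

		if (!f)
			return 1;
		/* Print only the reliable-memory line, e.g. "Reliable:  1608 kB". */
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "Reliable:", 9))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}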
+1 −0
@@ -77,6 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
	reliable_report_usage(m, mm);
}
#undef SEQ_PUT_DEC
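
reliable_report_usage() itself is not defined in this commit. A plausible
sketch, assuming the per-mm counter used by reliable_page_counter() below
is mm->reliable_nr_page and the value is reported in kB like the
neighbouring fields:

	/* Sketch only; the real helper is defined elsewhere in the series. */
	void reliable_report_usage(struct seq_file *m, struct mm_struct *mm)
	{
		if (!mem_reliable_is_enabled())
			return;
		seq_printf(m, "Reliable:\t%8lu kB\n",
			   (unsigned long)atomic_long_read(&mm->reliable_nr_page)
				<< (PAGE_SHIFT - 10));
	}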

+14 −0
@@ -23,6 +23,7 @@ extern bool pagecache_use_reliable_mem;
extern struct percpu_counter pagecache_reliable_pages;
extern struct percpu_counter anon_reliable_pages;
extern unsigned long task_reliable_limit __read_mostly;
extern atomic_long_t reliable_user_used_nr_page;

extern void mem_reliable_init(bool has_unmirrored_mem,
			      unsigned long *zone_movable_pfn,
@@ -39,6 +40,8 @@ extern bool mem_reliable_counter_initialized(void);
extern void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
				       int preferred_nid, nodemask_t *nodemask);
extern void reliable_show_mem_info(void);
extern void reliable_report_usage(struct seq_file *m,
		struct mm_struct *mm);

static inline bool mem_reliable_is_enabled(void)
{
@@ -125,6 +128,13 @@ static inline bool reliable_allow_fb_enabled(void)
{
	return reliable_allow_fallback;
}

static inline void reliable_page_counter(struct page *page,
		struct mm_struct *mm, int val)
{
	if (page_reliable(page))
		atomic_long_add(val, &mm->reliable_nr_page);
}
#else
#define reliable_enabled 0
#define pagecache_use_reliable_mem 0
@@ -164,6 +174,10 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
					      nodemask_t *nodemask) {}
static inline bool reliable_allow_fb_enabled(void) { return false; }
static inline void reliable_show_mem_info(void) {}
static inline void reliable_page_counter(struct page *page,
		struct mm_struct *mm, int val) {}
static inline void reliable_report_usage(struct seq_file *m,
		struct mm_struct *mm) {}
#endif

#endif
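
page_reliable() is not part of this hunk either. In this series, reliable
memory is backed by mirrored, non-movable memory, so a hedged sketch of
the check might look like the following; the zone test is an assumption
about the series' design, not code from this patch:

	/* Assumption: a page is "reliable" when the feature is enabled and
	 * the page sits outside ZONE_MOVABLE (i.e. in mirrored memory).
	 */
	static inline bool page_reliable(struct page *page)
	{
		return mem_reliable_is_enabled() &&
		       page_zonenum(page) < ZONE_MOVABLE;
	}

Note that reliable_page_counter() uses atomic_long_add(): its call sites
run under different locks (page table lock, mmap_lock in either mode), so
an atomic per-mm counter avoids introducing any new locking.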
+2 −0
@@ -183,6 +183,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,

	if (new_page) {
		get_page(new_page);
		reliable_page_counter(new_page, mm, 1);
		page_add_new_anon_rmap(new_page, vma, addr, false);
		lru_cache_add_inactive_or_unevictable(new_page, vma);
	} else
@@ -194,6 +195,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	reliable_page_counter(old_page, mm, -1);
	flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
	ptep_clear_flush_notify(vma, addr, pvmw.pte);
	if (new_page)
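
Note how __replace_page() keeps the books balanced: the new page is
counted in before it is mapped, and the old page is counted out before
its rmap is torn down. RSS stays constant across the replacement, but the
reliable count can legitimately change if the old and new pages come from
different (mirrored vs. non-mirrored) memory.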
+8 −0
@@ -652,6 +652,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		reliable_page_counter(page, vma->vm_mm, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(vmf->ptl);
		count_vm_event(THP_FAULT_ALLOC);
@@ -1115,6 +1116,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
	get_page(src_page);
	page_dup_rmap(src_page, true);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
	reliable_page_counter(src_page, dst_mm, HPAGE_PMD_NR);
out_zero_page:
	mm_inc_nr_ptes(dst_mm);
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
@@ -1696,6 +1698,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,

		if (pmd_present(orig_pmd)) {
			page = pmd_page(orig_pmd);
			reliable_page_counter(page, tlb->mm, -HPAGE_PMD_NR);
			page_remove_rmap(page, true);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			VM_BUG_ON_PAGE(!PageHead(page), page);
@@ -2077,6 +2080,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
				set_page_dirty(page);
			if (!PageReferenced(page) && pmd_young(old_pmd))
				SetPageReferenced(page);
			reliable_page_counter(page, mm, -HPAGE_PMD_NR);
			page_remove_rmap(page, true);
			put_page(page);
		}
@@ -2212,6 +2216,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,

	if (freeze) {
		for (i = 0; i < HPAGE_PMD_NR; i++) {
			reliable_page_counter(page + i, mm, -1);
			page_remove_rmap(page + i, false);
			put_page(page + i);
		}
@@ -3004,6 +3009,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
	if (pmd_soft_dirty(pmdval))
		pmdswp = pmd_swp_mksoft_dirty(pmdswp);
	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
	reliable_page_counter(page, mm, -HPAGE_PMD_NR);
	page_remove_rmap(page, true);
	put_page(page);
}
@@ -3031,6 +3037,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
		pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));

	flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
	reliable_page_counter(new, mm, HPAGE_PMD_NR);
	if (PageAnon(new))
		page_add_anon_rmap(new, vma, mmun_start, true);
	else
@@ -3087,6 +3094,7 @@ vm_fault_t do_anon_huge_page_remap(struct vm_area_struct *vma, unsigned long add
		pgtable_trans_huge_deposit(vma->vm_mm, pmd, pgtable);
		set_pmd_at(vma->vm_mm, address, pmd, entry);
		add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
		reliable_page_counter(page, vma->vm_mm, HPAGE_PMD_NR);
		mm_inc_nr_ptes(vma->vm_mm);
		spin_unlock(ptl);
		count_vm_event(THP_FAULT_ALLOC);
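
All of the huge-page paths above account in units of HPAGE_PMD_NR, the
number of base pages covered by one PMD mapping: 1 << (PMD_SHIFT -
PAGE_SHIFT), i.e. 1 << (21 - 12) = 512 on x86-64 with 4 KiB pages.
Mapping or zapping a THP therefore moves the reliable counter by 512 in
one step, while the freeze path in __split_huge_pmd_locked() hands
accounting back one base page at a time before each subpage's rmap is
removed.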