Commit 8fc2546f authored by Ma Wupeng's avatar Ma Wupeng Committed by Wupeng Ma
Browse files

proc: mem_reliable: Count reliable memory usage of reliable tasks

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8USBA


CVE: NA

--------------------------------

Counting reliable memory allocated by the reliable user tasks.

The policy for counting reliable memory usage is based on RSS statistics.
Anywhere the mm counters are updated, reliable pages need to be counted
too. A page confirmed reliable by page_reliable() must update the
reliable page counter by calling reliable_page_counter().

Updating the reliable page counter should be considered wherever any of
the following logic is added:
- add_mm_counter
- dec_mm_counter
- inc_mm_counter_fast
- dec_mm_counter_fast
- rss[mm_counter(page)]

Signed-off-by: default avatarMa Wupeng <mawupeng1@huawei.com>
parent cd1a72f5
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -197,6 +197,7 @@ read the file /proc/PID/status::
  VmPTE:        20 kb
  VmSwap:        0 kB
  HugetlbPages:          0 kB
  Reliable:         1608 kB
  CoreDumping:    0
  THP_enabled:	  1
  Threads:        1
@@ -280,6 +281,7 @@ It's slow but very precise.
 VmSwap                      amount of swap used by anonymous private data
                             (shmem swap usage is not included)
 HugetlbPages                size of hugetlb memory portions
 Reliable                    size of reliable memory used
 CoreDumping                 process's memory is currently being dumped
                             (killing the process may lead to a corrupted core)
 THP_enabled		     process is allowed to use THP (returns 0 when
+1 −0
Original line number Diff line number Diff line
@@ -77,6 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
	reliable_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

+39 −0
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@ bool mem_reliable_counter_initialized(void);
void reliable_report_meminfo(struct seq_file *m);
void mem_reliable_out_of_memory(gfp_t gfp_mask, unsigned int order,
				int preferred_nid, nodemask_t *nodemask);
void reliable_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline bool mem_reliable_is_enabled(void)
{
@@ -180,6 +181,38 @@ static inline bool mem_reliable_should_reclaim(void)

	return false;
}

/*
 * Add @val (may be negative) pages to @mm's reliable page counter.
 *
 * NOTE(review): the read-back-and-clamp below is not atomic as a whole —
 * a concurrent add between atomic_long_read() and atomic_long_set() can
 * be lost. This race is tolerated by design: per the comment below, the
 * counter exists for debug reporting only.
 */
static inline void reliable_page_counter_inner(struct mm_struct *mm, int val)
{
	atomic_long_add(val, &mm->reliable_nr_page);

	/*
	 * Update reliable page counter to zero if underflows.
	 *
	 * Since reliable page counter is used for debug purpose only,
	 * there is no real function problem by doing this.
	 */
	if (unlikely(atomic_long_read(&mm->reliable_nr_page) < 0))
		atomic_long_set(&mm->reliable_nr_page, 0);
}

/*
 * Adjust @mm's reliable page counter by @val for @folio, but only
 * when the folio actually resides in reliable memory.
 */
static inline void add_reliable_folio_counter(struct folio *folio,
		struct mm_struct *mm, int val)
{
	if (folio_reliable(folio))
		reliable_page_counter_inner(mm, val);
}

/*
 * Adjust @mm's reliable page counter by @val for @page, but only
 * when the page actually resides in reliable memory.
 */
static inline void add_reliable_page_counter(struct page *page,
		struct mm_struct *mm, int val)
{
	if (page_reliable(page))
		reliable_page_counter_inner(mm, val);
}
#else
#define reliable_enabled 0

@@ -217,6 +250,12 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
					      int preferred_nid,
					      nodemask_t *nodemask) {}
static inline bool reliable_allow_fb_enabled(void) { return false; }
/*
 * Stubs for the config-off branch (#else above): reliable page
 * counting compiles away to nothing.
 */
static inline void add_reliable_page_counter(struct page *page,
		struct mm_struct *mm, int val) {}
static inline void add_reliable_folio_counter(struct folio *folio,
		struct mm_struct *mm, int val) {}
static inline void reliable_report_usage(struct seq_file *m,
		struct mm_struct *mm) {}
#endif

#endif
+4 −0
Original line number Diff line number Diff line
@@ -936,6 +936,10 @@ struct mm_struct {
#endif /* CONFIG_LRU_GEN */
#ifdef CONFIG_SHARE_POOL
		struct sp_group_master *sp_group_master;
#endif
#ifdef CONFIG_MEMORY_RELIABLE
		/* total used reliable pages */
		atomic_long_t reliable_nr_page;
#endif
	} __randomize_layout;

+2 −0
Original line number Diff line number Diff line
@@ -181,6 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,

	if (new_page) {
		folio_get(new_folio);
		add_reliable_folio_counter(new_folio, mm, folio_nr_pages(new_folio));
		page_add_new_anon_rmap(new_page, vma, addr);
		folio_add_lru_vma(new_folio, vma);
	} else
@@ -198,6 +199,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
		set_pte_at_notify(mm, addr, pvmw.pte,
				  mk_pte(new_page, vma->vm_page_prot));

	add_reliable_page_counter(old_page, mm, -1);
	page_remove_rmap(old_page, vma, false);
	if (!folio_mapped(old_folio))
		folio_free_swap(old_folio);
Loading