Commit 0f6a5cff authored by Johannes Weiner, committed by Linus Torvalds

mm: vmscan: split shrink_node() into node part and memcgs part

This function is getting long and unwieldy; split out the memcg bits.

The updated shrink_node() handles the generic (node) reclaim aspects:
  - global vmpressure notifications
  - writeback and congestion throttling
  - reclaim/compaction management
  - kswapd giving up on unreclaimable nodes

It then calls a new shrink_node_memcgs() which handles cgroup specifics:
  - the cgroup tree traversal
  - memory.low considerations
  - per-cgroup slab shrinking callbacks
  - per-cgroup vmpressure notifications
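
In condensed form, the resulting split looks roughly like this (a sketch
only, with the bodies elided down to comments; the diff below is the
actual change):

  static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
  {
  	struct mem_cgroup *memcg;

  	memcg = mem_cgroup_iter(sc->target_mem_cgroup, NULL, NULL);
  	do {
  		/* memory.low checks, per-cgroup LRU and slab
  		 * shrinking, per-cgroup vmpressure notifications */
  	} while ((memcg = mem_cgroup_iter(sc->target_mem_cgroup, memcg, NULL)));
  }

  static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
  {
  again:
  	shrink_node_memcgs(pgdat, sc);
  	/* global vmpressure notification, writeback and congestion
  	 * throttling, reclaim/compaction management; loops back to
  	 * "again" while should_continue_reclaim() says to keep going */
  }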

[hannes@cmpxchg.org: rename "root" to "target_memcg", per Roman]
  Link: http://lkml.kernel.org/r/20191025143640.GA386981@cmpxchg.org
Link: http://lkml.kernel.org/r/20191022144803.302233-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent afaf07a6
mm/vmscan.c: +25 −16
@@ -2722,26 +2722,18 @@ static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
 		(memcg && memcg_congested(pgdat, memcg));
 }
 
-static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
 {
-	struct reclaim_state *reclaim_state = current->reclaim_state;
-	struct mem_cgroup *root = sc->target_mem_cgroup;
-	unsigned long nr_reclaimed, nr_scanned;
-	bool reclaimable = false;
+	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
 	struct mem_cgroup *memcg;
-again:
-	memset(&sc->nr, 0, sizeof(sc->nr));
-
-	nr_reclaimed = sc->nr_reclaimed;
-	nr_scanned = sc->nr_scanned;
 
-	memcg = mem_cgroup_iter(root, NULL, NULL);
+	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
 	do {
 		struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
 		unsigned long reclaimed;
 		unsigned long scanned;
 
-		switch (mem_cgroup_protected(root, memcg)) {
+		switch (mem_cgroup_protected(target_memcg, memcg)) {
 		case MEMCG_PROT_MIN:
 			/*
 			 * Hard protection.
@@ -2785,7 +2777,23 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 			   sc->nr_scanned - scanned,
 			   sc->nr_reclaimed - reclaimed);
 
-	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
+	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));
+}
+
+static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
+{
+	struct reclaim_state *reclaim_state = current->reclaim_state;
+	struct mem_cgroup *target_memcg = sc->target_mem_cgroup;
+	unsigned long nr_reclaimed, nr_scanned;
+	bool reclaimable = false;
+
+again:
+	memset(&sc->nr, 0, sizeof(sc->nr));
+
+	nr_reclaimed = sc->nr_reclaimed;
+	nr_scanned = sc->nr_scanned;
+
+	shrink_node_memcgs(pgdat, sc);
 
 	if (reclaim_state) {
 		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -2793,7 +2801,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	}
 
 	/* Record the subtree's reclaim efficiency */
-	vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
+	vmpressure(sc->gfp_mask, target_memcg, true,
 		   sc->nr_scanned - nr_scanned,
 		   sc->nr_reclaimed - nr_reclaimed);
 
@@ -2849,7 +2857,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	 */
 	if (cgroup_reclaim(sc) && writeback_throttling_sane(sc) &&
 	    sc->nr.dirty && sc->nr.dirty == sc->nr.congested)
-		set_memcg_congestion(pgdat, root, true);
+		set_memcg_congestion(pgdat, target_memcg, true);
 
 	/*
 	 * Stall direct reclaim for IO completions if underlying BDIs
@@ -2858,7 +2866,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
 	 * the LRU too quickly.
 	 */
 	if (!sc->hibernation_mode && !current_is_kswapd() &&
-	    current_may_throttle() && pgdat_memcg_congested(pgdat, root))
+	    current_may_throttle() &&
+	    pgdat_memcg_congested(pgdat, target_memcg))
 		wait_iff_congested(BLK_RW_ASYNC, HZ/10);
 
 	if (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,