From f889e1689ea15e459d5c997f0a30ce04bf3e6cc3 Mon Sep 17 00:00:00 2001
From: Miaohe Lin
Date: Thu, 12 May 2022 20:22:58 -0700
Subject: [PATCH] mm/vmscan: take min_slab_pages into account when trying to call shrink_node

Since commit d3db6a53b2df ("mm: vmscan: invoke slab shrinkers from
shrink_zone()"), slab reclaim and lru page reclaim are done together in
shrink_node(). So we should take min_slab_pages into account when trying
to call shrink_node.

Link: https://lkml.kernel.org/r/20220425112118.20924-1-linmiaohe@huawei.com
Signed-off-by: Miaohe Lin
Cc: Huang Ying
Cc: Christoph Hellwig
Cc: Johannes Weiner
Signed-off-by: Andrew Morton
---
 mm/vmscan.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index a9761b04564c4..5ac0a71dc0dfc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4713,7 +4713,8 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 	noreclaim_flag = memalloc_noreclaim_save();
 	set_task_reclaim_state(p, &sc.reclaim_state);
 
-	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
+	if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
+	    node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
 		/*
 		 * Free memory by calling shrink node with increasing
 		 * priorities until we have enough memory freed.
-- 
2.39.5