From f93bee9807e047070b441a0d707322d53f807edb Mon Sep 17 00:00:00 2001
From: Wonhyuk Yang
Date: Thu, 12 May 2022 20:22:51 -0700
Subject: [PATCH] mm/page_alloc: cache the result of node_dirty_ok()

To spread dirty pages, each node is checked against its dirty limit
using the expensive node_dirty_ok(). To reduce the frequency of those
calls, the last node that hit the dirty limit is cached.

Instead of caching only the node, caching both the node and its
node_dirty_ok() status further reduces the number of calls to
node_dirty_ok().

[akpm@linux-foundation.org: rename last_pgdat_dirty_limit to last_pgdat_dirty_ok]
Link: https://lkml.kernel.org/r/20220430011032.64071-1-vvghjk1234@gmail.com
Signed-off-by: Wonhyuk Yang
Acked-by: Johannes Weiner
Acked-by: Mel Gorman
Cc: Donghyeok Kim
Cc: JaeSang Yoo
Cc: Jiyoup Kim
Cc: Ohhoon Kwon
Signed-off-by: Andrew Morton
---
 mm/page_alloc.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e85a0dc227613..ddb0575c6b4a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4021,7 +4021,8 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 {
 	struct zoneref *z;
 	struct zone *zone;
-	struct pglist_data *last_pgdat_dirty_limit = NULL;
+	struct pglist_data *last_pgdat = NULL;
+	bool last_pgdat_dirty_ok = false;
 	bool no_fallback;
 
 retry:
@@ -4060,13 +4061,13 @@ retry:
 		 * dirty-throttling and the flusher threads.
 		 */
 		if (ac->spread_dirty_pages) {
-			if (last_pgdat_dirty_limit == zone->zone_pgdat)
-				continue;
+			if (last_pgdat != zone->zone_pgdat) {
+				last_pgdat = zone->zone_pgdat;
+				last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
+			}
 
-			if (!node_dirty_ok(zone->zone_pgdat)) {
-				last_pgdat_dirty_limit = zone->zone_pgdat;
+			if (!last_pgdat_dirty_ok)
 				continue;
-			}
 		}
 
 		if (no_fallback && nr_online_nodes > 1 &&
-- 
2.39.5
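
For readers outside the kernel tree: the change is a small memoization.
The old code only remembered the last node that *failed* node_dirty_ok(),
so a node that passed was re-checked for every one of its zones; the new
code remembers the last node seen together with its result, so each node
is checked at most once per zonelist walk. Below is a minimal userspace
sketch of the same pattern; the struct layout, expensive_check(), and the
zone-to-node mapping are hypothetical stand-ins, not kernel APIs.

    /*
     * Standalone sketch of the caching pattern from this patch.
     * All types and expensive_check() are hypothetical stand-ins.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct node { int id; };
    struct zone { struct node *node; };

    static int check_calls;

    /* Stand-in for the expensive node_dirty_ok() predicate. */
    static bool expensive_check(struct node *n)
    {
    	check_calls++;
    	return n->id % 2 == 0;	/* arbitrary: even-id nodes pass */
    }

    int main(void)
    {
    	struct node nodes[2] = { { 0 }, { 1 } };
    	/* Consecutive zones often share a node, as in a zonelist. */
    	struct zone zones[6] = {
    		{ &nodes[0] }, { &nodes[0] }, { &nodes[0] },
    		{ &nodes[1] }, { &nodes[1] }, { &nodes[1] },
    	};
    	struct node *last_node = NULL;
    	bool last_node_ok = false;
    	int i;

    	for (i = 0; i < 6; i++) {
    		/*
    		 * Re-evaluate only when the node changes; reuse the
    		 * cached result otherwise, whether it passed or failed.
    		 */
    		if (last_node != zones[i].node) {
    			last_node = zones[i].node;
    			last_node_ok = expensive_check(last_node);
    		}
    		if (!last_node_ok)
    			continue;
    		printf("zone %d usable (node %d)\n", i, last_node->id);
    	}
    	printf("expensive_check() called %d times for 6 zones\n",
    	       check_calls);
    	return 0;
    }

In this sketch the predicate runs twice for six zones. Under the
pre-patch scheme it would have run four times: once per zone of the
passing node, since only failing nodes were cached, plus once for the
failing node.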