git.baikalelectronics.ru Git - kernel.git/commitdiff
mm/damon/schemes: skip already charged targets and regions
authorSeongJae Park <sj@kernel.org>
Fri, 5 Nov 2021 20:47:20 +0000 (13:47 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Nov 2021 20:30:45 +0000 (13:30 -0700)
If DAMOS has stopped applying action in the middle of a group of memory
regions due to its size quota, it starts the work again from the
beginning of the address space in the next charge window.  If there is a
huge memory region at the beginning of the address space and it always
fulfills the scheme's target data access pattern, the action will be
applied to only that region.

This mitigates the case by skipping, at the beginning of the next charge
window, the memory regions that were charged in the current charge window.

Link: https://lkml.kernel.org/r/20211019150731.16699-4-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leonard Foerster <foersleo@amazon.de>
Cc: Marco Elver <elver@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/damon.h
mm/damon/core.c

index 3a1ce9d9921c8138dc124b7ba46d64b9c5a9ab2f..585d985768fd1b369ab4cb6e0d66fe20a439ee54 100644 (file)
@@ -107,6 +107,8 @@ struct damos_quota {
 /* private: For charging the quota */
        unsigned long charged_sz;
        unsigned long charged_from;
+       struct damon_target *charge_target_from;
+       unsigned long charge_addr_from;
 };
 
 /**
@@ -307,6 +309,9 @@ struct damon_ctx {
 #define damon_prev_region(r) \
        (container_of(r->list.prev, struct damon_region, list))
 
+#define damon_last_region(t) \
+       (list_last_entry(&t->regions_list, struct damon_region, list))
+
 #define damon_for_each_region(r, t) \
        list_for_each_entry(r, &t->regions_list, list)
 
index cce14a0d5c725eb03dc9187cff671ce01c1b97f5..693b75bc34505bda294380d02996de35a7277eed 100644 (file)
@@ -111,6 +111,8 @@ struct damos *damon_new_scheme(
        scheme->quota.reset_interval = quota->reset_interval;
        scheme->quota.charged_sz = 0;
        scheme->quota.charged_from = 0;
+       scheme->quota.charge_target_from = NULL;
+       scheme->quota.charge_addr_from = 0;
 
        return scheme;
 }
@@ -553,6 +555,37 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
                if (quota->sz && quota->charged_sz >= quota->sz)
                        continue;
 
+               /* Skip previously charged regions */
+               if (quota->charge_target_from) {
+                       if (t != quota->charge_target_from)
+                               continue;
+                       if (r == damon_last_region(t)) {
+                               quota->charge_target_from = NULL;
+                               quota->charge_addr_from = 0;
+                               continue;
+                       }
+                       if (quota->charge_addr_from &&
+                                       r->ar.end <= quota->charge_addr_from)
+                               continue;
+
+                       if (quota->charge_addr_from && r->ar.start <
+                                       quota->charge_addr_from) {
+                               sz = ALIGN_DOWN(quota->charge_addr_from -
+                                               r->ar.start, DAMON_MIN_REGION);
+                               if (!sz) {
+                                       if (r->ar.end - r->ar.start <=
+                                                       DAMON_MIN_REGION)
+                                               continue;
+                                       sz = DAMON_MIN_REGION;
+                               }
+                               damon_split_region_at(c, t, r, sz);
+                               r = damon_next_region(r);
+                               sz = r->ar.end - r->ar.start;
+                       }
+                       quota->charge_target_from = NULL;
+                       quota->charge_addr_from = 0;
+               }
+
                /* Check the target regions condition */
                if (sz < s->min_sz_region || s->max_sz_region < sz)
                        continue;
@@ -573,6 +606,10 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
                        }
                        c->primitive.apply_scheme(c, t, r, s);
                        quota->charged_sz += sz;
+                       if (quota->sz && quota->charged_sz >= quota->sz) {
+                               quota->charge_target_from = t;
+                               quota->charge_addr_from = r->ar.end + 1;
+                       }
                }
                if (s->action != DAMOS_STAT)
                        r->age = 0;