mm: cma: add trace events for CMA alloc perf testing
author     Liam Mark <lmark@codeaurora.org>
           Wed, 5 May 2021 01:37:25 +0000 (18:37 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 5 May 2021 18:27:24 +0000 (11:27 -0700)
Add cma and migrate trace events to enable CMA allocation performance to
be measured via ftrace.

[georgi.djakov@linaro.org: add the CMA instance name to the cma_alloc_start trace event]
Link: https://lkml.kernel.org/r/20210326155414.25006-1-georgi.djakov@linaro.org
Link: https://lkml.kernel.org/r/20210324160740.15901-1-georgi.djakov@linaro.org
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/trace/events/cma.h
include/trace/events/migrate.h
mm/cma.c
mm/migrate.c
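
For illustration only (not part of the commit): once applied, the new events show up in the cma and migrate trace groups, and allocation latency can be read from the per-event timestamps (cma_alloc_start at entry to cma_alloc(), the pre-existing cma_alloc event when the allocation completes, with cma_alloc_busy_retry and mm_migrate_pages_start marking contention and migration work in between). A minimal userspace sketch for enabling the events and dumping the buffer follows; the tracefs mount point and the way CMA allocations are triggered are assumptions.

/*
 * Rough userspace sketch: enable the new CMA and migrate trace events and
 * dump the ftrace buffer.  Assumes tracefs is mounted at /sys/kernel/tracing
 * (older setups use /sys/kernel/debug/tracing) and that it runs as root.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACEFS "/sys/kernel/tracing"

static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	char line[4096];
	FILE *trace;

	/* Events added by this patch ... */
	write_str(TRACEFS "/events/cma/cma_alloc_start/enable", "1");
	write_str(TRACEFS "/events/cma/cma_alloc_busy_retry/enable", "1");
	write_str(TRACEFS "/events/migrate/mm_migrate_pages_start/enable", "1");
	/* ... plus the pre-existing completion events they pair with. */
	write_str(TRACEFS "/events/cma/cma_alloc/enable", "1");
	write_str(TRACEFS "/events/migrate/mm_migrate_pages/enable", "1");

	/*
	 * Trigger CMA allocations here; how depends on the system, e.g. a
	 * driver or dma-buf heap that allocates from a CMA area.
	 */

	/*
	 * Dump the buffer.  The timestamp gap between cma_alloc_start and
	 * cma_alloc for a given CMA area is the allocation latency.
	 */
	trace = fopen(TRACEFS "/trace", "r");
	if (!trace) {
		perror(TRACEFS "/trace");
		return 1;
	}
	while (fgets(line, sizeof(line), trace))
		fputs(line, stdout);
	fclose(trace);
	return 0;
}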

index 5017a8829270ac56f5990f4694b317a65898316b..be1525a10457b3d79c49d366fbc0d7f6dcc2b90d 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/tracepoint.h>
 
-TRACE_EVENT(cma_alloc,
+DECLARE_EVENT_CLASS(cma_alloc_class,
 
        TP_PROTO(unsigned long pfn, const struct page *page,
                 unsigned int count, unsigned int align),
@@ -61,6 +61,46 @@ TRACE_EVENT(cma_release,
                  __entry->count)
 );
 
+TRACE_EVENT(cma_alloc_start,
+
+       TP_PROTO(const char *name, unsigned int count, unsigned int align),
+
+       TP_ARGS(name, count, align),
+
+       TP_STRUCT__entry(
+               __string(name, name)
+               __field(unsigned int, count)
+               __field(unsigned int, align)
+       ),
+
+       TP_fast_assign(
+               __assign_str(name, name);
+               __entry->count = count;
+               __entry->align = align;
+       ),
+
+       TP_printk("name=%s count=%u align=%u",
+                 __get_str(name),
+                 __entry->count,
+                 __entry->align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc,
+
+       TP_PROTO(unsigned long pfn, const struct page *page,
+                unsigned int count, unsigned int align),
+
+       TP_ARGS(pfn, page, count, align)
+);
+
+DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
+
+       TP_PROTO(unsigned long pfn, const struct page *page,
+                unsigned int count, unsigned int align),
+
+       TP_ARGS(pfn, page, count, align)
+);
+
 #endif /* _TRACE_CMA_H */
 
 /* This part must be outside protection */
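
Note on the hunk above: the body of cma_alloc_class is cut off by the diff context, so only its first lines are visible. Assuming the class simply keeps the fields of the former TRACE_EVENT(cma_alloc), which the two DEFINE_EVENT instances now share, it would look roughly like this:

/* Sketch of the shared event class; field layout and format string are
 * assumed from the old cma_alloc event, not shown in the hunk. */
DECLARE_EVENT_CLASS(cma_alloc_class,

	TP_PROTO(unsigned long pfn, const struct page *page,
		 unsigned int count, unsigned int align),

	TP_ARGS(pfn, page, count, align),

	TP_STRUCT__entry(
		__field(unsigned long, pfn)
		__field(const struct page *, page)
		__field(unsigned int, count)
		__field(unsigned int, align)
	),

	TP_fast_assign(
		__entry->pfn = pfn;
		__entry->page = page;
		__entry->count = count;
		__entry->align = align;
	),

	TP_printk("pfn=%lx page=%p count=%u align=%u",
		  __entry->pfn,
		  __entry->page,
		  __entry->count,
		  __entry->align)
);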
index 4d434398d64d0bb042f2bc2a76510dca1e726858..f2c9906038882ddb5a6361c94e99a4abcc3656eb 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -81,6 +81,28 @@ TRACE_EVENT(mm_migrate_pages,
                __print_symbolic(__entry->mode, MIGRATE_MODE),
                __print_symbolic(__entry->reason, MIGRATE_REASON))
 );
+
+TRACE_EVENT(mm_migrate_pages_start,
+
+       TP_PROTO(enum migrate_mode mode, int reason),
+
+       TP_ARGS(mode, reason),
+
+       TP_STRUCT__entry(
+               __field(enum migrate_mode, mode)
+               __field(int, reason)
+       ),
+
+       TP_fast_assign(
+               __entry->mode   = mode;
+               __entry->reason = reason;
+       ),
+
+       TP_printk("mode=%s reason=%s",
+                 __print_symbolic(__entry->mode, MIGRATE_MODE),
+                 __print_symbolic(__entry->reason, MIGRATE_REASON))
+);
+
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
index a862ff8c23ce815bebb90bc5907cc7661f8afed7..d1a4c06b50397b5994f83400d8a1e69837270d31 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -443,6 +443,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
        if (!count)
                goto out;
 
+       trace_cma_alloc_start(cma->name, count, align);
+
        mask = cma_bitmap_aligned_mask(cma, align);
        offset = cma_bitmap_aligned_offset(cma, align);
        bitmap_maxno = cma_bitmap_maxno(cma);
@@ -483,6 +485,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
+
+               trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
                /* try again with a bit different memory target */
                start = bitmap_no + mask + 1;
        }
index 30c65c2be30ba3e3986c0508f3f1535aec366a78..6b37d00890ca59d7859f5af77a81e5abf551405f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1418,6 +1418,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
        int rc, nr_subpages;
        LIST_HEAD(ret_pages);
 
+       trace_mm_migrate_pages_start(mode, reason);
+
        if (!swapwrite)
                current->flags |= PF_SWAPWRITE;