perf stat: Support old kernels for bperf cgroup counting
Author:     Namhyung Kim <namhyung@kernel.org>
AuthorDate: Tue, 11 Oct 2022 05:28:08 +0000 (22:28 -0700)
Commit:     Arnaldo Carvalho de Melo <acme@redhat.com>
CommitDate: Fri, 14 Oct 2022 13:29:05 +0000 (10:29 -0300)
A recent change in the kernel's cgroup struct breaks backward
compatibility in the BPF program.  It should support both old and new
kernels by using the BPF CO-RE technique.

Like the task_struct->__state handling in the off-CPU analysis, we can
check for the field name in the cgroup struct, as sketched below.
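
For reference, the off-CPU analysis resolves the v5.14 rename of
task_struct->state to task_struct->__state with the same
bpf_core_field_exists() pattern; a minimal sketch of that approach
(helper name illustrative, following tools/perf/util/bpf_skel/off_cpu.bpf.c):

    /* old kernel task_struct definition, before 'state' was renamed */
    struct task_struct___old {
            long state;
    } __attribute__((preserve_access_index));

    static inline int get_task_state(struct task_struct *t)
    {
            /* new kernel: the field exists under its new name */
            if (bpf_core_field_exists(t->__state))
                    return BPF_CORE_READ(t, __state);

            /* old kernel: recast pointer to capture old type for compiler */
            struct task_struct___old *t_old = (void *)t;

            return BPF_CORE_READ(t_old, state);
    }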

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: bpf@vger.kernel.org
Cc: cgroups@vger.kernel.org
Cc: zefan li <lizefan.x@bytedance.com>
Link: http://lore.kernel.org/lkml/20221011052808.282394-1-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 435a875566881dd994d3035a1fa5dd0eae3e62ed..6a438e0102c5a2cbc724ca70cbc3a4e2dd96a474 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -43,6 +43,18 @@ struct {
        __uint(value_size, sizeof(struct bpf_perf_event_value));
 } cgrp_readings SEC(".maps");
 
+/* new kernel cgroup definition */
+struct cgroup___new {
+       int level;
+       struct cgroup *ancestors[];
+} __attribute__((preserve_access_index));
+
+/* old kernel cgroup definition */
+struct cgroup___old {
+       int level;
+       u64 ancestor_ids[];
+} __attribute__((preserve_access_index));
+
 const volatile __u32 num_events = 1;
 const volatile __u32 num_cpus = 1;
 
@@ -50,6 +62,21 @@ int enabled = 0;
 int use_cgroup_v2 = 0;
 int perf_subsys_id = -1;
 
+static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
+{
+       /* recast pointer to capture new type for compiler */
+       struct cgroup___new *cgrp_new = (void *)cgrp;
+
+       if (bpf_core_field_exists(cgrp_new->ancestors)) {
+               return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
+       } else {
+               /* recast pointer to capture old type for compiler */
+               struct cgroup___old *cgrp_old = (void *)cgrp;
+
+               return BPF_CORE_READ(cgrp_old, ancestor_ids[level]);
+       }
+}
+
 static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
 {
        struct task_struct *p = (void *)bpf_get_current_task();
@@ -77,7 +104,7 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
                        break;
 
                // convert cgroup-id to a map index
-               cgrp_id = BPF_CORE_READ(cgrp, ancestors[i], kn, id);
+               cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
                elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
                if (!elem)
                        continue;
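
With this change applied, bperf cgroup counting should work on both old
and new kernels; a typical invocation (the cgroup names here are just
examples):

    # perf stat --bpf-counters --for-each-cgroup system.slice,user.slice -a sleep 1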