// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */

#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"
static struct perf_event_attr cgrp_switch_attr = {
	.type = PERF_TYPE_SOFTWARE,
	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
	.size = sizeof(cgrp_switch_attr),
	.inherit = 1,
	.disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;
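
/* fd of the perf event @evt opened on the cpu at index @cpu */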
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

static int bperf_load_program(struct evlist *evlist)
{
	struct bpf_link *link;
	struct evsel *evsel;
	struct cgroup *cgrp, *leader_cgrp;
	int i, j;
	struct perf_cpu cpu;
	int total_cpus = cpu__max_cpu().cpu;
	int map_size, map_fd;
	int prog_fd, err;

	skel = bperf_cgroup_bpf__open();
	if (!skel) {
		pr_err("Failed to open cgroup skeleton\n");
		return -1;
	}

	skel->rodata->num_cpus = total_cpus;
	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
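
	/* map sizes must be set before load; max_entries is fixed afterwards */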
	/* we need one copy of events per cpu for reading */
	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.events, map_size);
	bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
	/* previous result is saved in a per-cpu array */
	map_size = evlist->core.nr_entries / nr_cgroups;
	bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
	/* cgroup result needs all events (per-cpu) */
	map_size = evlist->core.nr_entries;
	bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);

	/* load &amp; verify skeleton */
	err = bperf_cgroup_bpf__load(skel);
	if (err) {
		pr_err("Failed to load cgroup skeleton\n");
		goto out;
	}

	if (cgroup_is_v2("perf_event") > 0)
		skel->bss->use_cgroup_v2 = 1;

	err = -1;

	cgrp_switch = evsel__new(&cgrp_switch_attr);
	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
		pr_err("Failed to open cgroup switches event\n");
		goto out;
	}
	perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
						      FD(cgrp_switch, i));
		if (IS_ERR(link)) {
			pr_err("Failed to attach cgroup program\n");
			err = PTR_ERR(link);
			goto out;
		}
	}

	/*
	 * Update cgrp_idx map from cgroup-id to event index.
	 */
	cgrp = NULL;
	i = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
			leader_cgrp = evsel->cgrp;
			evsel->cgrp = NULL;

			/* open single copy of the events w/o cgroup */
			err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
			if (err) {
				pr_err("Failed to open first cgroup events\n");
				goto out;
			}
			map_fd = bpf_map__fd(skel->maps.events);
			perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
				int fd = FD(evsel, j);
				__u32 idx = evsel->core.idx * total_cpus + cpu.cpu;

				err = bpf_map_update_elem(map_fd, &idx, &fd,
							  BPF_ANY);
				if (err < 0) {
					pr_err("Failed to update perf_event fd\n");
					goto out;
				}
			}

			evsel->cgrp = leader_cgrp;
		}

		evsel->supported = true;

		if (evsel->cgrp == cgrp)
			continue;

		cgrp = evsel->cgrp;

		if (read_cgroup_id(cgrp) < 0) {
			pr_err("Failed to get cgroup id\n");
			err = -1;
			goto out;
		}

		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
		if (err < 0) {
			pr_err("Failed to update cgroup index map\n");
			goto out;
		}

		i++;
	}

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate reading. Check
	 * whether the kernel supports it.
	 */
	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		err = 0;
	}

out:
	return err;
}
static int bperf_cgrp__load(struct evsel *evsel,
			    struct target *target __maybe_unused)
{
	static bool bperf_loaded = false;

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	if (!bperf_loaded && bperf_load_program(evsel->evlist))
		return -1;

	bperf_loaded = true;
	/* just to bypass bpf_counter_skip() */
	evsel->follower_skel = (struct bperf_follower_bpf *)skel;

	return 0;
}

static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
				  int cpu __maybe_unused, int fd __maybe_unused)
{
	/* nothing to do */
	return 0;
}

/*
 * trigger the leader prog on each cpu, so the cgrp_readings map could get
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
	struct perf_cpu cpu;
	int idx;
	int prog_fd = bpf_program__fd(skel->progs.trigger_read);

	perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
		bperf_trigger_reading(prog_fd, cpu.cpu);

	return 0;
}
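
/* only the first evsel (idx 0) toggles the global enabled flag */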
static int bperf_cgrp__enable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 1;
	return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	skel->bss->enabled = 0;
	return 0;
}

static int bperf_cgrp__read(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	int total_cpus = cpu__max_cpu().cpu;
	struct perf_counts_values *counts;
	struct bpf_perf_event_value *values;
	int reading_map_fd, err = 0;

	if (evsel->core.idx)
		return 0;

	bperf_cgrp__sync_counters(evsel->evlist);

	values = calloc(total_cpus, sizeof(*values));
	if (values == NULL)
		return -ENOMEM;

	reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
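
	/* cgrp_readings is per-cpu: one lookup fills values[] for all cpus */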
	evlist__for_each_entry(evlist, evsel) {
		__u32 idx = evsel->core.idx;
		int i;
		struct perf_cpu cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
		if (err) {
			pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
			       idx, evsel__name(evsel), evsel->cgrp->name);
			goto out;
		}

		perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
			counts = perf_counts(evsel->counts, i, 0);
			counts->val = values[cpu.cpu].counter;
			counts->ena = values[cpu.cpu].enabled;
			counts->run = values[cpu.cpu].running;
		}
	}

out:
	free(values);
	return err;
}

static int bperf_cgrp__destroy(struct evsel *evsel)
{
	if (evsel->core.idx)
		return 0;

	bperf_cgroup_bpf__destroy(skel);
	evsel__delete(cgrp_switch); // it'll destroy on_switch progs too

	return 0;
}

struct bpf_counter_ops bperf_cgrp_ops = {
	.load = bperf_cgrp__load,
	.enable = bperf_cgrp__enable,
	.disable = bperf_cgrp__disable,
	.read = bperf_cgrp__read,
	.install_pe = bperf_cgrp__install_pe,
	.destroy = bperf_cgrp__destroy,
};