perf stat: Fix cpu map index in bperf cgroup code
tools/perf/util/bpf_counter_cgroup.c (linux-stable)
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2021 Facebook */
/* Copyright (c) 2021 Google */
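
/*
 * bperf cgroup counting: a single shared BPF program counts events for
 * all monitored cgroups, instead of opening one perf_event per cgroup
 * per event.  Used by `perf stat --bpf-counters --for-each-cgroup ...`
 * through bperf_cgrp_ops at the bottom of this file.
 */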

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "affinity.h"
#include "bpf_counter.h"
#include "cgroup.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bperf_cgroup.skel.h"

static struct perf_event_attr cgrp_switch_attr = {
        .type = PERF_TYPE_SOFTWARE,
        .config = PERF_COUNT_SW_CGROUP_SWITCHES,
        .size = sizeof(cgrp_switch_attr),
        .sample_period = 1,
        .disabled = 1,
};

static struct evsel *cgrp_switch;
static struct bperf_cgroup_bpf *skel;
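
/* perf event fd of evsel 'evt' at cpu-map index 'cpu' (not the raw CPU number) */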
#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))

static int bperf_load_program(struct evlist *evlist)
{
        struct bpf_link *link;
        struct evsel *evsel;
        struct cgroup *cgrp, *leader_cgrp;
        int i, j;
        struct perf_cpu cpu;
        int total_cpus = cpu__max_cpu().cpu;
        int map_size, map_fd;
        int prog_fd, err;

        skel = bperf_cgroup_bpf__open();
        if (!skel) {
                pr_err("Failed to open cgroup skeleton\n");
                return -1;
        }

        skel->rodata->num_cpus = total_cpus;
        skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;

        BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);

        /* we need one copy of events per cpu for reading */
        map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
        bpf_map__set_max_entries(skel->maps.events, map_size);
        bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
        /* previous result is saved in a per-cpu array */
        map_size = evlist->core.nr_entries / nr_cgroups;
        bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
        /* cgroup result needs all events (per-cpu) */
        map_size = evlist->core.nr_entries;
        bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
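
        /*
         * Layout: the events map is flat, holding the fd for event e on
         * cpu c at index e * total_cpus + c (see the update loop below);
         * cgrp_readings is a per-cpu array keyed by the event's index in
         * the evlist (see bperf_cgrp__read()).
         */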

        set_max_rlimit();

        err = bperf_cgroup_bpf__load(skel);
        if (err) {
                pr_err("Failed to load cgroup skeleton\n");
                goto out;
        }
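
        /*
         * Tell the BPF program how to find the current cgroup: on the v2
         * unified hierarchy it can use the cgroup id directly, while v1
         * goes through the perf_event controller's hierarchy.
         */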
        if (cgroup_is_v2("perf_event") > 0)
                skel->bss->use_cgroup_v2 = 1;

        err = -1;

        cgrp_switch = evsel__new(&cgrp_switch_attr);
        if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
                pr_err("Failed to open cgroup switches event\n");
                goto out;
        }
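
        /*
         * Attach the counting program to the cgroup-switch software event
         * on each cpu; it fires whenever a context switch crosses a cgroup
         * boundary and updates the per-cgroup readings.
         */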
        perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
                                                      FD(cgrp_switch, i));
                if (IS_ERR(link)) {
                        pr_err("Failed to attach cgroup program\n");
                        err = PTR_ERR(link);
                        goto out;
                }
        }

        /*
         * Update cgrp_idx map from cgroup-id to event index.
         */
        cgrp = NULL;
        i = 0;
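
        /*
         * With --for-each-cgroup the evlist holds one copy of each event
         * per cgroup.  Only the copies belonging to the first (leader)
         * cgroup are actually opened, with no cgroup filter; the BPF
         * program multiplexes their counts across all cgroups.
         */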
        evlist__for_each_entry(evlist, evsel) {
                if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
                        leader_cgrp = evsel->cgrp;
                        evsel->cgrp = NULL;

                        /* open single copy of the events w/o cgroup */
                        err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
                        if (err) {
                                pr_err("Failed to open first cgroup events\n");
                                goto out;
                        }

                        map_fd = bpf_map__fd(skel->maps.events);
                        perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
                                int fd = FD(evsel, j);
                                __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;

                                err = bpf_map_update_elem(map_fd, &idx, &fd,
                                                          BPF_ANY);
                                if (err < 0) {
                                        pr_err("Failed to update perf_event fd\n");
                                        goto out;
                                }
                        }

                        evsel->cgrp = leader_cgrp;
                }
                evsel->supported = true;

                if (evsel->cgrp == cgrp)
                        continue;

                cgrp = evsel->cgrp;

                if (read_cgroup_id(cgrp) < 0) {
                        pr_err("Failed to get cgroup id\n");
                        err = -1;
                        goto out;
                }

                map_fd = bpf_map__fd(skel->maps.cgrp_idx);
                err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
                if (err < 0) {
                        pr_err("Failed to update cgroup index map\n");
                        goto out;
                }

                i++;
        }

        /*
         * bperf uses BPF_PROG_TEST_RUN to get accurate readings.  Check
         * whether the kernel supports it.
         */
        prog_fd = bpf_program__fd(skel->progs.trigger_read);
        err = bperf_trigger_reading(prog_fd, 0);
        if (err) {
                pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
                           "Therefore, --for-each-cgroup might show inaccurate readings\n");
                err = 0;
        }

out:
        return err;
}

static int bperf_cgrp__load(struct evsel *evsel,
                            struct target *target __maybe_unused)
{
        static bool bperf_loaded = false;
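
        /*
         * Cgroup mode shares a single skeleton, so the per-evsel leader
         * fds used by the non-cgroup bperf path stay unset.
         */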
        evsel->bperf_leader_prog_fd = -1;
        evsel->bperf_leader_link_fd = -1;

        if (!bperf_loaded && bperf_load_program(evsel->evlist))
                return -1;

        bperf_loaded = true;
        /* just to bypass bpf_counter_skip() */
        evsel->follower_skel = (struct bperf_follower_bpf *)skel;

        return 0;
}

static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
                                  int cpu __maybe_unused, int fd __maybe_unused)
{
        /* nothing to do */
        return 0;
}

/*
 * Trigger the leader prog on each cpu, so the cgrp_readings map gets
 * the latest results.
 */
static int bperf_cgrp__sync_counters(struct evlist *evlist)
{
        struct perf_cpu cpu;
        int idx;
        int prog_fd = bpf_program__fd(skel->progs.trigger_read);

        perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
                bperf_trigger_reading(prog_fd, cpu.cpu);

        return 0;
}

static int bperf_cgrp__enable(struct evsel *evsel)
{
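        /* the BPF state is global, so only the first event toggles it */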
        if (evsel->core.idx)
                return 0;

        bperf_cgrp__sync_counters(evsel->evlist);

        skel->bss->enabled = 1;
        return 0;
}

static int bperf_cgrp__disable(struct evsel *evsel)
{
        if (evsel->core.idx)
                return 0;

        bperf_cgrp__sync_counters(evsel->evlist);

        skel->bss->enabled = 0;
        return 0;
}

static int bperf_cgrp__read(struct evsel *evsel)
{
        struct evlist *evlist = evsel->evlist;
        int total_cpus = cpu__max_cpu().cpu;
        struct perf_counts_values *counts;
        struct bpf_perf_event_value *values;
        int reading_map_fd, err = 0;

        if (evsel->core.idx)
                return 0;

        bperf_cgrp__sync_counters(evsel->evlist);

        values = calloc(total_cpus, sizeof(*values));
        if (values == NULL)
                return -ENOMEM;

        reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);

        evlist__for_each_entry(evlist, evsel) {
                __u32 idx = evsel->core.idx;
                int i;
                struct perf_cpu cpu;

                err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
                if (err) {
                        pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
                               idx, evsel__name(evsel), evsel->cgrp->name);
                        goto out;
                }
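
                /*
                 * values[] is indexed by raw CPU number (cpu.cpu), while
                 * perf_counts uses the dense cpu-map index (i); mixing
                 * the two up is what the cpu map index fix named at the
                 * top of this file corrected.
                 */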
                perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                        counts = perf_counts(evsel->counts, i, 0);
                        counts->val = values[cpu.cpu].counter;
                        counts->ena = values[cpu.cpu].enabled;
                        counts->run = values[cpu.cpu].running;
                }
        }

out:
        free(values);
        return err;
}

static int bperf_cgrp__destroy(struct evsel *evsel)
{
        if (evsel->core.idx)
                return 0;

        bperf_cgroup_bpf__destroy(skel);
        evsel__delete(cgrp_switch);  /* it'll destroy on_switch progs too */

        return 0;
}
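
/*
 * Ops table for cgroup bperf counting, used when perf stat is run with
 * both --bpf-counters and --for-each-cgroup, e.g.:
 *
 *   perf stat --bpf-counters --for-each-cgroup A,B -e cycles,instructions
 */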
struct bpf_counter_ops bperf_cgrp_ops = {
        .load       = bperf_cgrp__load,
        .enable     = bperf_cgrp__enable,
        .disable    = bperf_cgrp__disable,
        .read       = bperf_cgrp__read,
        .install_pe = bperf_cgrp__install_pe,
        .destroy    = bperf_cgrp__destroy,
};