// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>
#include <stdlib.h>
#include <string.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_top.skel.h"

/*
 * This should be in sync with "util/kwork_top.bpf.c"
 */
#define MAX_COMMAND_LEN 16

struct work_data {
	__u64 runtime;
};

struct task_data {
	__u32 tgid;
	__u32 is_kthread;
	char comm[MAX_COMMAND_LEN];
};

struct work_key {
	__u32 type;
	__u32 pid;
};

struct task_key {
	__u32 pid;
	__u32 cpu;
};

struct kwork_class_bpf {
	struct kwork_class *class;
	void (*load_prepare)(void);
};

static struct kwork_top_bpf *skel;
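
/*
 * Profiling window control: record the monotonic start/stop timestamps in
 * the skeleton BSS and flip the "enabled" flag that gates event collection
 * on the BPF side.
 */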
void perf_kwork__top_start(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	skel->bss->from_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
	skel->bss->enabled = 1;
	pr_debug("perf kwork top start at: %lld\n", skel->bss->from_timestamp);
}

void perf_kwork__top_finish(void)
{
	struct timespec ts;

	skel->bss->enabled = 0;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	skel->bss->to_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
	pr_debug("perf kwork top finish at: %lld\n", skel->bss->to_timestamp);
}
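
/*
 * load_prepare() callbacks: each kwork class marks only its own BPF
 * programs for autoload, so handlers for unused classes are never loaded.
 */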
static void irq_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_irq_handler_entry, true);
	bpf_program__set_autoload(skel->progs.on_irq_handler_exit, true);
}

static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare = irq_load_prepare,
};

static void softirq_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_softirq_entry, true);
	bpf_program__set_autoload(skel->progs.on_softirq_exit, true);
}

static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare = softirq_load_prepare,
};

static void sched_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_switch, true);
}

static struct kwork_class_bpf kwork_sched_bpf = {
	.load_prepare = sched_load_prepare,
};
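
/*
 * BPF backends indexed by kwork class type; a NULL slot means the class
 * is not supported in BPF top mode.
 */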
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]	= &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ]	= &kwork_softirq_bpf,
	[KWORK_CLASS_SCHED]	= &kwork_sched_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX;
}
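
/*
 * If a CPU list was given, mark each requested CPU in the BPF cpu-filter
 * map and set has_cpu_filter so the BPF side only accounts those CPUs.
 */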
static int setup_filters(struct perf_kwork *kwork)
{
	u8 val = 1;
	int i, nr_cpus, fd;
	struct perf_cpu_map *map;

	if (kwork->cpu_list) {
		fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter);
		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (!map) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		for (i = 0; i < perf_cpu_map__nr(map); i++) {
			struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);
		skel->bss->has_cpu_filter = 1;
	}

	return 0;
}
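
/*
 * Open the skeleton, autoload only the programs needed by the requested
 * classes, then load, install filters and attach.
 */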
int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_top_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork top skeleton\n");
		return -1;
	}

	/*
	 * Set all progs to non-autoload, then enable the ones required by
	 * the configured classes.
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    !kwork_class_bpf_supported_list[type]) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;
		if (class_bpf->load_prepare)
			class_bpf->load_prepare();
	}

	if (kwork_top_bpf__load(skel)) {
		pr_debug("Failed to load kwork top skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_top_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork top skeleton\n");
		goto out;
	}

	return 0;

out:
	kwork_top_bpf__destroy(skel);
	return -1;
}
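
/*
 * Look up tgid, kthread flag and command name for a work item in the
 * per-task BPF map.
 */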
static void read_task_info(struct kwork_work *work)
{
	int fd;
	struct task_data data;
	struct task_key key = {
		.pid = work->id,
		.cpu = work->cpu,
	};

	fd = bpf_map__fd(skel->maps.kwork_top_tasks);
	if (fd < 0) {
		pr_debug("Invalid top tasks map fd\n");
		return;
	}

	if (!bpf_map_lookup_elem(fd, &key, &data)) {
		work->tgid = data.tgid;
		work->is_kthread = data.is_kthread;
		work->name = strdup(data.comm);
	}
}
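
/*
 * Turn one (work key, per-CPU runtime) pair from the BPF map into a
 * kwork_work entry and attach its task info.
 */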
static int add_work(struct perf_kwork *kwork, struct work_key *key,
		    struct work_data *data, int cpu)
{
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work *work;
	struct kwork_work tmp = {
		.id = key->pid,
		.cpu = cpu,
		.name = NULL,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (!work)
		return -1;

	work->total_runtime = data->runtime;
	read_task_info(work);

	return 0;
}
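
/*
 * Iterate the BPF works map and add every non-zero per-CPU runtime to the
 * report.
 */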
int perf_kwork__top_read_bpf(struct perf_kwork *kwork)
{
	int i, fd, nr_cpus;
	struct work_data *data;
	struct work_key key, prev;

	fd = bpf_map__fd(skel->maps.kwork_top_works);
	if (fd < 0) {
		pr_debug("Invalid top runtime fd\n");
		return -1;
	}

	nr_cpus = libbpf_num_possible_cpus();
	data = calloc(nr_cpus, sizeof(struct work_data));
	if (!data)
		return -1;

	memset(&prev, 0, sizeof(prev));
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		/* lookup fills one work_data slot per possible CPU */
		if (bpf_map_lookup_elem(fd, &key, data) != 0) {
			pr_debug("Failed to lookup top elem\n");
			free(data);
			return -1;
		}
		for (i = 0; i < nr_cpus; i++) {
			if (data[i].runtime == 0)
				continue;
			if (add_work(kwork, &key, &data[i], i)) {
				free(data);
				return -1;
			}
		}
		prev = key;
	}
	free(data);
	return 0;
}
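
/*
 * Detach and free the BPF skeleton once reporting is done.
 */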
void perf_kwork__top_cleanup_bpf(void)
{
	kwork_top_bpf__destroy(skel);
}