drivers/hid/bpf/hid_bpf_dispatch.c

// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2022 Benjamin Tissoires
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
#include "entrypoints/entrypoints.lskel.h"

struct hid_bpf_ops *hid_bpf_ops;
EXPORT_SYMBOL(hid_bpf_ops);

/**
 * hid_bpf_device_event - Called whenever an event comes in from the device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the incoming data buffer; a negative error code to interrupt the
 * processing of this event
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called for
 * any incoming event from the device itself.
 *
 * The function is called in IRQ context, so we cannot sleep.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
{
        return 0;
}
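
/*
 * Example (illustrative sketch, not part of this file): a minimal device
 * event program. The program name "ignore_button" and the 4-byte access
 * below are made up; real programs size the access to their report.
 *
 *      SEC("fmod_ret/hid_bpf_device_event")
 *      int BPF_PROG(ignore_button, struct hid_bpf_ctx *hctx)
 *      {
 *              __u8 *data = hid_bpf_get_data(hctx, 0, 4);
 *
 *              if (!data)
 *                      return 0;       // out of bounds: leave the event alone
 *
 *              data[1] &= 0xfe;        // mask out a hypothetical button bit
 *
 *              return 0;               // keep processing with the same size
 *      }
 */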

u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
                              u32 *size, int interrupt)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .report_type = type,
                        .allocated_size = hdev->bpf.allocated_data,
                        .size = *size,
                },
                .data = hdev->bpf.device_data,
        };
        int ret;

        if (type >= HID_REPORT_TYPES)
                return ERR_PTR(-EINVAL);

        /* no program has been attached yet */
        if (!hdev->bpf.device_data)
                return data;

        memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
        memcpy(ctx_kern.data, data, *size);

        ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
        if (ret < 0)
                return ERR_PTR(ret);

        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)
                        return ERR_PTR(-EINVAL);

                *size = ret;
        }

        return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);

/**
 * hid_bpf_rdesc_fixup - Called when the probe function parses the report
 * descriptor of the HID device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the report descriptor; a negative error code to ignore the fixup
 * and keep the original descriptor
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called before any
 * parsing of the report descriptor by HID.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
        return 0;
}
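
/*
 * Example (illustrative sketch, not part of this file): a report descriptor
 * fixup program. The offset and values are made up; a real program patches
 * bytes it knows about in the descriptor of its target device.
 *
 *      SEC("fmod_ret/hid_bpf_rdesc_fixup")
 *      int BPF_PROG(my_rdesc_fixup, struct hid_bpf_ctx *hctx)
 *      {
 *              __u8 *data = hid_bpf_get_data(hctx, 0, 4096);
 *
 *              if (!data)
 *                      return 0;
 *
 *              if (data[3] == 0x02)    // hypothetical: Usage (Mouse)
 *                      data[3] = 0x06; //              -> Usage (Keyboard)
 *
 *              return 0;               // keep the original size
 *      }
 */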

u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
{
        int ret;
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .size = *size,
                        .allocated_size = HID_MAX_DESCRIPTOR_SIZE,
                },
        };

        ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
        if (!ctx_kern.data)
                goto ignore_bpf;

        memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

        ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
        if (ret < 0)
                goto ignore_bpf;

        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)
                        goto ignore_bpf;

                *size = ret;
        }

        rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);

        return rdesc;

 ignore_bpf:
        kfree(ctx_kern.data);
        return kmemdup(rdesc, *size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);

static int device_match_id(struct device *dev, const void *id)
{
        struct hid_device *hdev = to_hid_device(dev);

        return hdev->id == *(int *)id;
}

static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
        u8 *alloc_data;
        unsigned int i, j, max_report_len = 0;
        size_t alloc_size = 0;

        /* compute the maximum report length for this device */
        for (i = 0; i < HID_REPORT_TYPES; i++) {
                struct hid_report_enum *report_enum = hdev->report_enum + i;

                for (j = 0; j < HID_MAX_IDS; j++) {
                        struct hid_report *report = report_enum->report_id_hash[j];

                        if (report)
                                max_report_len = max(max_report_len, hid_report_len(report));
                }
        }

        /*
         * Give us a little bit of extra space and some predictability in the
         * buffer length we create. This way, we can tell users that they can
         * work on chunks of 64 bytes of memory without having the bpf verifier
         * scream at them.
         */
        alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;

        alloc_data = kzalloc(alloc_size, GFP_KERNEL);
        if (!alloc_data)
                return -ENOMEM;

        *data = alloc_data;
        *size = alloc_size;

        return 0;
}

static int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
        /* hdev->bpf.device_data is already allocated, nothing to do */
        if (hdev->bpf.device_data)
                return 0;

        return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}

int hid_bpf_reconnect(struct hid_device *hdev)
{
        if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                return device_reprobe(&hdev->dev);

        return 0;
}

static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
                                  __u32 flags)
{
        int fd, err, prog_type;

        prog_type = hid_bpf_get_prog_attach_type(prog);
        if (prog_type < 0)
                return prog_type;

        if (prog_type >= HID_BPF_PROG_TYPE_MAX)
                return -EINVAL;

        if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
                err = hid_bpf_allocate_event_data(hdev);
                if (err)
                        return err;
        }

        fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
        if (fd < 0)
                return fd;

        if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
                err = hid_bpf_reconnect(hdev);
                if (err) {
                        close_fd(fd);
                        return err;
                }
        }

        return fd;
}

/* Disables missing prototype warnings */
__bpf_kfunc_start_defs();

/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
        struct hid_bpf_ctx_kern *ctx_kern;

        if (!ctx)
                return NULL;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

        if (rdwr_buf_size + offset > ctx->allocated_size)
                return NULL;

        return ctx_kern->data + offset;
}

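/*
 * Example (illustrative sketch): BPF callers pass a constant size and get
 * %NULL back when @offset plus that size does not fit in the allocated
 * buffer, so the returned pointer is always safe to use for that many bytes.
 *
 *      __u8 *data = hid_bpf_get_data(hctx, 0, 8);
 *
 *      if (!data)
 *              return 0;
 */
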
/**
 * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 * @prog_fd: an fd in the user process representing the program to attach
 * @flags: any logical OR combination of &enum hid_bpf_attach_flags
 *
 * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
 * Closing this fd will detach the program from the HID device (unless the bpf_link
 * is pinned to the BPF file system).
 */
/* called from syscall */
__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
        struct hid_device *hdev;
        struct bpf_prog *prog;
        struct device *dev;
        int err, fd;

        if (!hid_bpf_ops)
                return -EINVAL;

        if ((flags & ~HID_BPF_FLAG_MASK))
                return -EINVAL;

        dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
        if (!dev)
                return -EINVAL;

        hdev = to_hid_device(dev);

        /*
         * take a ref on the prog itself, it will be released
         * on errors or when it is detached
         */
        prog = bpf_prog_get(prog_fd);
        if (IS_ERR(prog)) {
                err = PTR_ERR(prog);
                goto out_dev_put;
        }

        fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
        if (fd < 0) {
                err = fd;
                goto out_prog_put;
        }

        return fd;

 out_prog_put:
        bpf_prog_put(prog);
 out_dev_put:
        put_device(dev);
        return err;
}

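/*
 * Example (illustrative sketch, modeled on the in-tree HID-BPF samples): the
 * attach is done from a SEC("syscall") program that userspace runs with
 * bpf_prog_test_run_opts(). "attach_prog_args" is a struct defined by the
 * loader, not by this file.
 *
 *      struct attach_prog_args {
 *              int prog_fd;
 *              unsigned int hid;
 *              int retval;
 *      };
 *
 *      SEC("syscall")
 *      int attach_prog(struct attach_prog_args *ctx)
 *      {
 *              ctx->retval = hid_bpf_attach_prog(ctx->hid,
 *                                                ctx->prog_fd,
 *                                                0);
 *              return 0;
 *      }
 */
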
/**
 * hid_bpf_allocate_context - Allocate a context to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
        struct hid_device *hdev;
        struct hid_bpf_ctx_kern *ctx_kern = NULL;
        struct device *dev;

        if (!hid_bpf_ops)
                return NULL;

        dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
        if (!dev)
                return NULL;

        hdev = to_hid_device(dev);

        ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
        if (!ctx_kern) {
                put_device(dev);
                return NULL;
        }

        ctx_kern->ctx.hid = hdev;

        return &ctx_kern->ctx;
}

/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        struct hid_device *hid;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

        kfree(ctx_kern);

        /* get_device() is called by bus_find_device() */
        put_device(&hid->dev);
}

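/*
 * Example (illustrative sketch): hid_bpf_allocate_context() is an acquire
 * kfunc and hid_bpf_release_context() its matching release, so every
 * successfully allocated context must be released on all paths.
 *
 *      struct hid_bpf_ctx *ctx;
 *
 *      ctx = hid_bpf_allocate_context(hid_id);
 *      if (!ctx)
 *              return -1;      // device not found or HID-BPF not ready
 *
 *      // ... use ctx with hid_bpf_hw_request() and friends ...
 *
 *      hid_bpf_release_context(ctx);
 */
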
static int
__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz,
                          enum hid_report_type rtype)
{
        struct hid_report_enum *report_enum;
        struct hid_report *report;
        struct hid_device *hdev;
        u32 report_len;

        /* check arguments */
        if (!ctx || !hid_bpf_ops || !buf)
                return -EINVAL;

        switch (rtype) {
        case HID_INPUT_REPORT:
        case HID_OUTPUT_REPORT:
        case HID_FEATURE_REPORT:
                break;
        default:
                return -EINVAL;
        }

        if (*buf__sz < 1)
                return -EINVAL;

        hdev = (struct hid_device *)ctx->hid; /* discard const */

        report_enum = hdev->report_enum + rtype;
        report = hid_bpf_ops->hid_get_report(report_enum, buf);
        if (!report)
                return -EINVAL;

        report_len = hid_report_len(report);

        if (*buf__sz > report_len)
                *buf__sz = report_len;

        return 0;
}

/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
                   enum hid_report_type rtype, enum hid_class_request reqtype)
{
        struct hid_device *hdev;
        size_t size = buf__sz;
        u8 *dma_data;
        int ret;

        /* check arguments */
        ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype);
        if (ret)
                return ret;

        switch (reqtype) {
        case HID_REQ_GET_REPORT:
        case HID_REQ_GET_IDLE:
        case HID_REQ_GET_PROTOCOL:
        case HID_REQ_SET_REPORT:
        case HID_REQ_SET_IDLE:
        case HID_REQ_SET_PROTOCOL:
                break;
        default:
                return -EINVAL;
        }

        hdev = (struct hid_device *)ctx->hid; /* discard const */

        dma_data = kmemdup(buf, size, GFP_KERNEL);
        if (!dma_data)
                return -ENOMEM;

        ret = hid_bpf_ops->hid_hw_raw_request(hdev,
                                              dma_data[0],
                                              dma_data,
                                              size,
                                              rtype,
                                              reqtype);

        if (ret > 0)
                memcpy(buf, dma_data, ret);

        kfree(dma_data);
        return ret;
}

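/*
 * Example (illustrative sketch, modeled on the HID-BPF selftests): fetch a
 * feature report from a SEC("syscall") program. The "args" struct and its
 * field names are defined by the loader, not by this file.
 *
 *      SEC("syscall")
 *      int hid_user_raw_request(struct hid_hw_request_syscall_args *args)
 *      {
 *              struct hid_bpf_ctx *ctx;
 *              const size_t size = args->size;
 *              int ret;
 *
 *              if (size > sizeof(args->data))
 *                      return -7;      // -E2BIG
 *
 *              ctx = hid_bpf_allocate_context(args->hid);
 *              if (!ctx)
 *                      return -1;
 *
 *              ret = hid_bpf_hw_request(ctx, args->data, size,
 *                                       HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *              args->retval = ret;
 *
 *              hid_bpf_release_context(ctx);
 *
 *              return 0;
 *      }
 */
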
/**
 * hid_bpf_hw_output_report - Send an output report to a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz)
{
        struct hid_device *hdev;
        size_t size = buf__sz;
        u8 *dma_data;
        int ret;

        /* check arguments */
        ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT);
        if (ret)
                return ret;

        hdev = (struct hid_device *)ctx->hid; /* discard const */

        dma_data = kmemdup(buf, size, GFP_KERNEL);
        if (!dma_data)
                return -ENOMEM;

        ret = hid_bpf_ops->hid_hw_output_report(hdev,
                                                dma_data,
                                                size);

        kfree(dma_data);
        return ret;
}
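
/*
 * Example (illustrative sketch): send an output report through a previously
 * allocated context. The report ID and payload byte are made up.
 *
 *      __u8 report[2] = { 0x01, 0x42 };        // hypothetical: ID + one byte
 *      int ret;
 *
 *      ret = hid_bpf_hw_output_report(ctx, report, sizeof(report));
 *      if (ret < 0)
 *              return ret;     // forwarded from the transport driver
 */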

/**
 * hid_bpf_input_report - Inject a HID report in the kernel from a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 *
 * Returns %0 on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf,
                     const size_t buf__sz)
{
        struct hid_device *hdev;
        size_t size = buf__sz;
        int ret;

        /* check arguments */
        ret = __hid_bpf_hw_check_params(ctx, buf, &size, type);
        if (ret)
                return ret;

        hdev = (struct hid_device *)ctx->hid; /* discard const */

        return hid_bpf_ops->hid_input_report(hdev, type, buf, size, 0);
}
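
/*
 * Example (illustrative sketch): inject an input report as if it came from
 * the device itself. The payload is made up.
 *
 *      __u8 report[4] = { 0x01, 0x00, 0x00, 0x00 };    // hypothetical report
 *
 *      return hid_bpf_input_report(ctx, HID_INPUT_REPORT,
 *                                  report, sizeof(report));
 */
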
__bpf_kfunc_end_defs();

/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE | KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_request, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report, KF_SLEEPABLE)
BTF_ID_FLAGS(func, hid_bpf_input_report, KF_SLEEPABLE)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &hid_bpf_kfunc_ids,
};

/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
BTF_ID_FLAGS(func, hid_bpf_device_event)
BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
BTF_ID_FLAGS(func, __hid_bpf_tail_call)
BTF_SET8_END(hid_bpf_fmodret_ids)

static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
        .owner = THIS_MODULE,
        .set = &hid_bpf_fmodret_ids,
};

/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_ID_FLAGS(func, hid_bpf_hw_output_report)
BTF_ID_FLAGS(func, hid_bpf_input_report)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &hid_bpf_syscall_kfunc_ids,
};

int hid_bpf_connect_device(struct hid_device *hdev)
{
        struct hid_bpf_prog_list *prog_list;

        rcu_read_lock();
        prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
        rcu_read_unlock();

        /* only allocate BPF data if there are programs attached */
        if (!prog_list)
                return 0;

        return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);

void hid_bpf_disconnect_device(struct hid_device *hdev)
{
        kfree(hdev->bpf.device_data);
        hdev->bpf.device_data = NULL;
        hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);

void hid_bpf_destroy_device(struct hid_device *hdev)
{
        if (!hdev)
                return;

        /* mark the device as destroyed in bpf so we don't reattach it */
        hdev->bpf.destroyed = true;

        __hid_bpf_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);

void hid_bpf_device_init(struct hid_device *hdev)
{
        spin_lock_init(&hdev->bpf.progs_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);

static int __init hid_bpf_init(void)
{
        int err;

        /* Note: if we exit with an error any time here, we would entirely break HID, which
         * is probably not something we want. So we log an error and return success.
         *
         * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
         * will not be available, so nobody will be able to use the functionality.
         */

        err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
        if (err) {
                pr_warn("error while registering fmodret entrypoints: %d", err);
                return 0;
        }

        err = hid_bpf_preload_skel();
        if (err) {
                pr_warn("error while preloading HID BPF dispatcher: %d", err);
                return 0;
        }

        /* register tracing kfuncs after we are sure we can load our preloaded bpf program */
        err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
        if (err) {
                pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
                return 0;
        }

        /* register syscalls after we are sure we can load our preloaded bpf program */
        err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
        if (err) {
                pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
                return 0;
        }

        return 0;
}

static void __exit hid_bpf_exit(void)
{
        /* HID depends on us, so if we hit that code, we are guaranteed that hid
         * has been removed and thus we do not need to clear the HID devices
         */
        hid_bpf_free_links_and_skel();
}

late_initcall(hid_bpf_init);
module_exit(hid_bpf_exit);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");