HID: bpf/dispatch: regroup kfuncs definitions
drivers/hid/bpf/hid_bpf_dispatch.c (linux-block.git)
// SPDX-License-Identifier: GPL-2.0-only

/*
 * HID-BPF support for Linux
 *
 * Copyright (c) 2022 Benjamin Tissoires
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/hid.h>
#include <linux/hid_bpf.h>
#include <linux/init.h>
#include <linux/kfifo.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include "hid_bpf_dispatch.h"
#include "entrypoints/entrypoints.lskel.h"

struct hid_bpf_ops *hid_bpf_ops;
EXPORT_SYMBOL(hid_bpf_ops);

/**
 * hid_bpf_device_event - Called whenever an event is coming in from the device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to change the
 * size of the incoming data buffer; a negative error code to interrupt the
 * processing of this event
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called for
 * any incoming event from the device itself.
 *
 * The function is called in IRQ context, so we can not sleep.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
{
        return 0;
}
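
/*
 * Illustrative only, not compiled as part of this file: a minimal BPF program
 * hooking this entrypoint could look like the sketch below (the program name
 * and the patched byte are made up). It grabs the incoming report through the
 * hid_bpf_get_data() kfunc defined later in this file and rewrites one byte:
 *
 *      SEC("fmod_ret/hid_bpf_device_event")
 *      int BPF_PROG(flip_byte, struct hid_bpf_ctx *hctx)
 *      {
 *              __u8 *data = hid_bpf_get_data(hctx, 0, 4);
 *
 *              if (!data)
 *                      return 0;
 *
 *              data[1] = ~data[1];
 *
 *              return 0;
 *      }
 */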

u8 *
dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
                              u32 *size, int interrupt)
{
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .report_type = type,
                        .allocated_size = hdev->bpf.allocated_data,
                        .size = *size,
                },
                .data = hdev->bpf.device_data,
        };
        int ret;

        if (type >= HID_REPORT_TYPES)
                return ERR_PTR(-EINVAL);

        /* no program has been attached yet */
        if (!hdev->bpf.device_data)
                return data;

        memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
        memcpy(ctx_kern.data, data, *size);

        ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
        if (ret < 0)
                return ERR_PTR(ret);

        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)
                        return ERR_PTR(-EINVAL);

                *size = ret;
        }

        return ctx_kern.data;
}
EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);

/**
 * hid_bpf_rdesc_fixup - Called when the probe function parses the report
 * descriptor of the HID device
 *
 * @ctx: The HID-BPF context
 *
 * @return %0 on success and keep processing; a positive value to use as the
 * new size of the report descriptor; a negative error code to skip the fixup
 * and keep the original report descriptor
 *
 * Declare an %fmod_ret tracing bpf program to this function and attach this
 * program through hid_bpf_attach_prog() to have this helper called before any
 * parsing of the report descriptor by HID.
 */
/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
        return 0;
}
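
/*
 * Illustrative only, not compiled as part of this file: a report descriptor
 * fixup sketch (program name and patched offset are made up). The program
 * gets a writable copy of the descriptor through hid_bpf_get_data() and may
 * return a positive value to set a new descriptor size, up to
 * HID_MAX_DESCRIPTOR_SIZE:
 *
 *      SEC("fmod_ret/hid_bpf_rdesc_fixup")
 *      int BPF_PROG(my_rdesc_fixup, struct hid_bpf_ctx *hctx)
 *      {
 *              __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
 *
 *              if (!data)
 *                      return 0;
 *
 *              data[17] = 0x32;
 *
 *              return 0;
 *      }
 */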

u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
{
        int ret;
        struct hid_bpf_ctx_kern ctx_kern = {
                .ctx = {
                        .hid = hdev,
                        .size = *size,
                        .allocated_size = HID_MAX_DESCRIPTOR_SIZE,
                },
        };

        ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
        if (!ctx_kern.data)
                goto ignore_bpf;

        memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));

        ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
        if (ret < 0)
                goto ignore_bpf;

        if (ret) {
                if (ret > ctx_kern.ctx.allocated_size)
                        goto ignore_bpf;

                *size = ret;
        }

        rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);

        return rdesc;

 ignore_bpf:
        kfree(ctx_kern.data);
        return kmemdup(rdesc, *size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);

static int device_match_id(struct device *dev, const void *id)
{
        struct hid_device *hdev = to_hid_device(dev);

        return hdev->id == *(int *)id;
}

static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
{
        u8 *alloc_data;
        unsigned int i, j, max_report_len = 0;
        size_t alloc_size = 0;

        /* compute the maximum report length for this device */
        for (i = 0; i < HID_REPORT_TYPES; i++) {
                struct hid_report_enum *report_enum = hdev->report_enum + i;

                for (j = 0; j < HID_MAX_IDS; j++) {
                        struct hid_report *report = report_enum->report_id_hash[j];

                        if (report)
                                max_report_len = max(max_report_len, hid_report_len(report));
                }
        }

        /*
         * Give us a little bit of extra space and some predictability in the
         * buffer length we create. This way, we can tell users that they can
         * work on chunks of 64 bytes of memory without having the bpf verifier
         * scream at them.
         */
        alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
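        /* e.g. a 65-byte maximum report length rounds up to a 128-byte allocation */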

        alloc_data = kzalloc(alloc_size, GFP_KERNEL);
        if (!alloc_data)
                return -ENOMEM;

        *data = alloc_data;
        *size = alloc_size;

        return 0;
}

static int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
        /* hdev->bpf.device_data is already allocated, abort */
        if (hdev->bpf.device_data)
                return 0;

        return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}

int hid_bpf_reconnect(struct hid_device *hdev)
{
        if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                return device_reprobe(&hdev->dev);

        return 0;
}

static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
                                  __u32 flags)
{
        int fd, err, prog_type;

        prog_type = hid_bpf_get_prog_attach_type(prog);
        if (prog_type < 0)
                return prog_type;

        if (prog_type >= HID_BPF_PROG_TYPE_MAX)
                return -EINVAL;

        if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
                err = hid_bpf_allocate_event_data(hdev);
                if (err)
                        return err;
        }

        fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
        if (fd < 0)
                return fd;

        if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
                err = hid_bpf_reconnect(hdev);
                if (err) {
                        close_fd(fd);
                        return err;
                }
        }

        return fd;
}

/* Disables missing prototype warnings */
__bpf_kfunc_start_defs();

/**
 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
 *
 * @ctx: The HID-BPF context
 * @offset: The offset within the memory
 * @rdwr_buf_size: the const size of the buffer
 *
 * @returns %NULL on error, an %__u8 memory pointer on success
 */
__bpf_kfunc __u8 *
hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
{
        struct hid_bpf_ctx_kern *ctx_kern;

        if (!ctx)
                return NULL;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);

        if (rdwr_buf_size + offset > ctx->allocated_size)
                return NULL;

        return ctx_kern->data + offset;
}
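
/*
 * Illustrative only, not compiled as part of this file: from a BPF program the
 * returned pointer must be NULL-checked, and @offset + @rdwr_buf_size has to
 * stay within ctx->allocated_size (see the check above):
 *
 *      __u8 *data = hid_bpf_get_data(hctx, 0, 64);
 *
 *      if (!data)
 *              return 0;
 */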

/**
 * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 * @prog_fd: an fd in the user process representing the program to attach
 * @flags: any logical OR combination of &enum hid_bpf_attach_flags
 *
 * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
 * Closing this fd will detach the program from the HID device (unless the bpf_link
 * is pinned to the BPF file system).
 */
/* called from syscall */
__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
        struct hid_device *hdev;
        struct bpf_prog *prog;
        struct device *dev;
        int err, fd;

        if (!hid_bpf_ops)
                return -EINVAL;

        if ((flags & ~HID_BPF_FLAG_MASK))
                return -EINVAL;

        dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
        if (!dev)
                return -EINVAL;

        hdev = to_hid_device(dev);

        /*
         * take a ref on the prog itself, it will be released
         * on errors or when it is detached
         */
        prog = bpf_prog_get(prog_fd);
        if (IS_ERR(prog)) {
                err = PTR_ERR(prog);
                goto out_dev_put;
        }

        fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
        if (fd < 0) {
                err = fd;
                goto out_prog_put;
        }

        return fd;

 out_prog_put:
        bpf_prog_put(prog);
 out_dev_put:
        put_device(dev);
        return err;
}
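
/*
 * Illustrative only, not compiled as part of this file: since this kfunc is
 * exposed to BPF_PROG_TYPE_SYSCALL programs (see the registration at the
 * bottom of this file), a userspace loader typically runs a small
 * SEC("syscall") program to perform the attach. The argument struct below is
 * user-defined, not a kernel API:
 *
 *      struct attach_prog_args {
 *              int prog_fd;
 *              unsigned int hid;
 *              int retval;
 *      };
 *
 *      SEC("syscall")
 *      int attach_prog(struct attach_prog_args *ctx)
 *      {
 *              ctx->retval = hid_bpf_attach_prog(ctx->hid, ctx->prog_fd, 0);
 *              return 0;
 *      }
 */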

/**
 * hid_bpf_allocate_context - Allocate a context to the given HID device
 *
 * @hid_id: the system unique identifier of the HID device
 *
 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
        struct hid_device *hdev;
        struct hid_bpf_ctx_kern *ctx_kern = NULL;
        struct device *dev;

        if (!hid_bpf_ops)
                return NULL;

        dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
        if (!dev)
                return NULL;

        hdev = to_hid_device(dev);

        ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
        if (!ctx_kern) {
                put_device(dev);
                return NULL;
        }

        ctx_kern->ctx.hid = hdev;

        return &ctx_kern->ctx;
}

/**
 * hid_bpf_release_context - Release the previously allocated context @ctx
 *
 * @ctx: the HID-BPF context to release
 *
 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
        struct hid_bpf_ctx_kern *ctx_kern;
        struct hid_device *hid;

        ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
        hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

        kfree(ctx_kern);

        /* get_device() is called by bus_find_device() */
        put_device(&hid->dev);
}

/**
 * hid_bpf_hw_request - Communicate with a HID device
 *
 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
 * @buf: a %PTR_TO_MEM buffer
 * @buf__sz: the size of the data to transfer
 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
 *
 * @returns the number of bytes transferred on success, a negative error code otherwise.
 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
                   enum hid_report_type rtype, enum hid_class_request reqtype)
{
        struct hid_device *hdev;
        struct hid_report *report;
        struct hid_report_enum *report_enum;
        u8 *dma_data;
        u32 report_len;
        int ret;

        /* check arguments */
        if (!ctx || !hid_bpf_ops || !buf)
                return -EINVAL;

        switch (rtype) {
        case HID_INPUT_REPORT:
        case HID_OUTPUT_REPORT:
        case HID_FEATURE_REPORT:
                break;
        default:
                return -EINVAL;
        }

        switch (reqtype) {
        case HID_REQ_GET_REPORT:
        case HID_REQ_GET_IDLE:
        case HID_REQ_GET_PROTOCOL:
        case HID_REQ_SET_REPORT:
        case HID_REQ_SET_IDLE:
        case HID_REQ_SET_PROTOCOL:
                break;
        default:
                return -EINVAL;
        }

        if (buf__sz < 1)
                return -EINVAL;

        hdev = (struct hid_device *)ctx->hid; /* discard const */

        report_enum = hdev->report_enum + rtype;
        report = hid_bpf_ops->hid_get_report(report_enum, buf);
        if (!report)
                return -EINVAL;

        report_len = hid_report_len(report);

        if (buf__sz > report_len)
                buf__sz = report_len;

        dma_data = kmemdup(buf, buf__sz, GFP_KERNEL);
        if (!dma_data)
                return -ENOMEM;

        ret = hid_bpf_ops->hid_hw_raw_request(hdev,
                                              dma_data[0],
                                              dma_data,
                                              buf__sz,
                                              rtype,
                                              reqtype);

        if (ret > 0)
                memcpy(buf, dma_data, ret);

        kfree(dma_data);
        return ret;
}
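
/*
 * Illustrative only, not compiled as part of this file: a SEC("syscall")
 * program could fetch a feature report roughly as follows. The first byte of
 * the buffer selects the report ID (0x02 here is made up), and hid_id would
 * be passed in by the loader; error handling is omitted for brevity:
 *
 *      __u8 buf[8] = { 0x02 };
 *      struct hid_bpf_ctx *ctx = hid_bpf_allocate_context(hid_id);
 *
 *      if (!ctx)
 *              return -1;
 *
 *      hid_bpf_hw_request(ctx, buf, sizeof(buf),
 *                         HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *      hid_bpf_release_context(ctx);
 */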
__bpf_kfunc_end_defs();

/*
 * The following set contains all functions we agree BPF programs
 * can use.
 */
BTF_KFUNCS_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_KFUNCS_END(hid_bpf_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &hid_bpf_kfunc_ids,
};

/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
BTF_ID_FLAGS(func, hid_bpf_device_event)
BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
BTF_ID_FLAGS(func, __hid_bpf_tail_call)
BTF_SET8_END(hid_bpf_fmodret_ids)

static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
        .owner = THIS_MODULE,
        .set = &hid_bpf_fmodret_ids,
};

/* for syscall HID-BPF */
BTF_KFUNCS_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids)

static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
        .owner = THIS_MODULE,
        .set = &hid_bpf_syscall_kfunc_ids,
};

int hid_bpf_connect_device(struct hid_device *hdev)
{
        struct hid_bpf_prog_list *prog_list;

        rcu_read_lock();
        prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
        rcu_read_unlock();

        /* only allocate BPF data if there are programs attached */
        if (!prog_list)
                return 0;

        return hid_bpf_allocate_event_data(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_connect_device);

void hid_bpf_disconnect_device(struct hid_device *hdev)
{
        kfree(hdev->bpf.device_data);
        hdev->bpf.device_data = NULL;
        hdev->bpf.allocated_data = 0;
}
EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);

void hid_bpf_destroy_device(struct hid_device *hdev)
{
        if (!hdev)
                return;

        /* mark the device as destroyed in bpf so we don't reattach it */
        hdev->bpf.destroyed = true;

        __hid_bpf_destroy_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);

void hid_bpf_device_init(struct hid_device *hdev)
{
        spin_lock_init(&hdev->bpf.progs_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);

static int __init hid_bpf_init(void)
{
        int err;

        /* Note: if we exit with an error any time here, we would entirely break HID, which
         * is probably not something we want. So we log an error and return success.
         *
         * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
         * will not be available, so nobody will be able to use the functionality.
         */

        err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
        if (err) {
                pr_warn("error while registering fmodret entrypoints: %d", err);
                return 0;
        }

        err = hid_bpf_preload_skel();
        if (err) {
                pr_warn("error while preloading HID BPF dispatcher: %d", err);
                return 0;
        }

        /* register tracing kfuncs after we are sure we can load our preloaded bpf program */
        err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
        if (err) {
                pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
                return 0;
        }

        /* register syscalls after we are sure we can load our preloaded bpf program */
        err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
        if (err) {
                pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
                return 0;
        }

        return 0;
}

static void __exit hid_bpf_exit(void)
{
        /* HID depends on us, so if we hit that code, we are guaranteed that hid
         * has been removed and thus we do not need to clear the HID devices
         */
        hid_bpf_free_links_and_skel();
}

late_initcall(hid_bpf_init);
module_exit(hid_bpf_exit);
MODULE_AUTHOR("Benjamin Tissoires");
MODULE_LICENSE("GPL");