/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is licensed under the GNU General Public License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree.
 *
 * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
 * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
 * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
 * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/bug.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/proc_ns.h>
#include <linux/rhashtable.h>
#include <linux/rtnetlink.h>
#include <linux/rwsem.h>

/* Protects offdevs, members of bpf_offload_netdev and offload members
 * of all progs.
 * RTNL lock cannot be taken when holding this lock.
 */
static DECLARE_RWSEM(bpf_devs_lock);

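/* One struct bpf_offload_dev represents a single offload-capable
 * device, which may expose multiple netdevs; each such netdev gets a
 * struct bpf_offload_netdev entry, keyed by the netdev pointer.
 */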
struct bpf_offload_dev {
	struct list_head netdevs;
};

struct bpf_offload_netdev {
	struct rhash_head l;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	struct list_head progs;
	struct list_head maps;
	struct list_head offdev_netdevs;
};

static const struct rhashtable_params offdevs_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof(struct net_device *),
	.key_offset		= offsetof(struct bpf_offload_netdev, netdev),
	.head_offset		= offsetof(struct bpf_offload_netdev, l),
	.automatic_shrinking	= true,
};

static struct rhashtable offdevs;
static bool offdevs_inited;

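/* Offload is only possible if the netdev's driver implements ->ndo_bpf */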
static int bpf_dev_offload_check(struct net_device *netdev)
{
	if (!netdev)
		return -EINVAL;
	if (!netdev->netdev_ops->ndo_bpf)
		return -EOPNOTSUPP;
	return 0;
}

static struct bpf_offload_netdev *
bpf_offload_find_netdev(struct net_device *netdev)
{
	lockdep_assert_held(&bpf_devs_lock);

	if (!offdevs_inited)
		return NULL;
	return rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
}

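/* Called at BPF_PROG_LOAD time when attr->prog_ifindex is set; binds
 * the program to its target netdev before verification starts.
 */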
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{
	struct bpf_offload_netdev *ondev;
	struct bpf_prog_offload *offload;
	int err;

	if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
	    attr->prog_type != BPF_PROG_TYPE_XDP)
		return -EINVAL;

	if (attr->prog_flags)
		return -EINVAL;

	offload = kzalloc(sizeof(*offload), GFP_USER);
	if (!offload)
		return -ENOMEM;

	offload->prog = prog;

	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
					   attr->prog_ifindex);
	err = bpf_dev_offload_check(offload->netdev);
	if (err)
		goto err_maybe_put;

	down_write(&bpf_devs_lock);
	ondev = bpf_offload_find_netdev(offload->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}
	prog->aux->offload = offload;
	list_add_tail(&offload->offloads, &ondev->progs);
	dev_put(offload->netdev);
	up_write(&bpf_devs_lock);

	return 0;
err_unlock:
	up_write(&bpf_devs_lock);
err_maybe_put:
	if (offload->netdev)
		dev_put(offload->netdev);
	kfree(offload);
	return err;
}

static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
			     struct netdev_bpf *data)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct net_device *netdev;

	ASSERT_RTNL();

	if (!offload)
		return -ENODEV;
	netdev = offload->netdev;

	data->command = cmd;

	return netdev->netdev_ops->ndo_bpf(netdev, data);
}

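/* Hand the program to the driver for verification setup; on success
 * the driver returns its verifier callbacks in data.verifier.ops.
 */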
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
	struct netdev_bpf data = {};
	int err;

	data.verifier.prog = env->prog;

	rtnl_lock();
	err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
	if (err)
		goto exit_unlock;

	env->prog->aux->offload->dev_ops = data.verifier.ops;
	env->prog->aux->offload->dev_state = true;
exit_unlock:
	rtnl_unlock();
	return err;
}

int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload)
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

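/* The finalize callback is optional; drivers which need no
 * post-verification pass leave it NULL and we report success.
 */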
int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
{
	struct bpf_prog_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload) {
		if (offload->dev_ops->finalize)
			ret = offload->dev_ops->finalize(env);
		else
			ret = 0;
	}
	up_read(&bpf_devs_lock);

	return ret;
}

static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	struct bpf_prog_offload *offload = prog->aux->offload;
	struct netdev_bpf data = {};

	data.offload.prog = prog;

	if (offload->dev_state)
		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));

	/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
	bpf_prog_free_id(prog, true);

	list_del_init(&offload->offloads);
	kfree(offload);
	prog->aux->offload = NULL;
}

void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (prog->aux->offload)
		__bpf_prog_offload_destroy(prog);
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
	struct netdev_bpf data = {};
	int ret;

	data.offload.prog = prog;

	rtnl_lock();
	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
	rtnl_unlock();

	return ret;
}

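/* Device-bound programs must never run on the host CPU.
 * bpf_prog_offload_compile() below points prog->bpf_func at this stub
 * so an accidental host invocation warns instead of executing anything.
 */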
static unsigned int bpf_prog_warn_on_exec(const void *ctx,
					  const struct bpf_insn *insn)
{
	WARN(1, "attempt to execute device eBPF program on the host!");
	return 0;
}

int bpf_prog_offload_compile(struct bpf_prog *prog)
{
	prog->bpf_func = bpf_prog_warn_on_exec;

	return bpf_prog_offload_translate(prog);
}

struct ns_get_path_bpf_prog_args {
	struct bpf_prog *prog;
	struct bpf_prog_info *info;
};

static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_prog_args *args = private_data;
	struct bpf_prog_aux *aux = args->prog->aux;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (aux->offload) {
		args->info->ifindex = aux->offload->netdev->ifindex;
		net = dev_net(aux->offload->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

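/* jited_prog_len follows the usual convention: userspace passes in the
 * size of its buffer, the kernel reports the real image length and
 * copies out at most the smaller of the two.
 */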
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog)
{
	struct ns_get_path_bpf_prog_args args = {
		.prog	= prog,
		.info	= info,
	};
	struct bpf_prog_aux *aux = prog->aux;
	struct inode *ns_inode;
	struct path ns_path;
	char __user *uinsns;
	void *res;
	u32 ulen;

	res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	down_read(&bpf_devs_lock);

	if (!aux->offload) {
		up_read(&bpf_devs_lock);
		return -ENODEV;
	}

	ulen = info->jited_prog_len;
	info->jited_prog_len = aux->offload->jited_len;
	if (info->jited_prog_len && ulen) {
		uinsns = u64_to_user_ptr(info->jited_prog_insns);
		ulen = min_t(u32, info->jited_prog_len, ulen);
		if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
			up_read(&bpf_devs_lock);
			return -EFAULT;
		}
	}

	up_read(&bpf_devs_lock);

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

const struct bpf_prog_ops bpf_offload_prog_ops = {
};

static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
			       enum bpf_netdev_command cmd)
{
	struct netdev_bpf data = {};
	struct net_device *netdev;

	ASSERT_RTNL();

	data.command = cmd;
	data.offmap = offmap;
	/* Caller must make sure netdev is valid */
	netdev = offmap->netdev;

	return netdev->netdev_ops->ndo_bpf(netdev, &data);
}

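/* Note the lock order here: RTNL first, then bpf_devs_lock for
 * writing. This is why holders of bpf_devs_lock must never take RTNL
 * (see the comment at the top of this file).
 */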
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_offload_netdev *ondev;
	struct bpf_offloaded_map *offmap;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);
	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_type != BPF_MAP_TYPE_HASH)
		return ERR_PTR(-EINVAL);

	offmap = kzalloc(sizeof(*offmap), GFP_USER);
	if (!offmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&offmap->map, attr);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
	err = bpf_dev_offload_check(offmap->netdev);
	if (err)
		goto err_unlock;

	ondev = bpf_offload_find_netdev(offmap->netdev);
	if (!ondev) {
		err = -EINVAL;
		goto err_unlock;
	}

	err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
	if (err)
		goto err_unlock;

	list_add_tail(&offmap->offloads, &ondev->maps);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	return &offmap->map;

err_unlock:
	up_write(&bpf_devs_lock);
	rtnl_unlock();
	kfree(offmap);
	return ERR_PTR(err);
}

static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
	WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
	/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
	bpf_map_free_id(&offmap->map, true);
	list_del_init(&offmap->offloads);
	offmap->netdev = NULL;
}

void bpf_map_offload_map_free(struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	rtnl_lock();
	down_write(&bpf_devs_lock);
	if (offmap->netdev)
		__bpf_map_offload_destroy(offmap);
	up_write(&bpf_devs_lock);
	rtnl_unlock();

	kfree(offmap);
}

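/* The map element accessors below just forward to the driver's
 * callbacks, holding bpf_devs_lock for reading so the backing netdev
 * cannot be torn down mid-call.
 */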
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	if (unlikely(flags > BPF_EXIST))
		return -EINVAL;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_update_elem(offmap, key, value,
						       flags);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_delete_elem(offmap, key);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	if (offmap->netdev)
		ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
	up_read(&bpf_devs_lock);

	return ret;
}

struct ns_get_path_bpf_map_args {
	struct bpf_offloaded_map *offmap;
	struct bpf_map_info *info;
};

static struct ns_common *bpf_map_offload_info_fill_ns(void *private_data)
{
	struct ns_get_path_bpf_map_args *args = private_data;
	struct ns_common *ns;
	struct net *net;

	rtnl_lock();
	down_read(&bpf_devs_lock);

	if (args->offmap->netdev) {
		args->info->ifindex = args->offmap->netdev->ifindex;
		net = dev_net(args->offmap->netdev);
		get_net(net);
		ns = &net->ns;
	} else {
		args->info->ifindex = 0;
		ns = NULL;
	}

	up_read(&bpf_devs_lock);
	rtnl_unlock();

	return ns;
}

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct ns_get_path_bpf_map_args args = {
		.offmap	= map_to_offmap(map),
		.info	= info,
	};
	struct inode *ns_inode;
	struct path ns_path;
	void *res;

	res = ns_get_path_cb(&ns_path, bpf_map_offload_info_fill_ns, &args);
	if (IS_ERR(res)) {
		if (!info->ifindex)
			return -ENODEV;
		return PTR_ERR(res);
	}

	ns_inode = ns_path.dentry->d_inode;
	info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev);
	info->netns_ino = ns_inode->i_ino;
	path_put(&ns_path);

	return 0;
}

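/* A program matches a netdev if it is bound to that exact netdev, or
 * if both netdevs belong to the same underlying offload device.
 */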
static bool __bpf_offload_dev_match(struct bpf_prog *prog,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev1, *ondev2;
	struct bpf_prog_offload *offload;

	if (!bpf_prog_is_dev_bound(prog->aux))
		return false;

	offload = prog->aux->offload;
	if (!offload)
		return false;
	if (offload->netdev == netdev)
		return true;

	ondev1 = bpf_offload_find_netdev(offload->netdev);
	ondev2 = bpf_offload_find_netdev(netdev);

	return ondev1 && ondev2 && ondev1->offdev == ondev2->offdev;
}

bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev)
{
	bool ret;

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, netdev);
	up_read(&bpf_devs_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_match);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
{
	struct bpf_offloaded_map *offmap;
	bool ret;

	if (!bpf_map_is_dev_bound(map))
		return bpf_map_offload_neutral(map);
	offmap = map_to_offmap(map);

	down_read(&bpf_devs_lock);
	ret = __bpf_offload_dev_match(prog, offmap->netdev);
	up_read(&bpf_devs_lock);

	return ret;
}

int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev;
	int err;

	ondev = kzalloc(sizeof(*ondev), GFP_KERNEL);
	if (!ondev)
		return -ENOMEM;

	ondev->netdev = netdev;
	ondev->offdev = offdev;
	INIT_LIST_HEAD(&ondev->progs);
	INIT_LIST_HEAD(&ondev->maps);

	down_write(&bpf_devs_lock);
	err = rhashtable_insert_fast(&offdevs, &ondev->l, offdevs_params);
	if (err) {
		netdev_warn(netdev, "failed to register for BPF offload\n");
		goto err_unlock_free;
	}

	list_add(&ondev->offdev_netdevs, &offdev->netdevs);
	up_write(&bpf_devs_lock);
	return 0;

err_unlock_free:
	up_write(&bpf_devs_lock);
	kfree(ondev);
	return err;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_register);

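/* When one port of a multi-port device is unregistered, its programs
 * and maps are migrated to another port of the same device if one
 * exists; otherwise they are destroyed.
 */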
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev)
{
	struct bpf_offload_netdev *ondev, *altdev;
	struct bpf_offloaded_map *offmap, *mtmp;
	struct bpf_prog_offload *offload, *ptmp;

	ASSERT_RTNL();

	down_write(&bpf_devs_lock);
	ondev = rhashtable_lookup_fast(&offdevs, &netdev, offdevs_params);
	if (WARN_ON(!ondev))
		goto unlock;

	WARN_ON(rhashtable_remove_fast(&offdevs, &ondev->l, offdevs_params));
	list_del(&ondev->offdev_netdevs);

	/* Try to move the objects to another netdev of the device */
	altdev = list_first_entry_or_null(&offdev->netdevs,
					  struct bpf_offload_netdev,
					  offdev_netdevs);
	if (altdev) {
		list_for_each_entry(offload, &ondev->progs, offloads)
			offload->netdev = altdev->netdev;
		list_splice_init(&ondev->progs, &altdev->progs);

		list_for_each_entry(offmap, &ondev->maps, offloads)
			offmap->netdev = altdev->netdev;
		list_splice_init(&ondev->maps, &altdev->maps);
	} else {
		list_for_each_entry_safe(offload, ptmp, &ondev->progs, offloads)
			__bpf_prog_offload_destroy(offload->prog);
		list_for_each_entry_safe(offmap, mtmp, &ondev->maps, offloads)
			__bpf_map_offload_destroy(offmap);
	}

	WARN_ON(!list_empty(&ondev->progs));
	WARN_ON(!list_empty(&ondev->maps));
	kfree(ondev);
unlock:
	up_write(&bpf_devs_lock);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
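
/* The offdevs hashtable is initialized lazily on first device
 * creation and never freed, so offdevs_inited only ever flips from
 * false to true.
 */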
struct bpf_offload_dev *bpf_offload_dev_create(void)
{
	struct bpf_offload_dev *offdev;
	int err;

	down_write(&bpf_devs_lock);
	if (!offdevs_inited) {
		err = rhashtable_init(&offdevs, &offdevs_params);
		if (err) {
			up_write(&bpf_devs_lock);
			return ERR_PTR(err);
		}
		offdevs_inited = true;
	}
	up_write(&bpf_devs_lock);

	offdev = kzalloc(sizeof(*offdev), GFP_KERNEL);
	if (!offdev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&offdev->netdevs);

	return offdev;
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_create);

void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev)
{
	WARN_ON(!list_empty(&offdev->netdevs));
	kfree(offdev);
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy);