Commit | Line | Data |
---|---|---|
2874c5fd | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
f8451725 HX |
2 | /* |
3 | * cls_cgroup.h Control Group Classifier | |
4 | * | |
5 | * Authors: Thomas Graf <tgraf@suug.ch> | |
f8451725 HX |
6 | */ |
7 | ||
8 | #ifndef _NET_CLS_CGROUP_H | |
9 | #define _NET_CLS_CGROUP_H | |
10 | ||
11 | #include <linux/cgroup.h> | |
12 | #include <linux/hardirq.h> | |
13 | #include <linux/rcupdate.h> | |
fe1217c4 | 14 | #include <net/sock.h> |
2309236c | 15 | #include <net/inet_sock.h> |
f8451725 | 16 | |
fe1217c4 DB |
17 | #ifdef CONFIG_CGROUP_NET_CLASSID |
18 | struct cgroup_cls_state { | |
f8451725 HX |
19 | struct cgroup_subsys_state css; |
20 | u32 classid; | |
21 | }; | |
22 | ||
fe1217c4 | 23 | struct cgroup_cls_state *task_cls_state(struct task_struct *p); |
f3419807 | 24 | |
f8451725 HX |
25 | static inline u32 task_cls_classid(struct task_struct *p) |
26 | { | |
920750ce | 27 | u32 classid; |
3fb5a991 | 28 | |
f8451725 HX |
29 | if (in_interrupt()) |
30 | return 0; | |
31 | ||
3fb5a991 | 32 | rcu_read_lock(); |
073219e9 | 33 | classid = container_of(task_css(p, net_cls_cgrp_id), |
3fb5a991 LZ |
34 | struct cgroup_cls_state, css)->classid; |
35 | rcu_read_unlock(); | |
36 | ||
37 | return classid; | |
f8451725 | 38 | } |
f8451725 | 39 | |
2a56a1fe | 40 | static inline void sock_update_classid(struct sock_cgroup_data *skcd) |
f3419807 | 41 | { |
fe1217c4 | 42 | u32 classid; |
f3419807 | 43 | |
fe1217c4 | 44 | classid = task_cls_classid(current); |
2a56a1fe | 45 | sock_cgroup_set_classid(skcd, classid); |
fe1217c4 | 46 | } |
b87a173e DB |
47 | |
48 | static inline u32 task_get_classid(const struct sk_buff *skb) | |
49 | { | |
50 | u32 classid = task_cls_state(current)->classid; | |
51 | ||
52 | /* Due to the nature of the classifier it is required to ignore all | |
53 | * packets originating from softirq context as accessing `current' | |
54 | * would lead to false results. | |
55 | * | |
56 | * This test assumes that all callers of dev_queue_xmit() explicitly | |
57 | * disable bh. Knowing this, it is possible to detect softirq based | |
58 | * calls by looking at the number of nested bh disable calls because | |
59 | * softirqs always disables bh. | |
60 | */ | |
61 | if (in_serving_softirq()) { | |
2309236c KK |
62 | struct sock *sk = skb_to_full_sk(skb); |
63 | ||
2a56a1fe | 64 | /* If there is an sock_cgroup_classid we'll use that. */ |
2309236c | 65 | if (!sk || !sk_fullsock(sk)) |
b87a173e DB |
66 | return 0; |
67 | ||
2309236c | 68 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); |
b87a173e DB |
69 | } |
70 | ||
71 | return classid; | |
72 | } | |
fe1217c4 | 73 | #else /* !CONFIG_CGROUP_NET_CLASSID */ |
/* !CONFIG_CGROUP_NET_CLASSID stub: classid tagging is compiled out. */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}
b87a173e DB |
77 | |
78 | static inline u32 task_get_classid(const struct sk_buff *skb) | |
79 | { | |
80 | return 0; | |
81 | } | |
fe1217c4 | 82 | #endif /* CONFIG_CGROUP_NET_CLASSID */ |
f8451725 | 83 | #endif /* _NET_CLS_CGROUP_H */ |