/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * cls_cgroup.h		Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#ifndef _NET_CLS_CGROUP_H
#define _NET_CLS_CGROUP_H

#include <linux/cgroup.h>
#include <linux/hardirq.h>
#include <linux/rcupdate.h>
#include <net/sock.h>
#include <net/inet_sock.h>

#ifdef CONFIG_CGROUP_NET_CLASSID
struct cgroup_cls_state {
	struct cgroup_subsys_state css;
	u32 classid;
};

struct cgroup_cls_state *task_cls_state(struct task_struct *p);
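
/* task_cls_classid - return the classid of @p's net_cls cgroup, or 0 when
 * called from interrupt context, where 'current' (the usual argument) is
 * unrelated to the work being processed.
 */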
static inline u32 task_cls_classid(struct task_struct *p)
{
	u32 classid;

	if (in_interrupt())
		return 0;

	rcu_read_lock();
	classid = container_of(task_css(p, net_cls_cgrp_id),
			       struct cgroup_cls_state, css)->classid;
	rcu_read_unlock();

	return classid;
}
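
/* sock_update_classid - copy the current task's net_cls classid into a
 * socket's cgroup data; typically done when the socket is set up (the
 * call sites live outside this header).
 */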
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
	u32 classid;

	classid = task_cls_classid(current);
	sock_cgroup_set_classid(skcd, classid);
}
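
/* __task_get_classid - read @task's classid directly, without the
 * interrupt-context check done by task_cls_classid(); task_get_classid()
 * below handles the softirq case itself.
 */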
static inline u32 __task_get_classid(struct task_struct *task)
{
	return task_cls_state(task)->classid;
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	u32 classid = __task_get_classid(current);

	/* Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		struct sock *sk = skb_to_full_sk(skb);

		/* If there is a sock_cgroup_classid we'll use that. */
		if (!sk || !sk_fullsock(sk))
			return 0;

		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
	}

	return classid;
}
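
/* A minimal usage sketch (an assumed typical caller, not something defined
 * in this header): a cls_cgroup-style ->classify() hook, running with bh
 * disabled, would do roughly the following before matching the result
 * against its configured classid:
 *
 *	u32 classid = task_get_classid(skb);
 *
 *	if (!classid)
 *		return -1;
 */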
#else /* !CONFIG_CGROUP_NET_CLASSID */
static inline void sock_update_classid(struct sock_cgroup_data *skcd)
{
}

static inline u32 task_get_classid(const struct sk_buff *skb)
{
	return 0;
}
#endif /* CONFIG_CGROUP_NET_CLASSID */
#endif /* _NET_CLS_CGROUP_H */