#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

struct flow_match {
        struct flow_dissector *dissector;
        void *mask;
        void *key;
};

struct flow_match_meta {
        struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
        struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
        struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
        struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
        struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_arp {
        struct flow_dissector_key_arp *key, *mask;
};

struct flow_match_ipv4_addrs {
        struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
        struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
        struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
        struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_ports_range {
        struct flow_dissector_key_ports_range *key, *mask;
};

struct flow_match_icmp {
        struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
        struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
        struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
        struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
        struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
        struct flow_dissector_key_ct *key, *mask;
};

struct flow_match_pppoe {
        struct flow_dissector_key_pppoe *key, *mask;
};

struct flow_match_l2tpv3 {
        struct flow_dissector_key_l2tpv3 *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
        struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
        struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
        struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
        struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
        struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
        struct flow_match_vlan *out);
void flow_rule_match_arp(const struct flow_rule *rule,
        struct flow_match_arp *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
        struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
        struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
        struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
        struct flow_match_ports *out);
void flow_rule_match_ports_range(const struct flow_rule *rule,
        struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
        struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
        struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
        struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
        struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
        struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
        struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
        struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
        struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
        struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
        struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
        struct flow_match_ct *out);
void flow_rule_match_pppoe(const struct flow_rule *rule,
        struct flow_match_pppoe *out);
void flow_rule_match_l2tpv3(const struct flow_rule *rule,
        struct flow_match_l2tpv3 *out);

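/*
 * Usage sketch (illustrative only, not part of this header): a driver's
 * flower offload path typically tests for a key with flow_rule_match_key()
 * (defined further below) and then extracts it with the corresponding
 * flow_rule_match_*() helper. All names other than the helpers and the
 * FLOW_DISSECTOR_KEY_* constants are hypothetical.
 *
 *        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
 *        struct flow_match_basic basic;
 *        struct flow_match_ipv4_addrs addrs;
 *
 *        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *                flow_rule_match_basic(rule, &basic);
 *                // basic.key->ip_proto holds the value, basic.mask->ip_proto
 *                // tells which bits the filter actually matches on.
 *        }
 *
 *        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 *                flow_rule_match_ipv4_addrs(rule, &addrs);
 *                // program addrs.key->src/dst, masked by addrs.mask->src/dst
 *        }
 */
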
enum flow_action_id {
        FLOW_ACTION_ACCEPT = 0,
        FLOW_ACTION_DROP,
        FLOW_ACTION_TRAP,
        FLOW_ACTION_GOTO,
        FLOW_ACTION_REDIRECT,
        FLOW_ACTION_MIRRED,
        FLOW_ACTION_REDIRECT_INGRESS,
        FLOW_ACTION_MIRRED_INGRESS,
        FLOW_ACTION_VLAN_PUSH,
        FLOW_ACTION_VLAN_POP,
        FLOW_ACTION_VLAN_MANGLE,
        FLOW_ACTION_TUNNEL_ENCAP,
        FLOW_ACTION_TUNNEL_DECAP,
        FLOW_ACTION_MANGLE,
        FLOW_ACTION_ADD,
        FLOW_ACTION_CSUM,
        FLOW_ACTION_MARK,
        FLOW_ACTION_PTYPE,
        FLOW_ACTION_PRIORITY,
        FLOW_ACTION_RX_QUEUE_MAPPING,
        FLOW_ACTION_WAKE,
        FLOW_ACTION_QUEUE,
        FLOW_ACTION_SAMPLE,
        FLOW_ACTION_POLICE,
        FLOW_ACTION_CT,
        FLOW_ACTION_CT_METADATA,
        FLOW_ACTION_MPLS_PUSH,
        FLOW_ACTION_MPLS_POP,
        FLOW_ACTION_MPLS_MANGLE,
        FLOW_ACTION_GATE,
        FLOW_ACTION_PPPOE_PUSH,
        FLOW_ACTION_JUMP,
        FLOW_ACTION_PIPE,
        FLOW_ACTION_VLAN_PUSH_ETH,
        FLOW_ACTION_VLAN_POP_ETH,
        FLOW_ACTION_CONTINUE,
        NUM_FLOW_ACTIONS,
};

/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
        FLOW_ACT_MANGLE_UNSPEC = 0,
        FLOW_ACT_MANGLE_HDR_TYPE_ETH,
        FLOW_ACT_MANGLE_HDR_TYPE_IP4,
        FLOW_ACT_MANGLE_HDR_TYPE_IP6,
        FLOW_ACT_MANGLE_HDR_TYPE_TCP,
        FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

enum flow_action_hw_stats_bit {
        FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
        FLOW_ACTION_HW_STATS_DELAYED_BIT,
        FLOW_ACTION_HW_STATS_DISABLED_BIT,

        FLOW_ACTION_HW_STATS_NUM_BITS
};

enum flow_action_hw_stats {
        FLOW_ACTION_HW_STATS_IMMEDIATE =
                BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
        FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
        FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
                                   FLOW_ACTION_HW_STATS_DELAYED,
        FLOW_ACTION_HW_STATS_DISABLED =
                BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
        FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
        u32 cookie_len;
        u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
        unsigned int len, gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
        enum flow_action_id id;
        u32 hw_index;
        unsigned long cookie;
        u64 miss_cookie;
        enum flow_action_hw_stats hw_stats;
        action_destr destructor;
        void *destructor_priv;
        union {
                u32 chain_index;                /* FLOW_ACTION_GOTO */
                struct net_device *dev;         /* FLOW_ACTION_REDIRECT */
                struct {                        /* FLOW_ACTION_VLAN */
                        u16 vid;
                        __be16 proto;
                        u8 prio;
                } vlan;
                struct {                        /* FLOW_ACTION_VLAN_PUSH_ETH */
                        unsigned char dst[ETH_ALEN];
                        unsigned char src[ETH_ALEN];
                } vlan_push_eth;
                struct {                        /* FLOW_ACTION_MANGLE */
                                                /* FLOW_ACTION_ADD */
                        enum flow_action_mangle_base htype;
                        u32 offset;
                        u32 mask;
                        u32 val;
                } mangle;
                struct ip_tunnel_info *tunnel;  /* FLOW_ACTION_TUNNEL_ENCAP */
                u32 csum_flags;                 /* FLOW_ACTION_CSUM */
                u32 mark;                       /* FLOW_ACTION_MARK */
                u16 ptype;                      /* FLOW_ACTION_PTYPE */
                u16 rx_queue;                   /* FLOW_ACTION_RX_QUEUE_MAPPING */
                u32 priority;                   /* FLOW_ACTION_PRIORITY */
                struct {                        /* FLOW_ACTION_QUEUE */
                        u32 ctx;
                        u32 index;
                        u8 vf;
                } queue;
                struct {                        /* FLOW_ACTION_SAMPLE */
                        struct psample_group *psample_group;
                        u32 rate;
                        u32 trunc_size;
                        bool truncate;
                } sample;
                struct {                        /* FLOW_ACTION_POLICE */
                        u32 burst;
                        u64 rate_bytes_ps;
                        u64 peakrate_bytes_ps;
                        u32 avrate;
                        u16 overhead;
                        u64 burst_pkt;
                        u64 rate_pkt_ps;
                        u32 mtu;
                        struct {
                                enum flow_action_id act_id;
                                u32 extval;
                        } exceed, notexceed;
                } police;
                struct {                        /* FLOW_ACTION_CT */
                        int action;
                        u16 zone;
                        struct nf_flowtable *flow_table;
                } ct;
                struct {
                        unsigned long cookie;
                        u32 mark;
                        u32 labels[4];
                        bool orig_dir;
                } ct_metadata;
                struct {                        /* FLOW_ACTION_MPLS_PUSH */
                        u32 label;
                        __be16 proto;
                        u8 tc;
                        u8 bos;
                        u8 ttl;
                } mpls_push;
                struct {                        /* FLOW_ACTION_MPLS_POP */
                        __be16 proto;
                } mpls_pop;
                struct {                        /* FLOW_ACTION_MPLS_MANGLE */
                        u32 label;
                        u8 tc;
                        u8 bos;
                        u8 ttl;
                } mpls_mangle;
                struct {
                        s32 prio;
                        u64 basetime;
                        u64 cycletime;
                        u64 cycletimeext;
                        u32 num_entries;
                        struct action_gate_entry *entries;
                } gate;
                struct {                        /* FLOW_ACTION_PPPOE_PUSH */
                        u16 sid;
                } pppoe;
        };
        struct flow_action_cookie *user_cookie; /* user defined action cookie */
};

struct flow_action {
        unsigned int num_entries;
        struct flow_action_entry entries[];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
        return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
        return action->num_entries == 1;
}

static inline bool flow_action_is_last_entry(const struct flow_action *action,
        const struct flow_action_entry *entry)
{
        return entry == &action->entries[action->num_entries - 1];
}

#define flow_action_for_each(__i, __act, __actions)                    \
        for (__i = 0, __act = &(__actions)->entries[0];                \
             __i < (__actions)->num_entries;                           \
             __act = &(__actions)->entries[++__i])

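/*
 * Usage sketch (illustrative only): drivers walk the action list with
 * flow_action_for_each() and dispatch on the action id. The variable names
 * below are hypothetical.
 *
 *        const struct flow_action_entry *act;
 *        int i;
 *
 *        flow_action_for_each(i, act, &rule->action) {
 *                switch (act->id) {
 *                case FLOW_ACTION_DROP:
 *                        // program a drop rule
 *                        break;
 *                case FLOW_ACTION_REDIRECT:
 *                        // act->dev is the destination net_device
 *                        break;
 *                default:
 *                        return -EOPNOTSUPP;
 *                }
 *        }
 */
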
static inline bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
        struct netlink_ext_ack *extack)
{
        const struct flow_action_entry *action_entry;
        u8 last_hw_stats;
        int i;

        if (flow_offload_has_one_action(action))
                return true;

        flow_action_for_each(i, action_entry, action) {
                if (i && action_entry->hw_stats != last_hw_stats) {
                        NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
                        return false;
                }
                last_hw_stats = action_entry->hw_stats;
        }
        return true;
}

static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
        WARN_ON(!flow_action_has_entries(action));
        return &action->entries[0];
}

static inline bool
__flow_action_hw_stats_check(const struct flow_action *action,
        struct netlink_ext_ack *extack,
        bool check_allow_bit,
        enum flow_action_hw_stats_bit allow_bit)
{
        const struct flow_action_entry *action_entry;

        if (!flow_action_has_entries(action))
                return true;
        if (!flow_action_mixed_hw_stats_check(action, extack))
                return false;

        action_entry = flow_action_first_entry_get(action);

        /* Zero is not a legal value for hw_stats, catch anyone passing it */
        WARN_ON_ONCE(!action_entry->hw_stats);

        if (!check_allow_bit &&
            ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
                NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
                return false;
        } else if (check_allow_bit &&
                   !(action_entry->hw_stats & BIT(allow_bit))) {
                NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
                return false;
        }
        return true;
}

static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
        struct netlink_ext_ack *extack,
        enum flow_action_hw_stats_bit allow_bit)
{
        return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
        struct netlink_ext_ack *extack)
{
        return __flow_action_hw_stats_check(action, extack, false, 0);
}

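/*
 * Usage sketch (illustrative only): before programming the actions, a driver
 * that implements only the default counter behaviour typically validates the
 * requested hw_stats policy with
 *
 *        if (!flow_action_basic_hw_stats_check(&rule->action, extack))
 *                return -EOPNOTSUPP;
 *
 * or, when it supports one specific type, with
 *
 *        if (!flow_action_hw_stats_check(&rule->action, extack,
 *                                        FLOW_ACTION_HW_STATS_DELAYED_BIT))
 *                return -EOPNOTSUPP;
 */
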
struct flow_rule {
        struct flow_match match;
        struct flow_action action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
        enum flow_dissector_key_id key)
{
        return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
        u64 pkts;
        u64 bytes;
        u64 drops;
        u64 lastused;
        enum flow_action_hw_stats used_hw_stats;
        bool used_hw_stats_valid;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
        u64 bytes, u64 pkts,
        u64 drops, u64 lastused,
        enum flow_action_hw_stats used_hw_stats)
{
        flow_stats->pkts     += pkts;
        flow_stats->bytes    += bytes;
        flow_stats->drops    += drops;
        flow_stats->lastused  = max_t(u64, flow_stats->lastused, lastused);

        /* The driver should pass value with a maximum of one bit set.
         * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
         */
        WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
        flow_stats->used_hw_stats |= used_hw_stats;
        flow_stats->used_hw_stats_valid = true;
}

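/*
 * Usage sketch (illustrative only): when servicing a FLOW_CLS_STATS request,
 * a driver reads its hardware counters and folds them into the offload
 * request with flow_stats_update(). The counter variables are hypothetical;
 * the stats type passed must have exactly one bit set.
 *
 *        flow_stats_update(&f->stats, bytes, packets, drops, lastused,
 *                          FLOW_ACTION_HW_STATS_DELAYED);
 */
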
enum flow_block_command {
        FLOW_BLOCK_BIND,
        FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
        FLOW_BLOCK_BINDER_TYPE_UNSPEC,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
        FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
        FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
        struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
        enum flow_block_command command;
        enum flow_block_binder_type binder_type;
        bool block_shared;
        bool unlocked_driver_cb;
        struct net *net;
        struct flow_block *block;
        struct list_head cb_list;
        struct list_head *driver_block_list;
        struct netlink_ext_ack *extack;
        struct Qdisc *sch;
        struct list_head *cb_list_head;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
        void *cb_priv);

struct flow_block_cb;

struct flow_block_indr {
        struct list_head list;
        struct net_device *dev;
        struct Qdisc *sch;
        enum flow_block_binder_type binder_type;
        void *data;
        void *cb_priv;
        void (*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
        struct list_head driver_list;
        struct list_head list;
        flow_setup_cb_t *cb;
        void *cb_ident;
        void *cb_priv;
        void (*release)(void *cb_priv);
        struct flow_block_indr indr;
        unsigned int refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
        void *cb_ident, void *cb_priv,
        void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
        void *cb_ident, void *cb_priv,
        void (*release)(void *cb_priv),
        struct flow_block_offload *bo,
        struct net_device *dev,
        struct Qdisc *sch, void *data,
        void *indr_cb_priv,
        void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
        flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
        struct flow_block_offload *offload)
{
        list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
        struct flow_block_offload *offload)
{
        list_move(&block_cb->list, &offload->cb_list);
}

static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
        struct flow_block_offload *offload)
{
        list_del(&block_cb->indr.list);
        list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
        struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
        struct list_head *driver_list,
        flow_setup_cb_t *cb,
        void *cb_ident, void *cb_priv, bool ingress_only);

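/*
 * Usage sketch (illustrative only): binding and unbinding a block callback
 * inside a driver's block setup handler, i.e. in a switch on f->command.
 * foo_setup_cb, priv and foo_block_cb_list are hypothetical; simple
 * ingress-only drivers can use flow_block_cb_setup_simple() instead.
 *
 *        static LIST_HEAD(foo_block_cb_list);
 *
 *        case FLOW_BLOCK_BIND:
 *                if (flow_block_cb_is_busy(foo_setup_cb, priv,
 *                                          &foo_block_cb_list))
 *                        return -EBUSY;
 *                block_cb = flow_block_cb_alloc(foo_setup_cb, priv, priv, NULL);
 *                if (IS_ERR(block_cb))
 *                        return PTR_ERR(block_cb);
 *                flow_block_cb_add(block_cb, f);
 *                list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 *                return 0;
 *        case FLOW_BLOCK_UNBIND:
 *                block_cb = flow_block_cb_lookup(f->block, foo_setup_cb, priv);
 *                if (!block_cb)
 *                        return -ENOENT;
 *                flow_block_cb_remove(block_cb, f);
 *                list_del(&block_cb->driver_list);
 *                return 0;
 */
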
enum flow_cls_command {
        FLOW_CLS_REPLACE,
        FLOW_CLS_DESTROY,
        FLOW_CLS_STATS,
        FLOW_CLS_TMPLT_CREATE,
        FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
        u32 chain_index;
        __be16 protocol;
        u32 prio;
        struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
        struct flow_cls_common_offload common;
        enum flow_cls_command command;
        bool use_act_stats;
        unsigned long cookie;
        struct flow_rule *rule;
        struct flow_stats stats;
        u32 classid;
};

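/*
 * Usage sketch (illustrative only): a TC_SETUP_CLSFLOWER handler dispatches
 * on the command; the foo_*() helpers and priv are hypothetical.
 *
 *        struct flow_cls_offload *f = type_data;
 *
 *        switch (f->command) {
 *        case FLOW_CLS_REPLACE:
 *                return foo_flower_replace(priv, f);
 *        case FLOW_CLS_DESTROY:
 *                return foo_flower_destroy(priv, f);
 *        case FLOW_CLS_STATS:
 *                return foo_flower_stats(priv, f);
 *        default:
 *                return -EOPNOTSUPP;
 *        }
 */
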
enum offload_act_command {
        FLOW_ACT_REPLACE,
        FLOW_ACT_DESTROY,
        FLOW_ACT_STATS,
};

struct flow_offload_action {
        struct netlink_ext_ack *extack; /* NULL in FLOW_ACT_STATS process */
        enum offload_act_command command;
        enum flow_action_id id;
        u32 index;
        unsigned long cookie;
        struct flow_stats stats;
        struct flow_action action;
};

struct flow_offload_action *offload_action_alloc(unsigned int num_actions);

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
        return flow_cmd->rule;
}

static inline void flow_block_init(struct flow_block *flow_block)
{
        INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch,
        void *cb_priv,
        enum tc_setup_type type, void *type_data,
        void *data,
        void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
        void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
        enum tc_setup_type type, void *data,
        struct flow_block_offload *bo,
        void (*cleanup)(struct flow_block_cb *block_cb));
bool flow_indr_dev_exists(void);

#endif /* _NET_FLOW_OFFLOAD_H */