// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2021 Corigine, Inc. */

#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_ct.h>

#include "conntrack.h"
#include "../nfp_port.h"

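/* The tc merge table below is keyed on the {pre_ct, post_ct} flow cookie
 * pair, hence the key_len of 2 * sizeof(unsigned long).
 */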
const struct rhashtable_params nfp_tc_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_ct_tc_merge,
					   hash_node),
	.key_len		= sizeof(unsigned long) * 2,
	.key_offset		= offsetof(struct nfp_fl_ct_tc_merge, cookie),
	.automatic_shrinking	= true,
};

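/* The nft merge table is keyed on the {pre_ct, post_ct, nft} flow cookie
 * triplet, hence the key_len of 3 * sizeof(unsigned long).
 */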
const struct rhashtable_params nfp_nft_ct_merge_params = {
	.head_offset		= offsetof(struct nfp_fl_nft_tc_merge,
					   hash_node),
	.key_len		= sizeof(unsigned long) * 3,
	.key_offset		= offsetof(struct nfp_fl_nft_tc_merge, cookie),
	.automatic_shrinking	= true,
};

static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id);

/**
 * get_hashentry() - Wrapper around hashtable lookup.
 * @ht:		hashtable where entry could be found
 * @key:	key to lookup
 * @params:	hashtable params
 * @size:	size of entry to allocate if not in table
 *
 * Returns an entry from the hashtable. If the entry does not exist
 * yet, allocate the memory for it and return the new entry.
 */
static void *get_hashentry(struct rhashtable *ht, void *key,
			   const struct rhashtable_params params, size_t size)
{
	void *result;

	result = rhashtable_lookup_fast(ht, key, params);

	if (result)
		return result;

	result = kzalloc(size, GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ENOMEM);

	return result;
}

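/* A flow qualifies as pre_ct when it matches no conntrack state, sits on
 * chain 0, and its only conntrack action is a plain ct or a ct nat.
 */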
bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action_entry *act;
	struct flow_match_ct ct;
	int i;

	if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state)
			return false;
	}

	if (flow->common.chain_index)
		return false;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (act->id == FLOW_ACTION_CT) {
			/* The pre_ct rule may only have the ct or ct nat
			 * action; it cannot contain any other ct action,
			 * e.g. ct commit.
			 */
			if (!act->ct.action || act->ct.action == TCA_CT_ACT_NAT)
				return true;
			else
				return false;
		}
	}

	return false;
}

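/* A flow qualifies as post_ct when it matches the established conntrack
 * state, or, in the nat case, when it sits on a non-zero chain and its
 * only conntrack action is ct clear.
 */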
bool is_post_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action_entry *act;
	bool exist_ct_clear = false;
	struct flow_match_ct ct;
	int i;

	if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
			return true;
	} else {
		/* A post ct entry cannot contain any ct action except ct_clear. */
		flow_action_for_each(i, act, &flow->rule->action) {
			if (act->id == FLOW_ACTION_CT) {
				/* Ignore the ct clear action. */
				if (act->ct.action == TCA_CT_ACT_CLEAR) {
					exist_ct_clear = true;
					continue;
				}

				return false;
			}
		}
		/* When doing nat with ct, the post ct entry ignores the ct
		 * status and matches the nat fields (sip/dip) instead. In
		 * that case the flow chain index is non-zero and the flow
		 * contains a ct clear action.
		 */
		if (flow->common.chain_index && exist_ct_clear)
			return true;
	}

	return false;
}

/**
 * get_mangled_key() - Mangle the key if mangle act exists
 * @rule:	rule that carries the actions
 * @buf:	pointer to key to be mangled
 * @offset:	used to adjust mangled offset in L2/L3/L4 header
 * @key_sz:	key size
 * @htype:	mangling type
 *
 * Returns buf where the mangled key is stored.
 */
static void *get_mangled_key(struct flow_rule *rule, void *buf,
			     u32 offset, size_t key_sz,
			     enum flow_action_mangle_base htype)
{
	struct flow_action_entry *act;
	u32 *val = (u32 *)buf;
	u32 off, msk, key;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_MANGLE &&
		    act->mangle.htype == htype) {
			off = act->mangle.offset - offset;
			msk = act->mangle.mask;
			key = act->mangle.val;

			/* Mangling is supposed to be u32 aligned */
			if (off % 4 || off >= key_sz)
				continue;

			val[off >> 2] &= msk;
			val[off >> 2] |= key;
		}
	}

	return buf;
}

/* Only tos and ttl are present in the flow_match_ip structure, which
 * does not conform to the layout of the ip/ipv6 header definitions. So
 * they need special handling here: fill them into the ip/ipv6 header,
 * so that the mangle actions can work on them directly.
 */
#define NFP_IPV4_TOS_MASK	GENMASK(23, 16)
#define NFP_IPV4_TTL_MASK	GENMASK(31, 24)
#define NFP_IPV6_TCLASS_MASK	GENMASK(27, 20)
#define NFP_IPV6_HLIMIT_MASK	GENMASK(7, 0)
static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
				 bool is_v6)
{
	struct flow_match_ip match;
	/* IPv4's ttl field is in the third dword. */
	__be32 ip_hdr[3];
	u32 tmp, hdr_len;

	flow_rule_match_ip(rule, &match);

	if (is_v6) {
		tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos);
		ip_hdr[0] = cpu_to_be32(tmp);
		tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl);
		ip_hdr[1] = cpu_to_be32(tmp);
		hdr_len = 2 * sizeof(__be32);
	} else {
		tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos);
		ip_hdr[0] = cpu_to_be32(tmp);
		tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl);
		ip_hdr[2] = cpu_to_be32(tmp);
		hdr_len = 3 * sizeof(__be32);
	}

	get_mangled_key(rule, ip_hdr, 0, hdr_len,
			is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 :
				FLOW_ACT_MANGLE_HDR_TYPE_IP4);

	match.key = buf;

	if (is_v6) {
		tmp = be32_to_cpu(ip_hdr[0]);
		match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp);
		tmp = be32_to_cpu(ip_hdr[1]);
		match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp);
	} else {
		tmp = be32_to_cpu(ip_hdr[0]);
		match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp);
		tmp = be32_to_cpu(ip_hdr[2]);
		match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp);
	}

	return buf;
}

/* Note entry1 and entry2 are not swappable: the ip and tport merge
 * checks are only skipped for pre_ct and post_ct when the pre_ct
 * entry does nat.
 */
static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
					   struct nfp_fl_ct_flow_entry *entry2)
{
	/* Only pre_ct entries can have the NFP_FL_ACTION_DO_NAT flag. */
	if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
	    entry2->type == CT_TYPE_POST_CT)
		return false;

	return true;
}

/* Note entry1 and entry2 are not swappable: entry1 should be the
 * former flow, whose mangle action, if any, must be taken into
 * account, and entry2 should be the latter flow, whose actions we
 * don't care about.
 */
static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
			      struct nfp_fl_ct_flow_entry *entry2)
{
	unsigned long long ovlp_keys;
	bool out, is_v6 = false;
	u8 ip_proto = 0;
	ovlp_keys = entry1->rule->match.dissector->used_keys &
			entry2->rule->match.dissector->used_keys;
	/* Temporary buffer for mangled keys; 64B is enough to cover the
	 * maximum struct size of a key among the various fields that may
	 * be mangled. Supported fields to mangle:
	 *	mac_src/mac_dst (struct flow_match_eth_addrs, 12B)
	 *	nw_tos/nw_ttl   (struct flow_match_ip, 2B)
	 *	nw_src/nw_dst   (struct flow_match_ipv4/6_addrs, 32B)
	 *	tp_src/tp_dst   (struct flow_match_ports, 4B)
	 */
	char buf[64];

	if (entry1->netdev && entry2->netdev &&
	    entry1->netdev != entry2->netdev)
		return -EINVAL;

	/* Check the overlapping fields one by one; the unmasked parts
	 * must not conflict with each other.
	 */
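	/* COMPARE_UNMASKED_FIELDS (defined in conntrack.h) sets @out when
	 * the key bits that both rules' masks care about carry conflicting
	 * values.
	 */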
	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match1, match2;

		flow_rule_match_control(entry1->rule, &match1);
		flow_rule_match_control(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match1, match2;

		flow_rule_match_basic(entry1->rule, &match1);
		flow_rule_match_basic(entry2->rule, &match2);

		/* The n_proto field is mandatory in ct-related flows and
		 * should be either ipv4 or ipv6.
		 */
		is_v6 = match1.key->n_proto == htons(ETH_P_IPV6);
		/* The ip_proto field is mandatory whenever ports are matched. */
		ip_proto = match1.key->ip_proto;

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	/* If the pre ct entry does nat, the nat ip is in the nft entry and
	 * the check is done when the nft and post ct entries are merged,
	 * so skip the ip merge check here.
	 */
	if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
		struct flow_match_ipv4_addrs match1, match2;

		flow_rule_match_ipv4_addrs(entry1->rule, &match1);
		flow_rule_match_ipv4_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf,
					     offsetof(struct iphdr, saddr),
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_IP4);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	/* If the pre ct entry does nat, the nat ip is in the nft entry and
	 * the check is done when the nft and post ct entries are merged,
	 * so skip the ip merge check here.
	 */
	if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
		struct flow_match_ipv6_addrs match1, match2;

		flow_rule_match_ipv6_addrs(entry1->rule, &match1);
		flow_rule_match_ipv6_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf,
					     offsetof(struct ipv6hdr, saddr),
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_IP6);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	/* If the pre ct entry does nat, the nat tport is in the nft entry
	 * and the check is done when the nft and post ct entries are
	 * merged, so skip the tport merge check here.
	 */
	if ((ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_PORTS)) &&
	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
		enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
		struct flow_match_ports match1, match2;

		flow_rule_match_ports(entry1->rule, &match1);
		flow_rule_match_ports(entry2->rule, &match2);

		if (ip_proto == IPPROTO_UDP)
			htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
		else if (ip_proto == IPPROTO_TCP)
			htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP;

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf, 0,
					     sizeof(*match1.key), htype);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match1, match2;

		flow_rule_match_eth_addrs(entry1->rule, &match1);
		flow_rule_match_eth_addrs(entry2->rule, &match2);

		memcpy(buf, match1.key, sizeof(*match1.key));
		match1.key = get_mangled_key(entry1->rule, buf, 0,
					     sizeof(*match1.key),
					     FLOW_ACT_MANGLE_HDR_TYPE_ETH);

		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match1, match2;

		flow_rule_match_vlan(entry1->rule, &match1);
		flow_rule_match_vlan(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_match_mpls match1, match2;

		flow_rule_match_mpls(entry1->rule, &match1);
		flow_rule_match_mpls(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match1, match2;

		flow_rule_match_tcp(entry1->rule, &match1);
		flow_rule_match_tcp(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match1, match2;

		flow_rule_match_ip(entry1->rule, &match1);
		flow_rule_match_ip(entry2->rule, &match2);

		match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match1, match2;

		flow_rule_match_enc_keyid(entry1->rule, &match1);
		flow_rule_match_enc_keyid(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match1, match2;

		flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
		flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match1, match2;

		flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
		flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control match1, match2;

		flow_rule_match_enc_control(entry1->rule, &match1);
		flow_rule_match_enc_control(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match1, match2;

		flow_rule_match_enc_ip(entry1->rule, &match1);
		flow_rule_match_enc_ip(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	if (ovlp_keys & BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_match_enc_opts match1, match2;

		flow_rule_match_enc_opts(entry1->rule, &match1);
		flow_rule_match_enc_opts(entry2->rule, &match2);
		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
		if (out)
			goto check_failed;
	}

	return 0;

check_failed:
	return -EINVAL;
}

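/* Check whether a vlan action on the pre_ct flow is compatible with the
 * post_ct flow's VLAN match: only a push or mangle whose vid/tpid/prio
 * agree with the match under its mask can be merged.
 */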
static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
				   struct flow_rule *rule)
{
	struct flow_match_vlan match;

	if (unlikely(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)))
		return -EOPNOTSUPP;

	/* post_ct does not match on the VLAN key: can be merged. */
	if (likely(!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)))
		return 0;

	switch (a_in->id) {
	/* pre_ct has a pop vlan action, so post_ct cannot match on the
	 * VLAN key: cannot be merged.
	 */
	case FLOW_ACTION_VLAN_POP:
		return -EOPNOTSUPP;

	case FLOW_ACTION_VLAN_PUSH:
	case FLOW_ACTION_VLAN_MANGLE:
		flow_rule_match_vlan(rule, &match);
		/* Different vlan id: cannot be merged. */
		if ((match.key->vlan_id & match.mask->vlan_id) ^
		    (a_in->vlan.vid & match.mask->vlan_id))
			return -EOPNOTSUPP;

		/* Different tpid: cannot be merged. */
		if ((match.key->vlan_tpid & match.mask->vlan_tpid) ^
		    (a_in->vlan.proto & match.mask->vlan_tpid))
			return -EOPNOTSUPP;

		/* Different priority: cannot be merged. */
		if ((match.key->vlan_priority & match.mask->vlan_priority) ^
		    (a_in->vlan.prio & match.mask->vlan_priority))
			return -EOPNOTSUPP;

		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Extra check for multiple ct-zone merges: currently supports merge
 * checking of nft entries across different zones.
 */
static int nfp_ct_merge_extra_check(struct nfp_fl_ct_flow_entry *nft_entry,
				    struct nfp_fl_ct_tc_merge *tc_m_entry)
{
	struct nfp_fl_nft_tc_merge *prev_nft_m_entry;
	struct nfp_fl_ct_flow_entry *pre_ct_entry;

	pre_ct_entry = tc_m_entry->pre_ct_parent;
	prev_nft_m_entry = pre_ct_entry->prev_m_entries[pre_ct_entry->num_prev_m_entries - 1];

	return nfp_ct_merge_check(prev_nft_m_entry->nft_parent, nft_entry);
}

static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
				  struct nfp_fl_ct_flow_entry *post_ct_entry,
				  struct nfp_fl_ct_flow_entry *nft_entry)
{
	struct flow_action_entry *act;
	int i, err;

	/* Check for pre_ct->action conflicts */
	flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_MANGLE:
			err = nfp_ct_check_vlan_merge(act, post_ct_entry->rule);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MPLS_PUSH:
		case FLOW_ACTION_MPLS_POP:
		case FLOW_ACTION_MPLS_MANGLE:
			return -EOPNOTSUPP;
		default:
			break;
		}
	}

	/* Check for nft->action conflicts */
	flow_action_for_each(i, act, &nft_entry->rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_MANGLE:
		case FLOW_ACTION_MPLS_PUSH:
		case FLOW_ACTION_MPLS_POP:
		case FLOW_ACTION_MPLS_MANGLE:
			return -EOPNOTSUPP;
		default:
			break;
		}
	}
	return 0;
}

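/* Verify that the ct metadata (labels and mark) the post_ct flow matches
 * on agrees with what the nft entry's CT_METADATA action would set.
 */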
static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
			     struct nfp_fl_ct_flow_entry *nft_entry)
{
	struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
	struct flow_action_entry *ct_met;
	struct flow_match_ct ct;
	int i;

	ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
	if (ct_met && (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT))) {
		u32 *act_lbl;

		act_lbl = ct_met->ct_metadata.labels;
		flow_rule_match_ct(post_ct_entry->rule, &ct);
		for (i = 0; i < 4; i++) {
			if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
			    (act_lbl[i] & ct.mask->ct_labels[i]))
				return -EINVAL;
		}

		if ((ct.key->ct_mark & ct.mask->ct_mark) ^
		    (ct_met->ct_metadata.mark & ct.mask->ct_mark))
			return -EINVAL;

		return 0;
	} else {
		/* A post_ct flow with a ct clear action will not match on
		 * the ct status when the nft entry is a nat entry.
		 */
		if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
			return 0;
	}

	return -EINVAL;
}

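/* Walk the key layer bitmaps in firmware key order, recording each
 * present layer's byte offset in @map and accumulating the total key
 * size.
 */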
static int
nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
{
	int key_size;

	/* This field must always be present */
	key_size = sizeof(struct nfp_flower_meta_tci);
	map[FLOW_PAY_META_TCI] = 0;

	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
		map[FLOW_PAY_EXT_META] = key_size;
		key_size += sizeof(struct nfp_flower_ext_meta);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
		map[FLOW_PAY_INPORT] = key_size;
		key_size += sizeof(struct nfp_flower_in_port);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
		map[FLOW_PAY_MAC_MPLS] = key_size;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
		map[FLOW_PAY_L4] = key_size;
		key_size += sizeof(struct nfp_flower_tp_ports);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
		map[FLOW_PAY_IPV4] = key_size;
		key_size += sizeof(struct nfp_flower_ipv4);
	}
	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
		map[FLOW_PAY_IPV6] = key_size;
		key_size += sizeof(struct nfp_flower_ipv6);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
		map[FLOW_PAY_QINQ] = key_size;
		key_size += sizeof(struct nfp_flower_vlan);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		map[FLOW_PAY_GRE] = key_size;
		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
			key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
		else
			key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
	}

	if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
	    (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
		map[FLOW_PAY_UDP_TUN] = key_size;
		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
			key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		else
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
	}

	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
		map[FLOW_PAY_GENEVE_OPT] = key_size;
		key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return key_size;
}

/* Get the csum flags according to the ip proto and the mangle action. */
static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
{
	if (a_in->id != FLOW_ACTION_MANGLE)
		return;

	switch (a_in->mangle.htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		*csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
		if (ip_proto == IPPROTO_TCP)
			*csum |= TCA_CSUM_UPDATE_FLAG_TCP;
		else if (ip_proto == IPPROTO_UDP)
			*csum |= TCA_CSUM_UPDATE_FLAG_UDP;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		*csum |= TCA_CSUM_UPDATE_FLAG_TCP;
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		*csum |= TCA_CSUM_UPDATE_FLAG_UDP;
		break;
	default:
		break;
	}
}

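/* Merge the actions of all subflows (the pre_ct/nft rule pairs plus the
 * final post_ct rule) into a single flow_rule and compile it into the
 * firmware action representation carried by @flow_pay.
 */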
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
					struct nfp_flower_priv *priv,
					struct net_device *netdev,
					struct nfp_fl_payload *flow_pay,
					int num_rules)
{
	enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
	struct flow_action_entry *a_in;
	int i, j, id, num_actions = 0;
	struct flow_rule *a_rule;
	int err = 0, offset = 0;

	for (i = 0; i < num_rules; i++)
		num_actions += rules[i]->action.num_entries;

	/* Add one action per pre_ct/nft pair to make sure there is enough
	 * room to append a checksum action when doing nat.
	 */
	a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
	if (!a_rule)
		return -ENOMEM;

	/* The post_ct entry has at least one action. */
	if (rules[num_rules - 1]->action.num_entries != 0)
		tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;

	/* Actions need a BASIC dissector. */
	a_rule->match = rules[0]->match;

	/* Copy actions */
	for (j = 0; j < num_rules; j++) {
		u32 csum_updated = 0;
		u8 ip_proto = 0;

		if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
			struct flow_match_basic match;

			/* ip_proto is the only field that is needed in later
			 * compile_action, to set the correct checksum flags.
			 * It doesn't really matter which input rule's ip_proto
			 * field we take, as the earlier merge checks would have
			 * made sure that they don't conflict. We do not know
			 * which of the subflows has the ip_proto filled in, so
			 * we need to iterate through the subflows and assign
			 * the proper subflow to a_rule.
			 */
			flow_rule_match_basic(rules[j], &match);
			if (match.mask->ip_proto) {
				a_rule->match = rules[j]->match;
				ip_proto = match.key->ip_proto;
			}
		}

		for (i = 0; i < rules[j]->action.num_entries; i++) {
			a_in = &rules[j]->action.entries[i];
			id = a_in->id;

			/* Ignore CT related actions as these would already have
			 * been taken care of by previous checks, and we do not send
			 * any CT actions to the firmware.
			 */
			switch (id) {
			case FLOW_ACTION_CT:
			case FLOW_ACTION_GOTO:
			case FLOW_ACTION_CT_METADATA:
				continue;
			default:
				/* An nft entry is generated by tc ct, and its
				 * mangle actions don't care about stats, so
				 * inherit the post_ct entry's stats to satisfy
				 * flow_action_hw_stats_check.
				 * nft entry flow rules are at odd array indexes.
				 */
				if (j & 0x01) {
					if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
						a_in->hw_stats = tmp_stats;
					nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
				}
				memcpy(&a_rule->action.entries[offset++],
				       a_in, sizeof(struct flow_action_entry));
				break;
			}
		}
		/* An nft entry has mangle actions but no checksum action when
		 * doing nat; the hardware will fix up the IPv4 and TCP/UDP
		 * checksums automatically, so add a csum action to satisfy
		 * the csum action check.
		 */
		if (csum_updated) {
			struct flow_action_entry *csum_action;

			csum_action = &a_rule->action.entries[offset++];
			csum_action->id = FLOW_ACTION_CSUM;
			csum_action->csum_flags = csum_updated;
			csum_action->hw_stats = tmp_stats;
		}
	}

	/* Some actions would have been ignored, so update the num_entries field */
	a_rule->action.num_entries = offset;
	err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
	kfree(a_rule);

	return err;
}

static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
	struct flow_rule *rules[NFP_MAX_ENTRY_RULES];
	struct nfp_fl_ct_flow_entry *pre_ct_entry;
	struct nfp_fl_key_ls key_layer, tmp_layer;
	struct nfp_flower_priv *priv = zt->priv;
	u16 key_map[_FLOW_PAY_LAYERS_MAX];
	struct nfp_fl_payload *flow_pay;
	u8 *key, *msk, *kdata, *mdata;
	struct nfp_port *port = NULL;
	int num_rules, err, i, j = 0;
	struct net_device *netdev;
	bool qinq_sup;
	u32 port_id;
	u16 offset;

	netdev = m_entry->netdev;
	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

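	/* Gather the flow rules of every subflow in order: the pre_ct/nft
	 * rule pairs of any previously merged zones first, then this
	 * merge's pre_ct, nft and post_ct rules.
	 */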
	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
	num_rules = pre_ct_entry->num_prev_m_entries * 2 + _CT_TYPE_MAX;

	for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++) {
		rules[j++] = pre_ct_entry->prev_m_entries[i]->tc_m_parent->pre_ct_parent->rule;
		rules[j++] = pre_ct_entry->prev_m_entries[i]->nft_parent->rule;
	}

	rules[j++] = m_entry->tc_m_parent->pre_ct_parent->rule;
	rules[j++] = m_entry->nft_parent->rule;
	rules[j++] = m_entry->tc_m_parent->post_ct_parent->rule;

	memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
	memset(&key_map, 0, sizeof(key_map));

	/* Calculate the resultant key layer and size for offload */
	for (i = 0; i < num_rules; i++) {
		err = nfp_flower_calculate_key_layers(priv->app,
						      m_entry->netdev,
						      &tmp_layer, rules[i],
						      &tun_type, NULL);
		if (err)
			return err;

		key_layer.key_layer |= tmp_layer.key_layer;
		key_layer.key_layer_two |= tmp_layer.key_layer_two;
	}
	key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);

	flow_pay = nfp_flower_allocate_new(&key_layer);
	if (!flow_pay)
		return -ENOMEM;

	memset(flow_pay->unmasked_data, 0, key_layer.key_size);
	memset(flow_pay->mask_data, 0, key_layer.key_size);

	kdata = flow_pay->unmasked_data;
	mdata = flow_pay->mask_data;

	offset = key_map[FLOW_PAY_META_TCI];
	key = kdata + offset;
	msk = mdata + offset;
	nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
				(struct nfp_flower_meta_tci *)msk,
				key_layer.key_layer);

	if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_EXT_META];
		key = kdata + offset;
		msk = mdata + offset;
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
					    key_layer.key_layer_two);
		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
					    key_layer.key_layer_two);
	}

	/* Use the in_port from the -trk rule. The tc merge checks should
	 * already have verified that the ingress netdevs are the same.
	 */
	port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
	offset = key_map[FLOW_PAY_INPORT];
	key = kdata + offset;
	msk = mdata + offset;
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
				      port_id, false, tun_type, NULL);
	if (err)
		goto ct_offload_err;
	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
				      port_id, true, tun_type, NULL);
	if (err)
		goto ct_offload_err;

	/* The following part works on the assumption that previous checks
	 * have already filtered out flows that have different values for
	 * the different layers. Here we iterate through all the rules and
	 * merge their respective masked values (the cared-about bits); the
	 * basic method is:
	 * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
	 * final_mask = r1_mask | r2_mask | r3_mask
	 * If none of the rules contains a match, that is also fine; it
	 * simply means that the layer is not present.
	 */
	if (!qinq_sup) {
		for (i = 0; i < num_rules; i++) {
			offset = key_map[FLOW_PAY_META_TCI];
			key = kdata + offset;
			msk = mdata + offset;
			nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
					       (struct nfp_flower_meta_tci *)msk,
					       rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_MAC_MPLS];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
					       (struct nfp_flower_mac_mpls *)msk,
					       rules[i]);
			err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
						      (struct nfp_flower_mac_mpls *)msk,
						      rules[i], NULL);
			if (err)
				goto ct_offload_err;
		}
	}

	if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_IPV4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
						(struct nfp_flower_ipv4 *)msk,
						rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_IPV6];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
						(struct nfp_flower_ipv6 *)msk,
						rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
		offset = key_map[FLOW_PAY_L4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
						 (struct nfp_flower_tp_ports *)msk,
						 rules[i]);
		}
	}

	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
		offset = key_map[FLOW_PAY_QINQ];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
						(struct nfp_flower_vlan *)msk,
						rules[i]);
		}
	}

	if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
		offset = key_map[FLOW_PAY_GRE];
		key = kdata + offset;
		msk = mdata + offset;
		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_gre_tun *gre_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv6_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
			gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
			dst = &gre_match->ipv6.dst;

			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
			if (!entry) {
				err = -ENOMEM;
				goto ct_offload_err;
			}

			flow_pay->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv4_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
			dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			flow_pay->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(priv->app, dst);
		}
	}

	if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
	    key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
		offset = key_map[FLOW_PAY_UDP_TUN];
		key = kdata + offset;
		msk = mdata + offset;
		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
			struct nfp_flower_ipv6_udp_tun *udp_match;
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv6_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
			udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
			dst = &udp_match->ipv6.dst;

			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
			if (!entry) {
				err = -ENOMEM;
				goto ct_offload_err;
			}

			flow_pay->nfp_tun_ipv6 = entry;
		} else {
			__be32 dst;

			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv4_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
			dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;

			/* Store the tunnel destination in the rule data.
			 * This must be present and be an exact match.
			 */
			flow_pay->nfp_tun_ipv4_addr = dst;
			nfp_tunnel_add_ipv4_off(priv->app, dst);
		}

		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
			offset = key_map[FLOW_PAY_GENEVE_OPT];
			key = kdata + offset;
			msk = mdata + offset;
			for (i = 0; i < num_rules; i++)
				nfp_flower_compile_geneve_opt(key, msk, rules[i]);
		}
	}

	/* Merge actions into flow_pay */
	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
	if (err)
		goto ct_offload_err;

	/* Use the pointer address as the cookie, but set the last bit to 1.
	 * This is to avoid the 'is_merge_flow' check from detecting this as
	 * an already merged flow. This works since address alignment means
	 * that the last bit for pointer addresses will be 0.
	 */
	flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
	err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
					flow_pay, netdev, NULL);
	if (err)
		goto ct_offload_err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto ct_release_offload_meta_err;

	err = nfp_flower_xmit_flow(priv->app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto ct_remove_rhash_err;

	m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
	m_entry->flow_pay = flow_pay;

	if (port)
		port->tc_offload_cnt++;

	return err;

ct_remove_rhash_err:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
ct_release_offload_meta_err:
	nfp_modify_flow_metadata(priv->app, flow_pay);
ct_offload_err:
	if (flow_pay->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
	return err;
}

static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
				 struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_port *port = NULL;
	int err = 0;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
	if (!flow_pay)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, flow_pay);
	if (err)
		goto err_free_merge_flow;

	if (flow_pay->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);

	if (flow_pay->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);

	if (!flow_pay->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, flow_pay);
	if (port)
		port->tc_offload_cnt--;
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(flow_pay, rcu);
	return err;
}

static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
			       struct nfp_fl_ct_flow_entry *nft_entry,
			       struct nfp_fl_ct_tc_merge *tc_m_entry)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_nft_tc_merge *nft_m_entry;
	unsigned long new_cookie[3];
	int err;

	pre_ct_entry = tc_m_entry->pre_ct_parent;
	post_ct_entry = tc_m_entry->post_ct_parent;

	err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
	if (err)
		return err;

	/* Check that the two tc flows are also compatible with
	 * the nft entry. No need to check the pre_ct and post_ct
	 * entries as that was already done during pre_merge.
	 * The nft entry does not have a chain populated, so
	 * skip this check.
	 */
	err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
	if (err)
		return err;
	err = nfp_ct_merge_check(nft_entry, post_ct_entry);
	if (err)
		return err;
	err = nfp_ct_check_meta(post_ct_entry, nft_entry);
	if (err)
		return err;

	if (pre_ct_entry->num_prev_m_entries > 0) {
		err = nfp_ct_merge_extra_check(nft_entry, tc_m_entry);
		if (err)
			return err;
	}

	/* Combine the tc_merge and nft cookies for this entry. */
	new_cookie[0] = tc_m_entry->cookie[0];
	new_cookie[1] = tc_m_entry->cookie[1];
	new_cookie[2] = nft_entry->cookie;
	nft_m_entry = get_hashentry(&zt->nft_merge_tb,
				    &new_cookie,
				    nfp_nft_ct_merge_params,
				    sizeof(*nft_m_entry));

	if (IS_ERR(nft_m_entry))
		return PTR_ERR(nft_m_entry);

	/* nft_m_entry already present, not merging again */
	if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
	nft_m_entry->zt = zt;
	nft_m_entry->tc_m_parent = tc_m_entry;
	nft_m_entry->nft_parent = nft_entry;
	nft_m_entry->tc_flower_cookie = 0;
	/* Copy the netdev from the pre_ct entry. When the tc_m_entry was
	 * created it only combined them if the netdevs were the same, so
	 * we can use any of them.
	 */
	nft_m_entry->netdev = pre_ct_entry->netdev;

	/* Add this entry to the tc_m_list and nft_flow lists */
	list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
	list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);

	err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
				     nfp_nft_ct_merge_params);
	if (err)
		goto err_nft_ct_merge_insert;

	zt->nft_merge_count++;

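	/* A non-zero goto_chain_index on the post_ct flow means yet another
	 * ct zone follows, so record this merge result as the starting
	 * pre_ct flow of the next zone's merge instead of offloading now.
	 */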
	if (post_ct_entry->goto_chain_index > 0)
		return nfp_fl_create_new_pre_ct(nft_m_entry);

	/* Generate offload structure and send to nfp */
	err = nfp_fl_ct_add_offload(nft_m_entry);
	if (err)
		goto err_nft_ct_offload;

	return err;

err_nft_ct_offload:
	nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
			      nft_m_entry->netdev);
err_nft_ct_merge_insert:
	list_del(&nft_m_entry->tc_merge_list);
	list_del(&nft_m_entry->nft_flow_list);
	kfree(nft_m_entry);
	return err;
}

static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
			      struct nfp_fl_ct_flow_entry *ct_entry1,
			      struct nfp_fl_ct_flow_entry *ct_entry2)
{
	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
	struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
	struct nfp_fl_ct_tc_merge *m_entry;
	unsigned long new_cookie[2];
	int err;

	if (ct_entry1->type == CT_TYPE_PRE_CT) {
		pre_ct_entry = ct_entry1;
		post_ct_entry = ct_entry2;
	} else {
		post_ct_entry = ct_entry1;
		pre_ct_entry = ct_entry2;
	}

	/* Checks that the chain_index of the filter matches the
	 * chain_index of the GOTO action.
	 */
	if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index)
		return -EINVAL;

	err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
	if (err)
		return err;

	new_cookie[0] = pre_ct_entry->cookie;
	new_cookie[1] = post_ct_entry->cookie;
	m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
				nfp_tc_ct_merge_params, sizeof(*m_entry));
	if (IS_ERR(m_entry))
		return PTR_ERR(m_entry);

	/* m_entry already present, not merging again */
	if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
		return 0;

	memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
	m_entry->zt = zt;
	m_entry->post_ct_parent = post_ct_entry;
	m_entry->pre_ct_parent = pre_ct_entry;

	/* Add this entry to the pre_ct and post_ct lists */
	list_add(&m_entry->post_ct_list, &post_ct_entry->children);
	list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
	INIT_LIST_HEAD(&m_entry->children);

	err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		goto err_ct_tc_merge_insert;
	zt->tc_merge_count++;

	/* Merge with existing nft flows */
	list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
				 list_node) {
		nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
	}

	return 0;

err_ct_tc_merge_insert:
	list_del(&m_entry->post_ct_list);
	list_del(&m_entry->pre_ct_list);
	kfree(m_entry);
	return err;
}

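/* Look up, or allocate and initialise, the zone table entry for @zone.
 * A wildcarded zone lives in the dedicated priv->ct_zone_wc slot rather
 * than in the zone hash table.
 */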
static struct
nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
					 u16 zone, bool wildcarded)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	if (wildcarded && priv->ct_zone_wc)
		return priv->ct_zone_wc;

	if (!wildcarded) {
		zt = get_hashentry(&priv->ct_zone_table, &zone,
				   nfp_zone_table_params, sizeof(*zt));

		/* If priv is set this is an existing entry, just return it */
		if (IS_ERR(zt) || zt->priv)
			return zt;
	} else {
		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
		if (!zt)
			return ERR_PTR(-ENOMEM);
	}

	zt->zone = zone;
	zt->priv = priv;
	zt->nft = NULL;

	/* Init the various hash tables and lists */
	INIT_LIST_HEAD(&zt->pre_ct_list);
	INIT_LIST_HEAD(&zt->post_ct_list);
	INIT_LIST_HEAD(&zt->nft_flows_list);

	err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
	if (err)
		goto err_tc_merge_tb_init;

	err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
	if (err)
		goto err_nft_merge_tb_init;

	if (wildcarded) {
		priv->ct_zone_wc = zt;
	} else {
		err = rhashtable_insert_fast(&priv->ct_zone_table,
					     &zt->hash_node,
					     nfp_zone_table_params);
		if (err)
			goto err_zone_insert;
	}

	return zt;

err_zone_insert:
	rhashtable_destroy(&zt->nft_merge_tb);
err_nft_merge_tb_init:
	rhashtable_destroy(&zt->tc_merge_tb);
err_tc_merge_tb_init:
	kfree(zt);
	return ERR_PTR(err);
}

static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
{
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
		if (match.key->ingress_ifindex & match.mask->ingress_ifindex)
			return __dev_get_by_index(&init_net,
						  match.key->ingress_ifindex);
	}

	return NULL;
}

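/* tc-generated nft mangle actions carry val/mask in host byte order,
 * while the offload code expects network byte order; convert the 32-bit
 * (IP address) and 16-bit (TCP/UDP port) mangles accordingly.
 */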
static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
{
	if (mangle_action->id != FLOW_ACTION_MANGLE)
		return;

	switch (mangle_action->mangle.htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
		mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
		return;

	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
		mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
		return;

	default:
		return;
	}
}

static int nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
				    struct nfp_fl_ct_flow_entry *entry)
{
	switch (act->id) {
	case FLOW_ACTION_CT:
		if (act->ct.action == TCA_CT_ACT_NAT)
			entry->flags |= NFP_FL_ACTION_DO_NAT;
		break;

	case FLOW_ACTION_MANGLE:
		entry->flags |= NFP_FL_ACTION_DO_MANGLE;
		break;

	default:
		break;
	}

	return 0;
}

static struct
nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
					 struct net_device *netdev,
					 struct flow_cls_offload *flow,
					 bool is_nft, struct netlink_ext_ack *extack)
{
	struct nf_flow_match *nft_match = NULL;
	struct nfp_fl_ct_flow_entry *entry;
	struct nfp_fl_ct_map_entry *map;
	struct flow_action_entry *act;
	int err, i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
	if (!entry->rule) {
		err = -ENOMEM;
		goto err_pre_ct_rule;
	}

	/* nft flows get destroyed after the callback returns, so we need
	 * to do a full copy instead of just keeping a reference.
	 */
	if (is_nft) {
		nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
		if (!nft_match) {
			err = -ENOMEM;
			goto err_pre_ct_act;
		}
		memcpy(&nft_match->dissector, flow->rule->match.dissector,
		       sizeof(nft_match->dissector));
		memcpy(&nft_match->mask, flow->rule->match.mask,
		       sizeof(nft_match->mask));
		memcpy(&nft_match->key, flow->rule->match.key,
		       sizeof(nft_match->key));
		entry->rule->match.dissector = &nft_match->dissector;
		entry->rule->match.mask = &nft_match->mask;
		entry->rule->match.key = &nft_match->key;

		if (!netdev)
			netdev = get_netdev_from_rule(entry->rule);
	} else {
		entry->rule->match.dissector = flow->rule->match.dissector;
		entry->rule->match.mask = flow->rule->match.mask;
		entry->rule->match.key = flow->rule->match.key;
	}

	entry->zt = zt;
	entry->netdev = netdev;
	entry->cookie = flow->cookie > 0 ? flow->cookie : (unsigned long)entry;
	entry->chain_index = flow->common.chain_index;
	entry->tun_offset = NFP_FL_CT_NO_TUN;

	/* Copy over the action data. Unfortunately we do not get a handle
	 * to the original tcf_action data, and the flow objects get
	 * destroyed, so we cannot just save a pointer to that either, and
	 * need to copy the data over.
	 */
	entry->rule->action.num_entries = flow->rule->action.num_entries;
	flow_action_for_each(i, act, &flow->rule->action) {
		struct flow_action_entry *new_act;

		new_act = &entry->rule->action.entries[i];
		memcpy(new_act, act, sizeof(struct flow_action_entry));
		/* nft entry mangle fields are in host byte order and need
		 * translating to network byte order.
		 */
		if (is_nft)
			nfp_nft_ct_translate_mangle_action(new_act);

		nfp_nft_ct_set_flow_flag(new_act, entry);
		/* Tunnel encap is a special case: the tunnel info has to be
		 * allocated and copied.
		 */
		if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
			struct ip_tunnel_info *tun = act->tunnel;
			size_t tun_size = sizeof(*tun) + tun->options_len;

			new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
			if (!new_act->tunnel) {
				err = -ENOMEM;
				goto err_pre_ct_tun_cp;
			}
			entry->tun_offset = i;
		}
	}

	INIT_LIST_HEAD(&entry->children);

	if (flow->cookie == 0)
		return entry;

	/* Now add a ct map entry to flower-priv */
	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
			    nfp_ct_map_params, sizeof(*map));
	if (IS_ERR(map)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry creation failed");
		err = -ENOMEM;
		goto err_ct_flow_insert;
	}
	map->cookie = flow->cookie;
	map->ct_entry = entry;
	err = rhashtable_insert_fast(&zt->priv->ct_map_table,
				     &map->hash_node,
				     nfp_ct_map_params);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: ct map entry table add failed");
		goto err_map_insert;
	}

	return entry;

err_map_insert:
	kfree(map);
err_ct_flow_insert:
	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
err_pre_ct_tun_cp:
	kfree(nft_match);
err_pre_ct_act:
	kfree(entry->rule);
err_pre_ct_rule:
	kfree(entry);
	return ERR_PTR(err);
}

static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_entry->zt;

	/* Flow is in HW, need to delete */
	if (m_entry->tc_flower_cookie) {
		err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
					    m_entry->netdev);
		if (err)
			return;
	}

	WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
					    &m_entry->hash_node,
					    nfp_nft_ct_merge_params));
	zt->nft_merge_count--;
	list_del(&m_entry->tc_merge_list);
	list_del(&m_entry->nft_flow_list);

	if (m_entry->next_pre_ct_entry) {
		struct nfp_fl_ct_map_entry pre_ct_map_ent;

		pre_ct_map_ent.ct_entry = m_entry->next_pre_ct_entry;
		pre_ct_map_ent.cookie = 0;
		nfp_fl_ct_del_flow(&pre_ct_map_ent);
	}

	kfree(m_entry);
}

static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
{
	struct nfp_fl_nft_tc_merge *m_entry, *tmp;

	/* These post entries are parts of two lists, one is a list of
	 * nft_entries and the other is a list of tc_merge structures.
	 * Iterate through the relevant list and clean up the entries.
	 */

	if (is_nft_flow) {
		/* Need to iterate through the list of nft_flow entries */
		struct nfp_fl_ct_flow_entry *ct_entry = entry;

		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
					 nft_flow_list) {
			cleanup_nft_merge_entry(m_entry);
		}
	} else {
		/* Need to iterate through the list of tc_merged_flow entries */
		struct nfp_fl_ct_tc_merge *ct_entry = entry;

		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
					 tc_merge_list) {
			cleanup_nft_merge_entry(m_entry);
		}
	}
}

static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
{
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	zt = m_ent->zt;
	err = rhashtable_remove_fast(&zt->tc_merge_tb,
				     &m_ent->hash_node,
				     nfp_tc_ct_merge_params);
	if (err)
		pr_warn("WARNING: could not remove merge_entry from hashtable\n");
	zt->tc_merge_count--;
	list_del(&m_ent->post_ct_list);
	list_del(&m_ent->pre_ct_list);

	if (!list_empty(&m_ent->children))
		nfp_free_nft_merge_children(m_ent, false);
	kfree(m_ent);
}

static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
{
	struct nfp_fl_ct_tc_merge *m_ent, *tmp;

	switch (entry->type) {
	case CT_TYPE_PRE_CT:
		list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
			nfp_del_tc_merge_entry(m_ent);
		}
		break;
	case CT_TYPE_POST_CT:
		list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
			nfp_del_tc_merge_entry(m_ent);
		}
		break;
	default:
		break;
	}
}

void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
{
	list_del(&entry->list_node);

	if (!list_empty(&entry->children)) {
		if (entry->type == CT_TYPE_NFT)
			nfp_free_nft_merge_children(entry, true);
		else
			nfp_free_tc_merge_children(entry);
	}

	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);

	if (entry->type == CT_TYPE_NFT) {
		struct nf_flow_match *nft_match;

		nft_match = container_of(entry->rule->match.dissector,
					 struct nf_flow_match, dissector);
		kfree(nft_match);
	}

	kfree(entry->rule);
	kfree(entry);
}

static struct flow_action_entry *get_flow_act_ct(struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	/* More than one ct action may be present in a flow rule;
	 * return the first one that is not a CT clear action.
	 */
	flow_action_for_each(i, act, &rule->action) {
		if (act->id == FLOW_ACTION_CT && act->ct.action != TCA_CT_ACT_CLEAR)
			return act;
	}

	return NULL;
}

static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id)
{
	struct flow_action_entry *act = NULL;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		if (act->id == act_id)
			return act;
	}
	return NULL;
}

static void
nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
			struct nfp_fl_ct_zone_entry *zt_src,
			struct nfp_fl_ct_zone_entry *zt_dst)
{
	struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
	struct list_head *ct_list;

	if (ct_entry1->type == CT_TYPE_PRE_CT)
		ct_list = &zt_src->post_ct_list;
	else if (ct_entry1->type == CT_TYPE_POST_CT)
		ct_list = &zt_src->pre_ct_list;
	else
		return;

	list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
				 list_node) {
		nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
	}
}

static void
nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
			 struct nfp_fl_ct_zone_entry *zt)
{
	struct nfp_fl_ct_tc_merge *tc_merge_entry;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
	rhashtable_walk_start(&iter);
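	/* Pause the walk around each merge: rhashtable_walk_next() runs in an
	 * RCU read-side section, and nfp_ct_do_nft_merge() may block. A
	 * stopped and restarted walk can see entries twice or miss concurrent
	 * insertions, which is tolerable for opportunistic merging.
	 */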
	while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(tc_merge_entry))
			continue;
		rhashtable_walk_stop(&iter);
		nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
		rhashtable_walk_start(&iter);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}

int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack,
			    struct nfp_fl_nft_tc_merge *m_entry)
{
	struct flow_action_entry *ct_act, *ct_goto;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	ct_act = get_flow_act_ct(flow->rule);
	if (!ct_act) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack action empty in conntrack offload");
		return -EOPNOTSUPP;
	}

	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
	if (!ct_goto) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack requires ACTION_GOTO");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

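	/* The first pre_ct rule in a zone binds the zone table to the
	 * conntrack flowtable by registering a callback, so that nft flows
	 * offloaded from this table reach nfp_fl_ct_handle_nft_flow().
	 */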
	if (!zt->nft) {
		zt->nft = ct_act->ct.flow_table;
		err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "offload error: Could not register nft_callback");
			return err;
		}
	}

	/* Add entry to pre_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);
	ct_entry->type = CT_TYPE_PRE_CT;
	ct_entry->chain_index = flow->common.chain_index;
	ct_entry->goto_chain_index = ct_goto->chain_index;

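	/* When called from nfp_fl_create_new_pre_ct(), m_entry is the merged
	 * flow that spawned this chained pre_ct entry. Carry over the chain
	 * of earlier merge entries and append m_entry so that stats and
	 * teardown can reach every preceding recirculation stage.
	 */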
	if (m_entry) {
		struct nfp_fl_ct_flow_entry *pre_ct_entry;
		int i;

		pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
		for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++)
			ct_entry->prev_m_entries[i] = pre_ct_entry->prev_m_entries[i];
		ct_entry->prev_m_entries[i++] = m_entry;
		ct_entry->num_prev_m_entries = i;

		m_entry->next_pre_ct_entry = ct_entry;
	}

	list_add(&ct_entry->list_node, &zt->pre_ct_list);
	zt->pre_ct_count++;

	nfp_ct_merge_tc_entries(ct_entry, zt, zt);

	/* Need to check and merge with tables in the wc_zone as well */
	if (priv->ct_zone_wc)
		nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);

	return 0;
}

int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct net_device *netdev,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	bool wildcarded = false;
	struct flow_match_ct ct;
	struct flow_action_entry *ct_goto;

	flow_rule_match_ct(rule, &ct);
	if (!ct.mask->ct_zone) {
		wildcarded = true;
	} else if (ct.mask->ct_zone != U16_MAX) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: partially wildcarded ct_zone is not supported");
		return -EOPNOTSUPP;
	}

	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
	if (IS_ERR(zt)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "offload error: Could not create zone table entry");
		return PTR_ERR(zt);
	}

	/* Add entry to post_ct_list */
	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);

	ct_entry->type = CT_TYPE_POST_CT;
	ct_entry->chain_index = flow->common.chain_index;
	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
	ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0;
	list_add(&ct_entry->list_node, &zt->post_ct_list);
	zt->post_ct_count++;

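	/* A wildcarded ct_zone match can pair with pre_ct rules from any
	 * zone, so scan every zone table for merge candidates rather than
	 * only this entry's own zone.
	 */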
	if (wildcarded) {
		/* Iterate through all zone tables, looking for merges with
		 * pre_ct entries, and merge them.
		 */
		struct rhashtable_iter iter;
		struct nfp_fl_ct_zone_entry *zone_table;

		rhashtable_walk_enter(&priv->ct_zone_table, &iter);
		rhashtable_walk_start(&iter);
		while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(zone_table))
				continue;
			rhashtable_walk_stop(&iter);
			nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
			rhashtable_walk_start(&iter);
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	} else {
		nfp_ct_merge_tc_entries(ct_entry, zt, zt);
	}

	return 0;
}

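/* Replay the post_ct rule of an already merged flow as the pre_ct rule of
 * the next recirculation zone. The NULL extack reflects that there is no
 * user request to report errors against, and the chain depth is bounded by
 * NFP_MAX_RECIRC_CT_ZONES.
 */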
int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry)
{
	struct nfp_fl_ct_flow_entry *pre_ct_entry, *post_ct_entry;
	struct flow_cls_offload new_pre_ct_flow;
	int err;

	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
	if (pre_ct_entry->num_prev_m_entries >= NFP_MAX_RECIRC_CT_ZONES - 1)
		return -1;

	post_ct_entry = m_entry->tc_m_parent->post_ct_parent;
	memset(&new_pre_ct_flow, 0, sizeof(struct flow_cls_offload));
	new_pre_ct_flow.rule = post_ct_entry->rule;
	new_pre_ct_flow.common.chain_index = post_ct_entry->chain_index;

	err = nfp_fl_ct_handle_pre_ct(pre_ct_entry->zt->priv,
				      pre_ct_entry->netdev,
				      &new_pre_ct_flow, NULL,
				      m_entry);
	return err;
}

static void
nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
		    enum ct_entry_type type, u64 *m_pkts,
		    u64 *m_bytes, u64 *m_used)
{
	struct nfp_flower_priv *priv = nft_merge->zt->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nft_merge->flow_pay;
	if (!nfp_flow)
		return;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
	*m_pkts += priv->stats[ctx_id].pkts;
	*m_bytes += priv->stats[ctx_id].bytes;
	*m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);

	/* If request is for a sub_flow which is part of a tunnel merged
	 * flow then update stats from tunnel merged flows first.
	 */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(priv->app, nfp_flow);

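	/* The hardware counters are owned by the merged flow: fold them into
	 * the caller's running totals via m_pkts/m_bytes/m_used, and mirror
	 * them into the cached stats of the parent entries that did not
	 * initiate this request.
	 */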
	if (type != CT_TYPE_NFT) {
		/* Update nft cached stats */
		flow_stats_update(&nft_merge->nft_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	} else {
		/* Update pre_ct cached stats */
		flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
		/* Update post_ct cached stats */
		flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
				  priv->stats[ctx_id].bytes,
				  priv->stats[ctx_id].pkts,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	}

	/* Update previous pre_ct/post_ct/nft flow stats */
	if (nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries > 0) {
		struct nfp_fl_nft_tc_merge *tmp_nft_merge;
		int i;

		for (i = 0; i < nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries; i++) {
			tmp_nft_merge = nft_merge->tc_m_parent->pre_ct_parent->prev_m_entries[i];
			flow_stats_update(&tmp_nft_merge->tc_m_parent->pre_ct_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
			flow_stats_update(&tmp_nft_merge->tc_m_parent->post_ct_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
			flow_stats_update(&tmp_nft_merge->nft_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	}

	/* Reset stats from the nfp */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
}

int nfp_fl_ct_stats(struct flow_cls_offload *flow,
		    struct nfp_fl_ct_map_entry *ct_map_ent)
{
	struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
	struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
	struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;

	u64 pkts = 0, bytes = 0, used = 0;
	u64 m_pkts, m_bytes, m_used;

	spin_lock_bh(&ct_entry->zt->priv->stats_lock);

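	/* Sum hardware counters from every merged flow beneath this entry,
	 * crediting the partner pre_ct/post_ct entry along the way so each
	 * side of a merge reports the same traffic.
	 */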
	if (ct_entry->type == CT_TYPE_PRE_CT) {
		/* Iterate tc_merge entries associated with this flow */
		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
					 pre_ct_list) {
			m_pkts = 0;
			m_bytes = 0;
			m_used = 0;
			/* Iterate nft_merge entries associated with this tc_merge flow */
			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
						 tc_merge_list) {
				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
						    &m_pkts, &m_bytes, &m_used);
			}
			pkts += m_pkts;
			bytes += m_bytes;
			used = max_t(u64, used, m_used);
			/* Update post_ct partner */
			flow_stats_update(&tc_merge->post_ct_parent->stats,
					  m_bytes, m_pkts, 0, m_used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	} else if (ct_entry->type == CT_TYPE_POST_CT) {
		/* Iterate tc_merge entries associated with this flow */
		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
					 post_ct_list) {
			m_pkts = 0;
			m_bytes = 0;
			m_used = 0;
			/* Iterate nft_merge entries associated with this tc_merge flow */
			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
						 tc_merge_list) {
				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
						    &m_pkts, &m_bytes, &m_used);
			}
			pkts += m_pkts;
			bytes += m_bytes;
			used = max_t(u64, used, m_used);
			/* Update pre_ct partner */
			flow_stats_update(&tc_merge->pre_ct_parent->stats,
					  m_bytes, m_pkts, 0, m_used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	} else {
		/* Iterate nft_merge entries associated with this nft flow */
		list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
					 nft_flow_list) {
			nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
					    &pkts, &bytes, &used);
		}
	}

	/* Add stats from this request to stats potentially cached by
	 * previous requests.
	 */
	flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
			  FLOW_ACTION_HW_STATS_DELAYED);
	/* Finally update the flow stats from the original stats request */
	flow_stats_update(&flow->stats, ct_entry->stats.bytes,
			  ct_entry->stats.pkts, 0,
			  ct_entry->stats.lastused,
			  FLOW_ACTION_HW_STATS_DELAYED);
	/* Stats have been synced to the original flow, so the cache can
	 * now be cleared.
	 */
	ct_entry->stats.pkts = 0;
	ct_entry->stats.bytes = 0;
	spin_unlock_bh(&ct_entry->zt->priv->stats_lock);

	return 0;
}

static bool
nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
{
	struct flow_rule *flow_rule = flow->rule;
	struct flow_action *flow_action =
		&flow_rule->action;
	struct flow_action_entry *act;
	int i;

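	/* The low NFCT_INFOMASK bits of the ct metadata cookie encode the
	 * enum ip_conntrack_info state; flows still in IP_CT_NEW are not
	 * offloaded.
	 */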
	flow_action_for_each(i, act, flow_action) {
		if (act->id == FLOW_ACTION_CT_METADATA) {
			enum ip_conntrack_info ctinfo =
				act->ct_metadata.cookie & NFCT_INFOMASK;

			return ctinfo != IP_CT_NEW;
		}
	}

	return false;
}

static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct netlink_ext_ack *extack = NULL;

	extack = flow->common.extack;
	switch (flow->command) {
	case FLOW_CLS_REPLACE:
		if (!nfp_fl_ct_offload_nft_supported(flow))
			return -EOPNOTSUPP;

		/* Netfilter can request offload multiple times for the same
		 * flow - protect against adding duplicates.
		 */
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		if (!ct_map_ent) {
			ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
			if (IS_ERR(ct_entry))
				return PTR_ERR(ct_entry);
			ct_entry->type = CT_TYPE_NFT;
			list_add(&ct_entry->list_node, &zt->nft_flows_list);
			zt->nft_flows_count++;
			nfp_ct_merge_nft_with_tc(ct_entry, zt);
		}
		return 0;
	case FLOW_CLS_DESTROY:
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		return nfp_fl_ct_del_flow(ct_map_ent);
	case FLOW_CLS_STATS:
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
						    nfp_ct_map_params);
		if (ct_map_ent)
			return nfp_fl_ct_stats(flow, ct_map_ent);
		break;
	default:
		break;
	}
	return -EINVAL;
}

int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *flow = type_data;
	struct nfp_fl_ct_zone_entry *zt = cb_priv;
	int err = -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
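		/* nfp_fl_ct_del_flow() can hold nfp_fl_lock while it tears
		 * down this zone's flowtable callback, so a plain
		 * mutex_lock() here could deadlock. Poll with trylock and
		 * bail out once the teardown path has cleared zt->nft.
		 */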
		while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
			if (!zt->nft) /* avoid deadlock */
				return err;
			msleep(20);
		}
		err = nfp_fl_ct_offload_nft_flow(zt, flow);
		mutex_unlock(&zt->priv->nfp_fl_lock);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return err;
}

static void
nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
{
	struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
	struct nfp_fl_ct_map_entry *ct_map_ent;

	list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
				 list_node) {
		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
						    &nft_entry->cookie,
						    nfp_ct_map_params);
		nfp_fl_ct_del_flow(ct_map_ent);
	}
}

int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
{
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	struct rhashtable *m_table;
	struct nf_flowtable *nft;

	if (!ct_map_ent)
		return -ENOENT;

	zt = ct_map_ent->ct_entry->zt;
	ct_entry = ct_map_ent->ct_entry;
	m_table = &zt->priv->ct_map_table;

	switch (ct_entry->type) {
	case CT_TYPE_PRE_CT:
		zt->pre_ct_count--;
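		/* A zero cookie marks the on-stack map entry used to tear
		 * down a chained pre_ct flow (see cleanup_nft_merge_entry());
		 * it was never inserted into the map table and must not be
		 * removed from it or freed here.
		 */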
		if (ct_map_ent->cookie > 0)
			rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
					       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		if (ct_map_ent->cookie > 0)
			kfree(ct_map_ent);

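		/* Removing the last pre_ct rule of the zone unbinds it from
		 * the conntrack flowtable. Clear zt->nft before dropping the
		 * callback so a concurrent nfp_fl_ct_handle_nft_flow() can
		 * bail out rather than deadlock on nfp_fl_lock.
		 */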
		if (!zt->pre_ct_count && zt->nft) {
			nft = zt->nft;
			zt->nft = NULL; /* avoid deadlock */
			nf_flow_table_offload_del_cb(nft,
						     nfp_fl_ct_handle_nft_flow,
						     zt);
			nfp_fl_ct_clean_nft_entries(zt);
		}
		break;
	case CT_TYPE_POST_CT:
		zt->post_ct_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		kfree(ct_map_ent);
		break;
	case CT_TYPE_NFT:
		zt->nft_flows_count--;
		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
				       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
		kfree(ct_map_ent);
		break;
	default:
		break;
	}

	return 0;
}