Commit | Line | Data |
---|---|---|
96de2506 JK |
1 | // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) |
2 | /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ | |
8a276873 PJV |
3 | |
4 | #include <linux/skbuff.h> | |
5 | #include <net/devlink.h> | |
6 | #include <net/pkt_cls.h> | |
7 | ||
8 | #include "cmsg.h" | |
9 | #include "main.h" | |
c8b034fb | 10 | #include "conntrack.h" |
8a276873 PJV |
11 | #include "../nfpcore/nfp_cpp.h" |
12 | #include "../nfpcore/nfp_nsp.h" | |
13 | #include "../nfp_app.h" | |
14 | #include "../nfp_main.h" | |
15 | #include "../nfp_net.h" | |
16 | #include "../nfp_port.h" | |
17 | ||
/* TCP header flag bits the firmware can match on. */
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

/* flow_dissector control-key flags the firmware can match on. */
#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

/* Complete set of flow dissector keys this driver is able to offload;
 * any other used key causes the rule to be rejected up front.
 */
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_CT) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_META) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_IP))

/* All tunnel-related dissector keys.  If any of these is used, one of
 * the required sets below must be present in full.
 */
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP))

/* Minimum keys required for an IPv4 tunnel match. */
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

/* Minimum keys required for an IPv6 tunnel match. */
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
	(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

/* Key layers a sub-flow may use and still take part in flow merging. */
#define NFP_FLOWER_MERGE_FIELDS \
	(NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_TP | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)

/* Key layers permitted in a pre-tunnel rule. */
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
	(NFP_FLOWER_LAYER_EXT_META | \
	 NFP_FLOWER_LAYER_PORT | \
	 NFP_FLOWER_LAYER_MAC | \
	 NFP_FLOWER_LAYER_IPV4 | \
	 NFP_FLOWER_LAYER_IPV6)
/* Accumulator for the fields relevant when deciding whether two flows
 * can be merged.  The anonymous union lets the same storage be viewed
 * either as the individual match fields (tci/l2/l4/ipv4-or-ipv6) or as
 * a flat bitmap (vals) suitable for bitmap_andnot() comparison.
 * NOTE(review): vals[8] is presumably sized to cover the largest union
 * member — verify if any of the layer structs grow.
 */
struct nfp_flower_merge_check {
	union {
		struct {
			__be16 tci;
			struct nfp_flower_mac_mpls l2;
			struct nfp_flower_tp_ports l4;
			union {
				struct nfp_flower_ipv4 ipv4;
				struct nfp_flower_ipv6 ipv6;
			};
		};
		unsigned long vals[8];
	};
};
93 | ||
400a5e5f | 94 | int |
7885b4fc JH |
95 | nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, |
96 | u8 mtype) | |
81f3ddf2 PJV |
97 | { |
98 | u32 meta_len, key_len, mask_len, act_len, tot_len; | |
81f3ddf2 PJV |
99 | struct sk_buff *skb; |
100 | unsigned char *msg; | |
101 | ||
102 | meta_len = sizeof(struct nfp_fl_rule_metadata); | |
103 | key_len = nfp_flow->meta.key_len; | |
104 | mask_len = nfp_flow->meta.mask_len; | |
105 | act_len = nfp_flow->meta.act_len; | |
106 | ||
107 | tot_len = meta_len + key_len + mask_len + act_len; | |
108 | ||
109 | /* Convert to long words as firmware expects | |
110 | * lengths in units of NFP_FL_LW_SIZ. | |
111 | */ | |
112 | nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ; | |
113 | nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ; | |
114 | nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ; | |
115 | ||
7885b4fc | 116 | skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL); |
81f3ddf2 PJV |
117 | if (!skb) |
118 | return -ENOMEM; | |
119 | ||
120 | msg = nfp_flower_cmsg_get_data(skb); | |
121 | memcpy(msg, &nfp_flow->meta, meta_len); | |
122 | memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len); | |
123 | memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len); | |
124 | memcpy(&msg[meta_len + key_len + mask_len], | |
125 | nfp_flow->action_data, act_len); | |
126 | ||
127 | /* Convert back to bytes as software expects | |
128 | * lengths in units of bytes. | |
129 | */ | |
130 | nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ; | |
131 | nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ; | |
132 | nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ; | |
133 | ||
7885b4fc | 134 | nfp_ctrl_tx(app->ctrl, skb); |
81f3ddf2 PJV |
135 | |
136 | return 0; | |
137 | } | |
138 | ||
4b15fb18 | 139 | static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule) |
af9d842c | 140 | { |
8f256622 PNA |
141 | return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) || |
142 | flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) || | |
143 | flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || | |
144 | flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); | |
af9d842c PJV |
145 | } |
146 | ||
4b15fb18 | 147 | static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule) |
2a047845 | 148 | { |
2a047845 PJV |
149 | return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || |
150 | flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); | |
151 | } | |
152 | ||
0a22b17a | 153 | static int |
986643de | 154 | nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, |
78a722af | 155 | u32 *key_layer_two, int *key_size, bool ipv6, |
14179c4b | 156 | struct netlink_ext_ack *extack) |
0a22b17a | 157 | { |
78a722af JH |
158 | if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY || |
159 | (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) { | |
14179c4b | 160 | NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length"); |
0a22b17a | 161 | return -EOPNOTSUPP; |
14179c4b | 162 | } |
0a22b17a | 163 | |
986643de | 164 | if (enc_opts->len > 0) { |
0a22b17a PJV |
165 | *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP; |
166 | *key_size += sizeof(struct nfp_flower_geneve_options); | |
167 | } | |
168 | ||
169 | return 0; | |
170 | } | |
171 | ||
/* Work out the key layers required for a UDP tunnel (VXLAN or geneve)
 * match, keyed off the tunnel destination port.
 *
 * Updates *tun_type, *key_layer, *key_layer_two and *key_size in place.
 * Geneve and all IPv6 tunnels additionally need the extended metadata
 * layer.  Encap option matches are only supported on geneve, and only
 * when the firmware advertises NFP_FL_FEATS_GENEVE_OPT.
 *
 * Return: 0 on success, -EOPNOTSUPP (with extack set) for unknown ports
 * or unsupported firmware/option combinations.
 */
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
			      struct flow_dissector_key_enc_opts *enc_op,
			      u32 *key_layer_two, u8 *key_layer, int *key_size,
			      struct nfp_flower_priv *priv,
			      enum nfp_flower_tun_type *tun_type, bool ipv6,
			      struct netlink_ext_ack *extack)
{
	int err;

	switch (enc_ports->dst) {
	case htons(IANA_VXLAN_UDP_PORT):
		*tun_type = NFP_FL_TUNNEL_VXLAN;
		*key_layer |= NFP_FLOWER_LAYER_VXLAN;

		if (ipv6) {
			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
			*key_size += sizeof(struct nfp_flower_ext_meta);
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		/* VXLAN carries no option TLVs the firmware can match. */
		if (enc_op) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
			return -EOPNOTSUPP;
		}
		break;
	case htons(GENEVE_UDP_PORT):
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
			return -EOPNOTSUPP;
		}
		*tun_type = NFP_FL_TUNNEL_GENEVE;
		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
		*key_size += sizeof(struct nfp_flower_ext_meta);
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

		if (ipv6) {
			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
		} else {
			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
		}

		/* Option matches need an extra firmware feature and may
		 * add a further key layer.
		 */
		if (!enc_op)
			break;
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
			return -EOPNOTSUPP;
		}
		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
						ipv6, extack);
		if (err)
			return err;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
		return -EOPNOTSUPP;
	}

	return 0;
}
236 | ||
/* Walk a TC flow_rule and compute which NFP flower key layers are needed
 * to represent its match, accumulating the total key size as it goes.
 *
 * On success the result is written to @ret_key_ls (key_layer,
 * key_layer_two, key_size) and, for tunnel matches, @tun_type is set.
 * Every unsupported combination returns -EOPNOTSUPP with an extack
 * message explaining the rejection.
 */
int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct flow_rule *rule,
				enum nfp_flower_tun_type *tun_type,
				struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL};
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	/* Reject any dissector key outside the supported whitelist. */
	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
		return -EOPNOTSUPP;
	}

	/* If any tun dissector is used then the required set must be used. */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
		return -EOPNOTSUPP;
	}

	/* Every key starts with metadata/TCI and the in-port layer. */
	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		/* PCP matching is a separate firmware capability. */
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
			return -EOPNOTSUPP;
		}
		/* QinQ-capable firmware keeps VLANs in a dedicated
		 * layer-two block; add it once.
		 */
		if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
		    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan cvlan;

		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
			return -EOPNOTSUPP;
		}

		flow_rule_match_vlan(rule, &cvlan);
		/* Shares the QinQ block with the outer VLAN above. */
		if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_size += sizeof(struct nfp_flower_vlan);
			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_ipv6_addrs ipv6_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;
		bool ipv6_tun = false;

		flow_rule_match_enc_control(rule, &enc_ctl);

		/* The tunnel address family must be matched exactly. */
		if (enc_ctl.mask->addr_type != 0xffff) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
			return -EOPNOTSUPP;
		}

		ipv6_tun = enc_ctl.key->addr_type ==
				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (ipv6_tun &&
		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
			return -EOPNOTSUPP;
		}

		if (!ipv6_tun &&
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
			return -EOPNOTSUPP;
		}

		/* Only an exact-match tunnel destination is offloadable. */
		if (ipv6_tun) {
			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
				       sizeof(ipv6_addrs.mask->dst))) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
				return -EOPNOTSUPP;
			}
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
			/* Check if GRE, which has no enc_ports */
			if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
				return -EOPNOTSUPP;
			}

			*tun_type = NFP_FL_TUNNEL_GRE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GRE;

			if (ipv6_tun) {
				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
				key_size +=
					sizeof(struct nfp_flower_ipv6_gre_tun);
			} else {
				key_size +=
					sizeof(struct nfp_flower_ipv4_gre_tun);
			}

			if (enc_op.key) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
				return -EOPNOTSUPP;
			}
		} else {
			flow_rule_match_enc_ports(rule, &enc_ports);
			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
				return -EOPNOTSUPP;
			}

			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
							    enc_op.key,
							    &key_layer_two,
							    &key_layer,
							    &key_size, priv,
							    tun_type, ipv6_tun,
							    extack);
			if (err)
				return err;

			/* Ensure the ingress netdev matches the expected
			 * tun type.
			 */
			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			/* MPLS lives in the MAC layer; add it if the
			 * ETH_ADDRS/MPLS check above did not already.
			 */
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
			return -EOPNOTSUPP;
		}
	} else if (nfp_flower_check_higher_than_mac(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
		return -EOPNOTSUPP;
	}

	if (basic.mask && basic.mask->ip_proto) {
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		}
	}

	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
	    nfp_flower_check_higher_than_l3(rule)) {
		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
			return -EOPNOTSUPP;
		}

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
			return -EOPNOTSUPP;
		}

		/* We need to store TCP flags in the either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include a IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
			return -EOPNOTSUPP;
		}

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
			return -EOPNOTSUPP;
		}
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}
542 | ||
71e88cfb | 543 | struct nfp_fl_payload * |
4f63fde3 | 544 | nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) |
af9d842c PJV |
545 | { |
546 | struct nfp_fl_payload *flow_pay; | |
547 | ||
548 | flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL); | |
549 | if (!flow_pay) | |
550 | return NULL; | |
551 | ||
552 | flow_pay->meta.key_len = key_layer->key_size; | |
553 | flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL); | |
554 | if (!flow_pay->unmasked_data) | |
555 | goto err_free_flow; | |
556 | ||
557 | flow_pay->meta.mask_len = key_layer->key_size; | |
558 | flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL); | |
559 | if (!flow_pay->mask_data) | |
560 | goto err_free_unmasked; | |
561 | ||
1a1e586f PJV |
562 | flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL); |
563 | if (!flow_pay->action_data) | |
564 | goto err_free_mask; | |
565 | ||
2d9ad71a | 566 | flow_pay->nfp_tun_ipv4_addr = 0; |
cfa18993 | 567 | flow_pay->nfp_tun_ipv6 = NULL; |
af9d842c | 568 | flow_pay->meta.flags = 0; |
aa6ce2ea | 569 | INIT_LIST_HEAD(&flow_pay->linked_flows); |
8af56f40 | 570 | flow_pay->in_hw = false; |
f5c977ee | 571 | flow_pay->pre_tun_rule.dev = NULL; |
c50647d3 | 572 | |
af9d842c PJV |
573 | return flow_pay; |
574 | ||
1a1e586f PJV |
575 | err_free_mask: |
576 | kfree(flow_pay->mask_data); | |
af9d842c PJV |
577 | err_free_unmasked: |
578 | kfree(flow_pay->unmasked_data); | |
579 | err_free_flow: | |
580 | kfree(flow_pay); | |
581 | return NULL; | |
582 | } | |
583 | ||
107e37bb JH |
/* Fold the field masks of every header-rewriting action in @flow into
 * @merge, so the caller knows which packet fields the flow's actions may
 * set.  Also counts output actions into *act_out (if non-NULL) and
 * records the jump_id of the last action walked in *last_act_id (if
 * non-NULL).
 *
 * Return: 0 on success, -EOPNOTSUPP if an unknown action opcode is seen.
 */
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
				     struct nfp_flower_merge_check *merge,
				     u8 *last_act_id, int *act_out)
{
	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
	struct nfp_fl_set_ip4_addrs *ipv4_add;
	struct nfp_fl_set_ipv6_addr *ipv6_add;
	struct nfp_fl_push_vlan *push_vlan;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_set_tport *tport;
	struct nfp_fl_set_eth *eth;
	struct nfp_fl_act_head *a;
	unsigned int act_off = 0;
	bool ipv6_tun = false;
	u8 act_id = 0;
	u8 *ports;
	int i;

	/* Walk the packed action list; each entry starts with an
	 * nfp_fl_act_head giving its opcode and length in long words.
	 */
	while (act_off < flow->meta.act_len) {
		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
		act_id = a->jump_id;

		switch (act_id) {
		case NFP_FL_ACTION_OPCODE_OUTPUT:
			if (act_out)
				(*act_out)++;
			break;
		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
			/* A non-zero TCI push sets the whole TCI field. */
			push_vlan = (struct nfp_fl_push_vlan *)a;
			if (push_vlan->vlan_tci)
				merge->tci = cpu_to_be16(0xffff);
			break;
		case NFP_FL_ACTION_OPCODE_POP_VLAN:
			merge->tci = cpu_to_be16(0);
			break;
		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
			/* New tunnel header means l2 to l4 can be matched. */
			eth_broadcast_addr(&merge->l2.mac_dst[0]);
			eth_broadcast_addr(&merge->l2.mac_src[0]);
			memset(&merge->l4, 0xff,
			       sizeof(struct nfp_flower_tp_ports));
			/* ipv6_tun was set by a preceding PRE_TUNNEL action. */
			if (ipv6_tun)
				memset(&merge->ipv6, 0xff,
				       sizeof(struct nfp_flower_ipv6));
			else
				memset(&merge->ipv4, 0xff,
				       sizeof(struct nfp_flower_ipv4));
			break;
		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
			/* Mask layout: dst bytes first, then src bytes. */
			eth = (struct nfp_fl_set_eth *)a;
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
			for (i = 0; i < ETH_ALEN; i++)
				merge->l2.mac_src[i] |=
					eth->eth_addr_mask[ETH_ALEN + i];
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
			for (i = 0; i < 4; i++)
				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
					ipv6_add->ipv6[i].mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
			merge->ipv6.ip_ext.ttl |=
				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
			merge->ipv6.ipv6_flow_label_exthdr |=
				ipv6_tc_hl_fl->ipv6_label_mask;
			break;
		case NFP_FL_ACTION_OPCODE_SET_UDP:
		case NFP_FL_ACTION_OPCODE_SET_TCP:
			/* tp_port_mask covers src and dst ports (4 bytes). */
			tport = (struct nfp_fl_set_tport *)a;
			ports = (u8 *)&merge->l4.port_src;
			for (i = 0; i < 4; i++)
				ports[i] |= tport->tp_port_mask[i];
			break;
		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
			/* Remember the tunnel address family for a later
			 * SET_TUNNEL action.
			 */
			pre_tun = (struct nfp_fl_pre_tunnel *)a;
			ipv6_tun = be16_to_cpu(pre_tun->flags) &
					NFP_FL_PRE_TUN_IPV6;
			break;
		case NFP_FL_ACTION_OPCODE_PRE_LAG:
		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
			break;
		default:
			return -EOPNOTSUPP;
		}

		act_off += a->len_lw << NFP_FL_LW_SIZ;
	}

	if (last_act_id)
		*last_act_id = act_id;

	return 0;
}
699 | ||
/* Build a merge-check view of @flow's mask data.
 *
 * Walks the packed mask buffer in key-layer order (meta/TCI, optional
 * ext-meta, in-port, then MAC, TP and IPv4/IPv6 layers as present) and
 * copies each present layer's mask into the matching field of @merge.
 * When @extra_fields is false, a key layer outside
 * NFP_FLOWER_MERGE_FIELDS makes the flow ineligible for merging.
 *
 * Return: 0 on success, -EOPNOTSUPP if disallowed layers are present.
 */
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
				struct nfp_flower_merge_check *merge,
				bool extra_fields)
{
	struct nfp_flower_meta_tci *meta_tci;
	u8 *mask = flow->mask_data;
	u8 key_layer, match_size;

	memset(merge, 0, sizeof(struct nfp_flower_merge_check));

	/* The mask buffer starts with the meta/TCI block, which names
	 * the layers that follow.
	 */
	meta_tci = (struct nfp_flower_meta_tci *)mask;
	key_layer = meta_tci->nfp_flow_key_layer;

	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
		return -EOPNOTSUPP;

	merge->tci = meta_tci->tci;
	mask += sizeof(struct nfp_flower_meta_tci);

	/* Skip blocks that carry no merge-relevant fields. */
	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
		mask += sizeof(struct nfp_flower_ext_meta);

	mask += sizeof(struct nfp_flower_in_port);

	if (key_layer & NFP_FLOWER_LAYER_MAC) {
		match_size = sizeof(struct nfp_flower_mac_mpls);
		memcpy(&merge->l2, mask, match_size);
		mask += match_size;
	}

	if (key_layer & NFP_FLOWER_LAYER_TP) {
		match_size = sizeof(struct nfp_flower_tp_ports);
		memcpy(&merge->l4, mask, match_size);
		mask += match_size;
	}

	/* IPv4 and IPv6 layers share storage in the merge union; a flow
	 * carries at most one of them here.
	 */
	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
		match_size = sizeof(struct nfp_flower_ipv4);
		memcpy(&merge->ipv4, mask, match_size);
	}

	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
		match_size = sizeof(struct nfp_flower_ipv6);
		memcpy(&merge->ipv6, mask, match_size);
	}

	return 0;
}
749 | ||
750 | static int | |
751 | nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1, | |
752 | struct nfp_fl_payload *sub_flow2) | |
753 | { | |
754 | /* Two flows can be merged if sub_flow2 only matches on bits that are | |
755 | * either matched by sub_flow1 or set by a sub_flow1 action. This | |
756 | * ensures that every packet that hits sub_flow1 and recirculates is | |
757 | * guaranteed to hit sub_flow2. | |
758 | */ | |
759 | struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge; | |
760 | int err, act_out = 0; | |
761 | u8 last_act_id = 0; | |
762 | ||
763 | err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge, | |
764 | true); | |
765 | if (err) | |
766 | return err; | |
767 | ||
768 | err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge, | |
769 | false); | |
770 | if (err) | |
771 | return err; | |
772 | ||
773 | err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge, | |
774 | &last_act_id, &act_out); | |
775 | if (err) | |
776 | return err; | |
777 | ||
778 | /* Must only be 1 output action and it must be the last in sequence. */ | |
779 | if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT) | |
780 | return -EOPNOTSUPP; | |
781 | ||
782 | /* Reject merge if sub_flow2 matches on something that is not matched | |
783 | * on or set in an action by sub_flow1. | |
784 | */ | |
785 | err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals, | |
786 | sub_flow1_merge.vals, | |
787 | sizeof(struct nfp_flower_merge_check) * 8); | |
788 | if (err) | |
789 | return -EINVAL; | |
790 | ||
791 | return 0; | |
792 | } | |
793 | ||
1c6952ca JH |
794 | static unsigned int |
795 | nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len, | |
796 | bool *tunnel_act) | |
797 | { | |
798 | unsigned int act_off = 0, act_len; | |
799 | struct nfp_fl_act_head *a; | |
800 | u8 act_id = 0; | |
801 | ||
802 | while (act_off < len) { | |
803 | a = (struct nfp_fl_act_head *)&act_src[act_off]; | |
804 | act_len = a->len_lw << NFP_FL_LW_SIZ; | |
805 | act_id = a->jump_id; | |
806 | ||
807 | switch (act_id) { | |
808 | case NFP_FL_ACTION_OPCODE_PRE_TUNNEL: | |
809 | if (tunnel_act) | |
810 | *tunnel_act = true; | |
df561f66 | 811 | fallthrough; |
1c6952ca JH |
812 | case NFP_FL_ACTION_OPCODE_PRE_LAG: |
813 | memcpy(act_dst + act_off, act_src + act_off, act_len); | |
814 | break; | |
815 | default: | |
816 | return act_off; | |
817 | } | |
818 | ||
819 | act_off += act_len; | |
820 | } | |
821 | ||
822 | return act_off; | |
823 | } | |
824 | ||
4b10c53d JH |
825 | static int |
826 | nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan) | |
1c6952ca JH |
827 | { |
828 | struct nfp_fl_act_head *a; | |
829 | unsigned int act_off = 0; | |
830 | ||
831 | while (act_off < len) { | |
832 | a = (struct nfp_fl_act_head *)&acts[act_off]; | |
4b10c53d JH |
833 | |
834 | if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off) | |
835 | *vlan = (struct nfp_fl_push_vlan *)a; | |
836 | else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) | |
1c6952ca JH |
837 | return -EOPNOTSUPP; |
838 | ||
839 | act_off += a->len_lw << NFP_FL_LW_SIZ; | |
840 | } | |
841 | ||
4b10c53d JH |
842 | /* Ensure any VLAN push also has an egress action. */ |
843 | if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan)) | |
844 | return -EOPNOTSUPP; | |
845 | ||
1c6952ca JH |
846 | return 0; |
847 | } | |
848 | ||
4b10c53d JH |
849 | static int |
850 | nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan) | |
851 | { | |
1922c9a4 | 852 | struct nfp_fl_set_tun *tun; |
4b10c53d JH |
853 | struct nfp_fl_act_head *a; |
854 | unsigned int act_off = 0; | |
855 | ||
856 | while (act_off < len) { | |
857 | a = (struct nfp_fl_act_head *)&acts[act_off]; | |
858 | ||
1922c9a4 JH |
859 | if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) { |
860 | tun = (struct nfp_fl_set_tun *)a; | |
4b10c53d JH |
861 | tun->outer_vlan_tpid = vlan->vlan_tpid; |
862 | tun->outer_vlan_tci = vlan->vlan_tci; | |
863 | ||
864 | return 0; | |
865 | } | |
866 | ||
867 | act_off += a->len_lw << NFP_FL_LW_SIZ; | |
868 | } | |
869 | ||
870 | /* Return error if no tunnel action is found. */ | |
871 | return -EOPNOTSUPP; | |
872 | } | |
873 | ||
1c6952ca JH |
874 | static int |
875 | nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1, | |
876 | struct nfp_fl_payload *sub_flow2, | |
877 | struct nfp_fl_payload *merge_flow) | |
878 | { | |
879 | unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2; | |
4b10c53d | 880 | struct nfp_fl_push_vlan *post_tun_push_vlan = NULL; |
1c6952ca JH |
881 | bool tunnel_act = false; |
882 | char *merge_act; | |
883 | int err; | |
884 | ||
885 | /* The last action of sub_flow1 must be output - do not merge this. */ | |
886 | sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output); | |
887 | sub2_act_len = sub_flow2->meta.act_len; | |
888 | ||
889 | if (!sub2_act_len) | |
890 | return -EINVAL; | |
891 | ||
892 | if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ) | |
893 | return -EINVAL; | |
894 | ||
895 | /* A shortcut can only be applied if there is a single action. */ | |
896 | if (sub1_act_len) | |
897 | merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); | |
898 | else | |
899 | merge_flow->meta.shortcut = sub_flow2->meta.shortcut; | |
900 | ||
901 | merge_flow->meta.act_len = sub1_act_len + sub2_act_len; | |
902 | merge_act = merge_flow->action_data; | |
903 | ||
904 | /* Copy any pre-actions to the start of merge flow action list. */ | |
905 | pre_off1 = nfp_flower_copy_pre_actions(merge_act, | |
906 | sub_flow1->action_data, | |
907 | sub1_act_len, &tunnel_act); | |
908 | merge_act += pre_off1; | |
909 | sub1_act_len -= pre_off1; | |
910 | pre_off2 = nfp_flower_copy_pre_actions(merge_act, | |
911 | sub_flow2->action_data, | |
912 | sub2_act_len, NULL); | |
913 | merge_act += pre_off2; | |
914 | sub2_act_len -= pre_off2; | |
915 | ||
916 | /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes | |
4b10c53d JH |
917 | * a tunnel, there are restrictions on what sub_flow 2 actions lead to a |
918 | * valid merge. | |
1c6952ca JH |
919 | */ |
920 | if (tunnel_act) { | |
921 | char *post_tun_acts = &sub_flow2->action_data[pre_off2]; | |
922 | ||
4b10c53d JH |
923 | err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len, |
924 | &post_tun_push_vlan); | |
1c6952ca JH |
925 | if (err) |
926 | return err; | |
4b10c53d JH |
927 | |
928 | if (post_tun_push_vlan) { | |
929 | pre_off2 += sizeof(*post_tun_push_vlan); | |
930 | sub2_act_len -= sizeof(*post_tun_push_vlan); | |
931 | } | |
1c6952ca JH |
932 | } |
933 | ||
934 | /* Copy remaining actions from sub_flows 1 and 2. */ | |
935 | memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len); | |
4b10c53d JH |
936 | |
937 | if (post_tun_push_vlan) { | |
938 | /* Update tunnel action in merge to include VLAN push. */ | |
939 | err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len, | |
940 | post_tun_push_vlan); | |
941 | if (err) | |
942 | return err; | |
943 | ||
944 | merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan); | |
945 | } | |
946 | ||
1c6952ca JH |
947 | merge_act += sub1_act_len; |
948 | memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len); | |
949 | ||
950 | return 0; | |
951 | } | |
952 | ||
aa6ce2ea JH |
953 | /* Flow link code should only be accessed under RTNL. */ |
954 | static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link) | |
955 | { | |
956 | list_del(&link->merge_flow.list); | |
957 | list_del(&link->sub_flow.list); | |
958 | kfree(link); | |
959 | } | |
960 | ||
961 | static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow, | |
962 | struct nfp_fl_payload *sub_flow) | |
963 | { | |
964 | struct nfp_fl_payload_link *link; | |
965 | ||
966 | list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) | |
967 | if (link->sub_flow.flow == sub_flow) { | |
968 | nfp_flower_unlink_flow(link); | |
969 | return; | |
970 | } | |
971 | } | |
972 | ||
973 | static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow, | |
974 | struct nfp_fl_payload *sub_flow) | |
975 | { | |
976 | struct nfp_fl_payload_link *link; | |
977 | ||
978 | link = kmalloc(sizeof(*link), GFP_KERNEL); | |
979 | if (!link) | |
980 | return -ENOMEM; | |
981 | ||
982 | link->merge_flow.flow = merge_flow; | |
983 | list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows); | |
984 | link->sub_flow.flow = sub_flow; | |
985 | list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows); | |
986 | ||
987 | return 0; | |
988 | } | |
989 | ||
dbc2d68e JH |
990 | /** |
991 | * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow. | |
992 | * @app: Pointer to the APP handle | |
993 | * @sub_flow1: Initial flow matched to produce merge hint | |
994 | * @sub_flow2: Post recirculation flow matched in merge hint | |
995 | * | |
996 | * Combines 2 flows (if valid) to a single flow, removing the initial from hw | |
997 | * and offloading the new, merged flow. | |
998 | * | |
999 | * Return: negative value on error, 0 in success. | |
1000 | */ | |
1001 | int nfp_flower_merge_offloaded_flows(struct nfp_app *app, | |
1002 | struct nfp_fl_payload *sub_flow1, | |
1003 | struct nfp_fl_payload *sub_flow2) | |
1004 | { | |
8af56f40 | 1005 | struct nfp_flower_priv *priv = app->priv; |
1c6952ca JH |
1006 | struct nfp_fl_payload *merge_flow; |
1007 | struct nfp_fl_key_ls merge_key_ls; | |
2ea538db YZ |
1008 | struct nfp_merge_info *merge_info; |
1009 | u64 parent_ctx = 0; | |
107e37bb JH |
1010 | int err; |
1011 | ||
1c6952ca JH |
1012 | if (sub_flow1 == sub_flow2 || |
1013 | nfp_flower_is_merge_flow(sub_flow1) || | |
1014 | nfp_flower_is_merge_flow(sub_flow2)) | |
1015 | return -EINVAL; | |
1016 | ||
9bacb93b | 1017 | /* Check if the two flows are already merged */ |
2ea538db YZ |
1018 | parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32; |
1019 | parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id)); | |
1020 | if (rhashtable_lookup_fast(&priv->merge_table, | |
1021 | &parent_ctx, merge_table_params)) { | |
1022 | nfp_flower_cmsg_warn(app, "The two flows are already merged.\n"); | |
1023 | return 0; | |
1024 | } | |
1025 | ||
107e37bb JH |
1026 | err = nfp_flower_can_merge(sub_flow1, sub_flow2); |
1027 | if (err) | |
1028 | return err; | |
1029 | ||
1c6952ca JH |
1030 | merge_key_ls.key_size = sub_flow1->meta.key_len; |
1031 | ||
1032 | merge_flow = nfp_flower_allocate_new(&merge_key_ls); | |
1033 | if (!merge_flow) | |
1034 | return -ENOMEM; | |
1035 | ||
1036 | merge_flow->tc_flower_cookie = (unsigned long)merge_flow; | |
1037 | merge_flow->ingress_dev = sub_flow1->ingress_dev; | |
1038 | ||
1039 | memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data, | |
1040 | sub_flow1->meta.key_len); | |
1041 | memcpy(merge_flow->mask_data, sub_flow1->mask_data, | |
1042 | sub_flow1->meta.mask_len); | |
1043 | ||
1044 | err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow); | |
1045 | if (err) | |
1046 | goto err_destroy_merge_flow; | |
1047 | ||
aa6ce2ea JH |
1048 | err = nfp_flower_link_flows(merge_flow, sub_flow1); |
1049 | if (err) | |
1050 | goto err_destroy_merge_flow; | |
1051 | ||
1052 | err = nfp_flower_link_flows(merge_flow, sub_flow2); | |
1053 | if (err) | |
1054 | goto err_unlink_sub_flow1; | |
1055 | ||
4b15fb18 LP |
1056 | err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow, |
1057 | merge_flow->ingress_dev, NULL); | |
8af56f40 JH |
1058 | if (err) |
1059 | goto err_unlink_sub_flow2; | |
1c6952ca | 1060 | |
8af56f40 JH |
1061 | err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node, |
1062 | nfp_flower_table_params); | |
1063 | if (err) | |
1064 | goto err_release_metadata; | |
1065 | ||
2ea538db YZ |
1066 | merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL); |
1067 | if (!merge_info) { | |
1068 | err = -ENOMEM; | |
1069 | goto err_remove_rhash; | |
1070 | } | |
1071 | merge_info->parent_ctx = parent_ctx; | |
1072 | err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node, | |
1073 | merge_table_params); | |
1074 | if (err) | |
1075 | goto err_destroy_merge_info; | |
1076 | ||
8af56f40 JH |
1077 | err = nfp_flower_xmit_flow(app, merge_flow, |
1078 | NFP_FLOWER_CMSG_TYPE_FLOW_MOD); | |
1079 | if (err) | |
2ea538db | 1080 | goto err_remove_merge_info; |
8af56f40 JH |
1081 | |
1082 | merge_flow->in_hw = true; | |
1083 | sub_flow1->in_hw = false; | |
1084 | ||
1085 | return 0; | |
1086 | ||
2ea538db YZ |
1087 | err_remove_merge_info: |
1088 | WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, | |
1089 | &merge_info->ht_node, | |
1090 | merge_table_params)); | |
1091 | err_destroy_merge_info: | |
1092 | kfree(merge_info); | |
8af56f40 JH |
1093 | err_remove_rhash: |
1094 | WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, | |
1095 | &merge_flow->fl_node, | |
1096 | nfp_flower_table_params)); | |
1097 | err_release_metadata: | |
1098 | nfp_modify_flow_metadata(app, merge_flow); | |
1099 | err_unlink_sub_flow2: | |
aa6ce2ea JH |
1100 | nfp_flower_unlink_flows(merge_flow, sub_flow2); |
1101 | err_unlink_sub_flow1: | |
1102 | nfp_flower_unlink_flows(merge_flow, sub_flow1); | |
1c6952ca JH |
1103 | err_destroy_merge_flow: |
1104 | kfree(merge_flow->action_data); | |
1105 | kfree(merge_flow->mask_data); | |
1106 | kfree(merge_flow->unmasked_data); | |
1107 | kfree(merge_flow); | |
1108 | return err; | |
dbc2d68e JH |
1109 | } |
1110 | ||
f5c977ee JH |
1111 | /** |
1112 | * nfp_flower_validate_pre_tun_rule() | |
1113 | * @app: Pointer to the APP handle | |
1114 | * @flow: Pointer to NFP flow representation of rule | |
0d630f58 | 1115 | * @key_ls: Pointer to NFP key layers structure |
f5c977ee JH |
1116 | * @extack: Netlink extended ACK report |
1117 | * | |
1118 | * Verifies the flow as a pre-tunnel rule. | |
1119 | * | |
1120 | * Return: negative value on error, 0 if verified. | |
1121 | */ | |
1122 | static int | |
1123 | nfp_flower_validate_pre_tun_rule(struct nfp_app *app, | |
1124 | struct nfp_fl_payload *flow, | |
0d630f58 | 1125 | struct nfp_fl_key_ls *key_ls, |
f5c977ee JH |
1126 | struct netlink_ext_ack *extack) |
1127 | { | |
0d630f58 | 1128 | struct nfp_flower_priv *priv = app->priv; |
120ffd84 JH |
1129 | struct nfp_flower_meta_tci *meta_tci; |
1130 | struct nfp_flower_mac_mpls *mac; | |
0d630f58 | 1131 | u8 *ext = flow->unmasked_data; |
120ffd84 JH |
1132 | struct nfp_fl_act_head *act; |
1133 | u8 *mask = flow->mask_data; | |
1134 | bool vlan = false; | |
1135 | int act_offset; | |
1136 | u8 key_layer; | |
1137 | ||
1138 | meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; | |
0d630f58 LP |
1139 | key_layer = key_ls->key_layer; |
1140 | if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { | |
1141 | if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { | |
1142 | u16 vlan_tci = be16_to_cpu(meta_tci->tci); | |
1143 | ||
1144 | vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; | |
1145 | flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); | |
1146 | vlan = true; | |
1147 | } else { | |
1148 | flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); | |
1149 | } | |
120ffd84 JH |
1150 | } |
1151 | ||
120ffd84 JH |
1152 | if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { |
1153 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); | |
1154 | return -EOPNOTSUPP; | |
0d630f58 LP |
1155 | } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) { |
1156 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields"); | |
1157 | return -EOPNOTSUPP; | |
120ffd84 JH |
1158 | } |
1159 | ||
1160 | if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { | |
1161 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); | |
1162 | return -EOPNOTSUPP; | |
1163 | } | |
1164 | ||
982e5ee2 LP |
1165 | if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && |
1166 | !(key_layer & NFP_FLOWER_LAYER_IPV6)) { | |
1167 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); | |
1168 | return -EOPNOTSUPP; | |
1169 | } | |
1170 | ||
38fc158e LP |
1171 | if (key_layer & NFP_FLOWER_LAYER_IPV6) |
1172 | flow->pre_tun_rule.is_ipv6 = true; | |
1173 | else | |
1174 | flow->pre_tun_rule.is_ipv6 = false; | |
1175 | ||
120ffd84 JH |
1176 | /* Skip fields known to exist. */ |
1177 | mask += sizeof(struct nfp_flower_meta_tci); | |
0d630f58 LP |
1178 | ext += sizeof(struct nfp_flower_meta_tci); |
1179 | if (key_ls->key_layer_two) { | |
1180 | mask += sizeof(struct nfp_flower_ext_meta); | |
1181 | ext += sizeof(struct nfp_flower_ext_meta); | |
1182 | } | |
120ffd84 | 1183 | mask += sizeof(struct nfp_flower_in_port); |
0d630f58 | 1184 | ext += sizeof(struct nfp_flower_in_port); |
120ffd84 JH |
1185 | |
1186 | /* Ensure destination MAC address is fully matched. */ | |
1187 | mac = (struct nfp_flower_mac_mpls *)mask; | |
1188 | if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { | |
1189 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); | |
1190 | return -EOPNOTSUPP; | |
1191 | } | |
1192 | ||
38fc158e LP |
1193 | /* Ensure source MAC address is fully matched. This is only needed |
1194 | * for firmware with the DECAP_V2 feature enabled. Don't do this | |
1195 | * for firmware without this feature to keep old behaviour. | |
1196 | */ | |
1197 | if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { | |
1198 | mac = (struct nfp_flower_mac_mpls *)mask; | |
1199 | if (!is_broadcast_ether_addr(&mac->mac_src[0])) { | |
1200 | NL_SET_ERR_MSG_MOD(extack, | |
1201 | "unsupported pre-tunnel rule: source MAC field must not be masked"); | |
1202 | return -EOPNOTSUPP; | |
1203 | } | |
1204 | } | |
1205 | ||
982e5ee2 LP |
1206 | if (mac->mpls_lse) { |
1207 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); | |
1208 | return -EOPNOTSUPP; | |
1209 | } | |
1210 | ||
38fc158e LP |
1211 | /* Ensure destination MAC address matches pre_tun_dev. */ |
1212 | mac = (struct nfp_flower_mac_mpls *)ext; | |
1213 | if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) { | |
1214 | NL_SET_ERR_MSG_MOD(extack, | |
1215 | "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); | |
1216 | return -EOPNOTSUPP; | |
1217 | } | |
1218 | ||
1219 | /* Save mac addresses in pre_tun_rule entry for later use */ | |
1220 | memcpy(&flow->pre_tun_rule.loc_mac, &mac->mac_dst[0], ETH_ALEN); | |
1221 | memcpy(&flow->pre_tun_rule.rem_mac, &mac->mac_src[0], ETH_ALEN); | |
1222 | ||
0d630f58 LP |
1223 | mask += sizeof(struct nfp_flower_mac_mpls); |
1224 | ext += sizeof(struct nfp_flower_mac_mpls); | |
78346160 JH |
1225 | if (key_layer & NFP_FLOWER_LAYER_IPV4 || |
1226 | key_layer & NFP_FLOWER_LAYER_IPV6) { | |
1227 | /* Flags and proto fields have same offset in IPv4 and IPv6. */ | |
120ffd84 JH |
1228 | int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); |
1229 | int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); | |
78346160 | 1230 | int size; |
120ffd84 JH |
1231 | int i; |
1232 | ||
78346160 JH |
1233 | size = key_layer & NFP_FLOWER_LAYER_IPV4 ? |
1234 | sizeof(struct nfp_flower_ipv4) : | |
1235 | sizeof(struct nfp_flower_ipv6); | |
1236 | ||
120ffd84 JH |
1237 | |
1238 | /* Ensure proto and flags are the only IP layer fields. */ | |
78346160 | 1239 | for (i = 0; i < size; i++) |
120ffd84 JH |
1240 | if (mask[i] && i != ip_flags && i != ip_proto) { |
1241 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); | |
1242 | return -EOPNOTSUPP; | |
1243 | } | |
0d630f58 LP |
1244 | ext += size; |
1245 | mask += size; | |
1246 | } | |
1247 | ||
1248 | if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { | |
1249 | if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { | |
1250 | struct nfp_flower_vlan *vlan_tags; | |
38fc158e | 1251 | u16 vlan_tpid; |
0d630f58 LP |
1252 | u16 vlan_tci; |
1253 | ||
1254 | vlan_tags = (struct nfp_flower_vlan *)ext; | |
1255 | ||
1256 | vlan_tci = be16_to_cpu(vlan_tags->outer_tci); | |
38fc158e | 1257 | vlan_tpid = be16_to_cpu(vlan_tags->outer_tpid); |
0d630f58 LP |
1258 | |
1259 | vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; | |
1260 | flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); | |
38fc158e | 1261 | flow->pre_tun_rule.vlan_tpid = cpu_to_be16(vlan_tpid); |
0d630f58 LP |
1262 | vlan = true; |
1263 | } else { | |
1264 | flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); | |
38fc158e | 1265 | flow->pre_tun_rule.vlan_tpid = cpu_to_be16(0xffff); |
0d630f58 | 1266 | } |
120ffd84 JH |
1267 | } |
1268 | ||
1269 | /* Action must be a single egress or pop_vlan and egress. */ | |
1270 | act_offset = 0; | |
1271 | act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; | |
1272 | if (vlan) { | |
1273 | if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { | |
1274 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); | |
1275 | return -EOPNOTSUPP; | |
1276 | } | |
1277 | ||
1278 | act_offset += act->len_lw << NFP_FL_LW_SIZ; | |
1279 | act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; | |
1280 | } | |
1281 | ||
1282 | if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { | |
1283 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected"); | |
1284 | return -EOPNOTSUPP; | |
1285 | } | |
1286 | ||
1287 | act_offset += act->len_lw << NFP_FL_LW_SIZ; | |
1288 | ||
1289 | /* Ensure there are no more actions after egress. */ | |
1290 | if (act_offset != flow->meta.act_len) { | |
1291 | NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); | |
1292 | return -EOPNOTSUPP; | |
1293 | } | |
1294 | ||
1295 | return 0; | |
f5c977ee JH |
1296 | } |
1297 | ||
2bda0a5e LP |
1298 | static bool offload_pre_check(struct flow_cls_offload *flow) |
1299 | { | |
1300 | struct flow_rule *rule = flow_cls_offload_flow_rule(flow); | |
1301 | struct flow_dissector *dissector = rule->match.dissector; | |
ff763011 | 1302 | struct flow_match_ct ct; |
2bda0a5e | 1303 | |
2b3082c6 | 1304 | if (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CT)) { |
ff763011 WG |
1305 | flow_rule_match_ct(rule, &ct); |
1306 | /* Allow special case where CT match is all 0 */ | |
1307 | if (memchr_inv(ct.key, 0, sizeof(*ct.key))) | |
1308 | return false; | |
1309 | } | |
2bda0a5e LP |
1310 | |
1311 | if (flow->common.chain_index) | |
1312 | return false; | |
1313 | ||
1314 | return true; | |
1315 | } | |
1316 | ||
8a276873 PJV |
1317 | /** |
1318 | * nfp_flower_add_offload() - Adds a new flow to hardware. | |
1319 | * @app: Pointer to the APP handle | |
1320 | * @netdev: netdev structure. | |
1321 | * @flow: TC flower classifier offload structure. | |
1322 | * | |
1323 | * Adds a new flow to the repeated hash structure and action payload. | |
1324 | * | |
1325 | * Return: negative value on error, 0 if configured successfully. | |
1326 | */ | |
1327 | static int | |
1328 | nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, | |
f9e30088 | 1329 | struct flow_cls_offload *flow) |
8a276873 | 1330 | { |
4b15fb18 | 1331 | struct flow_rule *rule = flow_cls_offload_flow_rule(flow); |
bedeca15 | 1332 | enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; |
43f84b72 | 1333 | struct nfp_flower_priv *priv = app->priv; |
14179c4b | 1334 | struct netlink_ext_ack *extack = NULL; |
af9d842c PJV |
1335 | struct nfp_fl_payload *flow_pay; |
1336 | struct nfp_fl_key_ls *key_layer; | |
7885b4fc | 1337 | struct nfp_port *port = NULL; |
af9d842c PJV |
1338 | int err; |
1339 | ||
14179c4b | 1340 | extack = flow->common.extack; |
7885b4fc JH |
1341 | if (nfp_netdev_is_nfp_repr(netdev)) |
1342 | port = nfp_port_from_netdev(netdev); | |
1343 | ||
c8b034fb | 1344 | if (is_pre_ct_flow(flow)) |
a87ceb3d | 1345 | return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL); |
c8b034fb LP |
1346 | |
1347 | if (is_post_ct_flow(flow)) | |
1348 | return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack); | |
1349 | ||
2bda0a5e LP |
1350 | if (!offload_pre_check(flow)) |
1351 | return -EOPNOTSUPP; | |
1352 | ||
af9d842c PJV |
1353 | key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); |
1354 | if (!key_layer) | |
1355 | return -ENOMEM; | |
1356 | ||
4b15fb18 | 1357 | err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule, |
14179c4b | 1358 | &tun_type, extack); |
af9d842c PJV |
1359 | if (err) |
1360 | goto err_free_key_ls; | |
1361 | ||
4f63fde3 | 1362 | flow_pay = nfp_flower_allocate_new(key_layer); |
af9d842c PJV |
1363 | if (!flow_pay) { |
1364 | err = -ENOMEM; | |
1365 | goto err_free_key_ls; | |
1366 | } | |
1367 | ||
4b15fb18 | 1368 | err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev, |
bef6e97d | 1369 | flow_pay, tun_type, extack); |
5571e8c9 PJV |
1370 | if (err) |
1371 | goto err_destroy_flow; | |
1372 | ||
e75dc265 | 1373 | err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack); |
1a1e586f PJV |
1374 | if (err) |
1375 | goto err_destroy_flow; | |
1376 | ||
f5c977ee | 1377 | if (flow_pay->pre_tun_rule.dev) { |
0d630f58 | 1378 | err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack); |
f5c977ee JH |
1379 | if (err) |
1380 | goto err_destroy_flow; | |
1381 | } | |
1382 | ||
4b15fb18 | 1383 | err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack); |
43f84b72 PJV |
1384 | if (err) |
1385 | goto err_destroy_flow; | |
1386 | ||
43f84b72 | 1387 | flow_pay->tc_flower_cookie = flow->cookie; |
c01d0efa PJV |
1388 | err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, |
1389 | nfp_flower_table_params); | |
14179c4b PJV |
1390 | if (err) { |
1391 | NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads"); | |
11664948 | 1392 | goto err_release_metadata; |
14179c4b | 1393 | } |
c01d0efa | 1394 | |
e30b2b68 LP |
1395 | if (flow_pay->pre_tun_rule.dev) { |
1396 | if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) { | |
1397 | struct nfp_predt_entry *predt; | |
1398 | ||
1399 | predt = kzalloc(sizeof(*predt), GFP_KERNEL); | |
1400 | if (!predt) { | |
1401 | err = -ENOMEM; | |
1402 | goto err_remove_rhash; | |
1403 | } | |
1404 | predt->flow_pay = flow_pay; | |
1405 | INIT_LIST_HEAD(&predt->nn_list); | |
1406 | spin_lock_bh(&priv->predt_lock); | |
1407 | list_add(&predt->list_head, &priv->predt_list); | |
e30b2b68 | 1408 | flow_pay->pre_tun_rule.predt = predt; |
591c90a1 LP |
1409 | nfp_tun_link_and_update_nn_entries(app, predt); |
1410 | spin_unlock_bh(&priv->predt_lock); | |
e30b2b68 LP |
1411 | } else { |
1412 | err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); | |
1413 | } | |
1414 | } else { | |
120ffd84 JH |
1415 | err = nfp_flower_xmit_flow(app, flow_pay, |
1416 | NFP_FLOWER_CMSG_TYPE_FLOW_ADD); | |
e30b2b68 LP |
1417 | } |
1418 | ||
b5f0cf08 JH |
1419 | if (err) |
1420 | goto err_remove_rhash; | |
c01d0efa | 1421 | |
7885b4fc JH |
1422 | if (port) |
1423 | port->tc_offload_cnt++; | |
43f84b72 | 1424 | |
8af56f40 JH |
1425 | flow_pay->in_hw = true; |
1426 | ||
43f84b72 PJV |
1427 | /* Deallocate flow payload when flower rule has been destroyed. */ |
1428 | kfree(key_layer); | |
1429 | ||
1430 | return 0; | |
af9d842c | 1431 | |
b5f0cf08 JH |
1432 | err_remove_rhash: |
1433 | WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, | |
1434 | &flow_pay->fl_node, | |
1435 | nfp_flower_table_params)); | |
11664948 JH |
1436 | err_release_metadata: |
1437 | nfp_modify_flow_metadata(app, flow_pay); | |
5571e8c9 | 1438 | err_destroy_flow: |
cfa18993 JH |
1439 | if (flow_pay->nfp_tun_ipv6) |
1440 | nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6); | |
1a1e586f | 1441 | kfree(flow_pay->action_data); |
af9d842c PJV |
1442 | kfree(flow_pay->mask_data); |
1443 | kfree(flow_pay->unmasked_data); | |
1444 | kfree(flow_pay); | |
af9d842c PJV |
1445 | err_free_key_ls: |
1446 | kfree(key_layer); | |
1447 | return err; | |
8a276873 PJV |
1448 | } |
1449 | ||
8af56f40 JH |
1450 | static void |
1451 | nfp_flower_remove_merge_flow(struct nfp_app *app, | |
1452 | struct nfp_fl_payload *del_sub_flow, | |
1453 | struct nfp_fl_payload *merge_flow) | |
1454 | { | |
1455 | struct nfp_flower_priv *priv = app->priv; | |
1456 | struct nfp_fl_payload_link *link, *temp; | |
2ea538db | 1457 | struct nfp_merge_info *merge_info; |
8af56f40 | 1458 | struct nfp_fl_payload *origin; |
2ea538db | 1459 | u64 parent_ctx = 0; |
8af56f40 JH |
1460 | bool mod = false; |
1461 | int err; | |
1462 | ||
1463 | link = list_first_entry(&merge_flow->linked_flows, | |
1464 | struct nfp_fl_payload_link, merge_flow.list); | |
1465 | origin = link->sub_flow.flow; | |
1466 | ||
1467 | /* Re-add rule the merge had overwritten if it has not been deleted. */ | |
1468 | if (origin != del_sub_flow) | |
1469 | mod = true; | |
1470 | ||
1471 | err = nfp_modify_flow_metadata(app, merge_flow); | |
1472 | if (err) { | |
1473 | nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n"); | |
1474 | goto err_free_links; | |
1475 | } | |
1476 | ||
1477 | if (!mod) { | |
1478 | err = nfp_flower_xmit_flow(app, merge_flow, | |
1479 | NFP_FLOWER_CMSG_TYPE_FLOW_DEL); | |
1480 | if (err) { | |
1481 | nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n"); | |
1482 | goto err_free_links; | |
1483 | } | |
1484 | } else { | |
1485 | __nfp_modify_flow_metadata(priv, origin); | |
1486 | err = nfp_flower_xmit_flow(app, origin, | |
1487 | NFP_FLOWER_CMSG_TYPE_FLOW_MOD); | |
1488 | if (err) | |
1489 | nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n"); | |
1490 | origin->in_hw = true; | |
1491 | } | |
1492 | ||
1493 | err_free_links: | |
1494 | /* Clean any links connected with the merged flow. */ | |
1495 | list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, | |
2ea538db YZ |
1496 | merge_flow.list) { |
1497 | u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); | |
1498 | ||
1499 | parent_ctx = (parent_ctx << 32) | (u64)(ctx_id); | |
8af56f40 | 1500 | nfp_flower_unlink_flow(link); |
2ea538db YZ |
1501 | } |
1502 | ||
1503 | merge_info = rhashtable_lookup_fast(&priv->merge_table, | |
1504 | &parent_ctx, | |
1505 | merge_table_params); | |
1506 | if (merge_info) { | |
1507 | WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, | |
1508 | &merge_info->ht_node, | |
1509 | merge_table_params)); | |
1510 | kfree(merge_info); | |
1511 | } | |
8af56f40 JH |
1512 | |
1513 | kfree(merge_flow->action_data); | |
1514 | kfree(merge_flow->mask_data); | |
1515 | kfree(merge_flow->unmasked_data); | |
1516 | WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, | |
1517 | &merge_flow->fl_node, | |
1518 | nfp_flower_table_params)); | |
1519 | kfree_rcu(merge_flow, rcu); | |
1520 | } | |
1521 | ||
453cdc30 | 1522 | void |
8af56f40 JH |
1523 | nfp_flower_del_linked_merge_flows(struct nfp_app *app, |
1524 | struct nfp_fl_payload *sub_flow) | |
1525 | { | |
1526 | struct nfp_fl_payload_link *link, *temp; | |
1527 | ||
1528 | /* Remove any merge flow formed from the deleted sub_flow. */ | |
1529 | list_for_each_entry_safe(link, temp, &sub_flow->linked_flows, | |
1530 | sub_flow.list) | |
1531 | nfp_flower_remove_merge_flow(app, sub_flow, | |
1532 | link->merge_flow.flow); | |
1533 | } | |
1534 | ||
/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	extack = flow->common.extack;
	/* Only repr netdevs carry an nfp_port whose offload count we track. */
	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	/* Check ct_map_table: conntrack-managed flows are torn down through
	 * the ct layer instead of the plain flow path below.
	 */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent) {
		err = nfp_fl_ct_del_flow(ct_map_ent);
		return err;
	}

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
		return -ENOENT;
	}

	/* Release the metadata (mask id, stats context) held by this flow. */
	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_merge_flow;

	/* Drop tunnel endpoint address references taken at offload time. */
	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	if (nfp_flow->nfp_tun_ipv6)
		nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

	/* Flow never made it to the firmware - nothing to delete in HW. */
	if (!nfp_flow->in_hw) {
		err = 0;
		goto err_free_merge_flow;
	}

	if (nfp_flow->pre_tun_rule.dev) {
		if (priv->flower_ext_feats & NFP_FL_FEATS_DECAP_V2) {
			struct nfp_predt_entry *predt;

			/* DECAP_V2 firmware: unlink the pre-tunnel entry from
			 * its neighbour entries under predt_lock, then free it.
			 */
			predt = nfp_flow->pre_tun_rule.predt;
			if (predt) {
				spin_lock_bh(&priv->predt_lock);
				nfp_tun_unlink_and_update_nn_entries(app, predt);
				list_del(&predt->list_head);
				spin_unlock_bh(&priv->predt_lock);
				kfree(predt);
			}
		} else {
			err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
		}
	} else {
		err = nfp_flower_xmit_flow(app, nfp_flow,
					   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	}
	/* Fall through on error: local bookkeeping below is cleaned up even
	 * if the delete message could not be sent; err is returned as-is.
	 */

err_free_merge_flow:
	nfp_flower_del_linked_merge_flows(app, nfp_flow);
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	/* RCU-deferred free: lockless readers of flow_table may still hold
	 * a reference until the grace period ends.
	 */
	kfree_rcu(nfp_flow, rcu);
	return err;
}
1624 | ||
/* Distribute a merge flow's accumulated HW stats to all of its subflows.
 * Caller context: invoked with priv->stats_lock held (see
 * nfp_flower_get_stats()), as it reads and resets priv->stats entries.
 */
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
				struct nfp_fl_payload *merge_flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload_link *link;
	struct nfp_fl_payload *sub_flow;
	u64 pkts, bytes, used;
	u32 ctx_id;

	ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
	pkts = priv->stats[ctx_id].pkts;
	/* Do not cycle subflows if no stats to distribute. */
	if (!pkts)
		return;
	bytes = priv->stats[ctx_id].bytes;
	used = priv->stats[ctx_id].used;

	/* Reset stats for the merge flow. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;

	/* The merge flow has received stats updates from firmware.
	 * Distribute these stats to all subflows that form the merge.
	 * The stats will be collected from TC via the subflows.
	 */
	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
		sub_flow = link->sub_flow.flow;
		ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
		priv->stats[ctx_id].pkts += pkts;
		priv->stats[ctx_id].bytes += bytes;
		/* "used" is a timestamp: keep the most recent of the two. */
		priv->stats[ctx_id].used = max_t(u64, used,
						 priv->stats[ctx_id].used);
	}
}
1660 | ||
/* For a given subflow, pull stats down from every merge flow it is part of,
 * so that a subsequent stats dump on the subflow reflects merged traffic.
 */
void
nfp_flower_update_merge_stats(struct nfp_app *app,
			      struct nfp_fl_payload *sub_flow)
{
	struct nfp_fl_payload_link *link;

	/* Get merge flows that the subflow forms to distribute their stats. */
	list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
		__nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}
1671 | ||
/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app: Pointer to the APP handle
 * @netdev: Netdev structure.
 * @flow: TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct flow_cls_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_ct_map_entry *ct_map_ent;
	struct netlink_ext_ack *extack = NULL;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	/* Check ct_map table first: conntrack-managed flows report their
	 * stats through the ct layer.
	 */
	ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
					    nfp_ct_map_params);
	if (ct_map_ent)
		return nfp_fl_ct_stats(flow, ct_map_ent);

	extack = flow->common.extack;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow) {
		NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
		return -EINVAL;
	}

	/* host_ctx_id indexes this flow's slot in the priv->stats array. */
	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	/* If request is for a sub_flow, update stats from merged flows. */
	if (!list_empty(&nfp_flow->linked_flows))
		nfp_flower_update_merge_stats(app, nfp_flow);

	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
			  FLOW_ACTION_HW_STATS_DELAYED);

	/* Stats are reported as deltas: zero the counters once consumed. */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}
1723 | ||
1724 | static int | |
1725 | nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, | |
f9e30088 | 1726 | struct flow_cls_offload *flower) |
8a276873 | 1727 | { |
14690995 YL |
1728 | struct nfp_flower_priv *priv = app->priv; |
1729 | int ret; | |
1730 | ||
3107fdc8 | 1731 | if (!eth_proto_is_802_3(flower->common.protocol)) |
363fc53b JP |
1732 | return -EOPNOTSUPP; |
1733 | ||
14690995 | 1734 | mutex_lock(&priv->nfp_fl_lock); |
8a276873 | 1735 | switch (flower->command) { |
f9e30088 | 1736 | case FLOW_CLS_REPLACE: |
14690995 YL |
1737 | ret = nfp_flower_add_offload(app, netdev, flower); |
1738 | break; | |
f9e30088 | 1739 | case FLOW_CLS_DESTROY: |
14690995 YL |
1740 | ret = nfp_flower_del_offload(app, netdev, flower); |
1741 | break; | |
f9e30088 | 1742 | case FLOW_CLS_STATS: |
14690995 YL |
1743 | ret = nfp_flower_get_stats(app, netdev, flower); |
1744 | break; | |
0115552e | 1745 | default: |
14690995 YL |
1746 | ret = -EOPNOTSUPP; |
1747 | break; | |
0115552e | 1748 | } |
14690995 YL |
1749 | mutex_unlock(&priv->nfp_fl_lock); |
1750 | ||
1751 | return ret; | |
1a24d4f9 JH |
1752 | } |
1753 | ||
363fc53b JP |
1754 | static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, |
1755 | void *type_data, void *cb_priv) | |
1756 | { | |
2bda0a5e | 1757 | struct flow_cls_common_offload *common = type_data; |
2df7b2d2 | 1758 | struct nfp_repr *repr = cb_priv; |
363fc53b | 1759 | |
2bda0a5e | 1760 | if (!tc_can_offload_extack(repr->netdev, common->extack)) |
44ae12a7 JP |
1761 | return -EOPNOTSUPP; |
1762 | ||
363fc53b JP |
1763 | switch (type) { |
1764 | case TC_SETUP_CLSFLOWER: | |
2df7b2d2 | 1765 | return nfp_flower_repr_offload(repr->app, repr->netdev, |
4f63fde3 | 1766 | type_data); |
b66d035e PJV |
1767 | case TC_SETUP_CLSMATCHALL: |
1768 | return nfp_flower_setup_qos_offload(repr->app, repr->netdev, | |
1769 | type_data); | |
363fc53b JP |
1770 | default: |
1771 | return -EOPNOTSUPP; | |
1772 | } | |
1773 | } | |
1774 | ||
/* Driver-wide list of installed flow_block callbacks, used by the flow
 * block infrastructure to detect duplicate (busy) bindings.
 */
static LIST_HEAD(nfp_block_cb_list);
1776 | ||
/* Bind or unbind a TC flow block on a repr netdev. Only the clsact ingress
 * binder is supported. The allocated flow_block_cb is tracked on the
 * driver-wide nfp_block_cb_list for busy detection.
 *
 * Return: 0 on success, -EOPNOTSUPP/-EBUSY/-ENOENT or PTR_ERR on failure.
 */
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_repr_priv *repr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	repr_priv = repr->app_priv;
	/* Remember whether the block is shared between qdiscs. */
	repr_priv->block_shared = f->block_shared;
	f->driver_block_list = &nfp_block_cb_list;
	/* Callbacks take their own lock (nfp_fl_lock); no RTNL needed. */
	f->unlocked_driver_cb = true;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
					  &nfp_block_cb_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
					       repr, repr, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block,
						nfp_flower_setup_tc_block_cb,
						repr);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1820 | ||
1821 | int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, | |
1822 | enum tc_setup_type type, void *type_data) | |
1823 | { | |
1824 | switch (type) { | |
363fc53b JP |
1825 | case TC_SETUP_BLOCK: |
1826 | return nfp_flower_setup_tc_block(netdev, type_data); | |
1827 | default: | |
1828 | return -EOPNOTSUPP; | |
1829 | } | |
8a276873 | 1830 | } |
3166dd07 JH |
1831 | |
1832 | struct nfp_flower_indr_block_cb_priv { | |
1833 | struct net_device *netdev; | |
1834 | struct nfp_app *app; | |
1835 | struct list_head list; | |
1836 | }; | |
1837 | ||
1838 | static struct nfp_flower_indr_block_cb_priv * | |
1839 | nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, | |
1840 | struct net_device *netdev) | |
1841 | { | |
1842 | struct nfp_flower_indr_block_cb_priv *cb_priv; | |
1843 | struct nfp_flower_priv *priv = app->priv; | |
1844 | ||
3166dd07 JH |
1845 | list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) |
1846 | if (cb_priv->netdev == netdev) | |
1847 | return cb_priv; | |
1848 | ||
1849 | return NULL; | |
1850 | } | |
1851 | ||
a1db2178 | 1852 | static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, |
1853 | void *type_data, void *cb_priv) | |
3166dd07 JH |
1854 | { |
1855 | struct nfp_flower_indr_block_cb_priv *priv = cb_priv; | |
3166dd07 JH |
1856 | |
1857 | switch (type) { | |
1858 | case TC_SETUP_CLSFLOWER: | |
1859 | return nfp_flower_repr_offload(priv->app, priv->netdev, | |
4f63fde3 | 1860 | type_data); |
3166dd07 JH |
1861 | default: |
1862 | return -EOPNOTSUPP; | |
1863 | } | |
1864 | } | |
1865 | ||
a1db2178 | 1866 | void nfp_flower_setup_indr_tc_release(void *cb_priv) |
955bcb6e PNA |
1867 | { |
1868 | struct nfp_flower_indr_block_cb_priv *priv = cb_priv; | |
1869 | ||
1870 | list_del(&priv->list); | |
1871 | kfree(priv); | |
1872 | } | |
1873 | ||
3166dd07 | 1874 | static int |
c40f4e50 | 1875 | nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app, |
66f1939a | 1876 | struct flow_block_offload *f, void *data, |
1877 | void (*cleanup)(struct flow_block_cb *block_cb)) | |
3166dd07 JH |
1878 | { |
1879 | struct nfp_flower_indr_block_cb_priv *cb_priv; | |
1880 | struct nfp_flower_priv *priv = app->priv; | |
955bcb6e | 1881 | struct flow_block_cb *block_cb; |
3166dd07 | 1882 | |
739d7c57 JH |
1883 | if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && |
1884 | !nfp_flower_internal_port_can_offload(app, netdev)) || | |
1885 | (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && | |
1886 | nfp_flower_internal_port_can_offload(app, netdev))) | |
3166dd07 JH |
1887 | return -EOPNOTSUPP; |
1888 | ||
14690995 YL |
1889 | f->unlocked_driver_cb = true; |
1890 | ||
3166dd07 | 1891 | switch (f->command) { |
9c0e189e | 1892 | case FLOW_BLOCK_BIND: |
1edfb8ed VB |
1893 | cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); |
1894 | if (cb_priv && | |
1895 | flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb, | |
1896 | cb_priv, | |
1897 | &nfp_block_cb_list)) | |
1898 | return -EBUSY; | |
1899 | ||
3166dd07 JH |
1900 | cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); |
1901 | if (!cb_priv) | |
1902 | return -ENOMEM; | |
1903 | ||
1904 | cb_priv->netdev = netdev; | |
1905 | cb_priv->app = app; | |
1906 | list_add(&cb_priv->list, &priv->indr_block_cb_priv); | |
1907 | ||
66f1939a | 1908 | block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, |
1909 | cb_priv, cb_priv, | |
1910 | nfp_flower_setup_indr_tc_release, | |
c40f4e50 | 1911 | f, netdev, sch, data, app, cleanup); |
955bcb6e | 1912 | if (IS_ERR(block_cb)) { |
3166dd07 JH |
1913 | list_del(&cb_priv->list); |
1914 | kfree(cb_priv); | |
955bcb6e | 1915 | return PTR_ERR(block_cb); |
3166dd07 JH |
1916 | } |
1917 | ||
955bcb6e PNA |
1918 | flow_block_cb_add(block_cb, f); |
1919 | list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); | |
1920 | return 0; | |
9c0e189e | 1921 | case FLOW_BLOCK_UNBIND: |
3166dd07 | 1922 | cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); |
b12c97d4 JH |
1923 | if (!cb_priv) |
1924 | return -ENOENT; | |
1925 | ||
14bfb13f | 1926 | block_cb = flow_block_cb_lookup(f->block, |
955bcb6e PNA |
1927 | nfp_flower_setup_indr_block_cb, |
1928 | cb_priv); | |
1929 | if (!block_cb) | |
1930 | return -ENOENT; | |
3166dd07 | 1931 | |
66f1939a | 1932 | flow_indr_block_cb_remove(block_cb, f); |
955bcb6e | 1933 | list_del(&block_cb->driver_list); |
3166dd07 JH |
1934 | return 0; |
1935 | default: | |
1936 | return -EOPNOTSUPP; | |
1937 | } | |
1938 | return 0; | |
1939 | } | |
1940 | ||
59080da0 BZ |
1941 | static int |
1942 | nfp_setup_tc_no_dev(struct nfp_app *app, enum tc_setup_type type, void *data) | |
1943 | { | |
1944 | if (!data) | |
1945 | return -EOPNOTSUPP; | |
1946 | ||
1947 | switch (type) { | |
1948 | case TC_SETUP_ACT: | |
1949 | return nfp_setup_tc_act_offload(app, data); | |
1950 | default: | |
1951 | return -EOPNOTSUPP; | |
1952 | } | |
1953 | } | |
1954 | ||
50c1b1c9 | 1955 | int |
c40f4e50 | 1956 | nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, |
66f1939a | 1957 | enum tc_setup_type type, void *type_data, |
1958 | void *data, | |
1959 | void (*cleanup)(struct flow_block_cb *block_cb)) | |
3166dd07 | 1960 | { |
144d4c9e | 1961 | if (!netdev) |
59080da0 | 1962 | return nfp_setup_tc_no_dev(cb_priv, type, data); |
144d4c9e | 1963 | |
50c1b1c9 PNA |
1964 | if (!nfp_fl_is_netdev_to_offload(netdev)) |
1965 | return -EOPNOTSUPP; | |
1966 | ||
3166dd07 JH |
1967 | switch (type) { |
1968 | case TC_SETUP_BLOCK: | |
c40f4e50 | 1969 | return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv, |
66f1939a | 1970 | type_data, data, cleanup); |
3166dd07 JH |
1971 | default: |
1972 | return -EOPNOTSUPP; | |
1973 | } | |
1974 | } |