net/mlx5: Support different attributes for priorities in namespace
[linux-2.6-block.git] drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include "en.h"
#include "en_tc.h"

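/* One offloaded flower filter: keyed in the rhashtable by the TC filter
 * cookie and holding the hardware rule installed for it.
 */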
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_rule   *rule;
};

#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4

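/* Install one rule into the TC flow table. The table is created lazily
 * (auto-grouped) when the first rule is added; if inserting that first
 * rule fails, the freshly created table is destroyed again. Forwarding
 * rules (FWD_DEST) use the VLAN flow table as their destination.
 */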
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
                                                u32 *match_c, u32 *match_v,
                                                u32 action, u32 flow_tag)
{
        struct mlx5_flow_destination dest = {
                .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
                {.ft = priv->fts.vlan.t},
        };
        struct mlx5_flow_rule *rule;
        bool table_created = false;

        if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
                priv->fts.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
                                MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
                                MLX5E_TC_FLOW_TABLE_NUM_GROUPS,
                                0);
                if (IS_ERR(priv->fts.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        return ERR_CAST(priv->fts.tc.t);
                }

                table_created = true;
        }

        rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
                                  match_c, match_v,
                                  action, flow_tag,
                                  action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);

        if (IS_ERR(rule) && table_created) {
                mlx5_destroy_flow_table(priv->fts.tc.t);
                priv->fts.tc.t = NULL;
        }

        return rule;
}

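/* Remove one hardware rule; once the last offloaded filter is gone the
 * TC flow table is destroyed so it can be recreated on demand later.
 */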
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule)
{
        mlx5_del_flow_rule(rule);

        if (!mlx5e_tc_num_filters(priv)) {
                mlx5_destroy_flow_table(priv->fts.tc.t);
                priv->fts.tc.t = NULL;
        }
}

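/* Translate the flower match, as delivered by the flow dissector, into the
 * device fte_match_param layout: match_c holds the mask and match_v the
 * value, both limited to outer header fields.
 */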
static int parse_cls_flower(struct mlx5e_priv *priv,
                            u32 *match_c, u32 *match_v,
                            struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

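/* A rule carries exactly one TC action: either a gact drop, mapped to a
 * DROP flow context action, or an skbedit mark of at most 16 bits, mapped
 * to a flow tag plus forwarding to the destination table.
 */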
static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                            u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tc_for_each_action(a, exts) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

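/* Add a flower filter, or replace the one already stored under the same
 * cookie. The new hardware rule is installed before the old rule (if any)
 * is removed; on error the flow is only freed when it was newly allocated
 * here.
 */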
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
        u32 *match_c;
        u32 *match_v;
        int err = 0;
        u32 flow_tag;
        u32 action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_rule *old = NULL;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (flow)
                old = flow->rule;
        else
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);

        match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
        match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
        if (!match_c || !match_v || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, match_c, match_v, f);
        if (err < 0)
                goto err_free;

        err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
        if (err < 0)
                goto err_free;

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_free;

        flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
                                       flow_tag);
        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_hash_del;
        }

        if (old)
                mlx5e_tc_del_flow(priv, old);

        goto out;

err_hash_del:
        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

err_free:
        if (!old)
                kfree(flow);
out:
        kfree(match_c);
        kfree(match_v);
        return err;
}

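/* Tear down the filter identified by the TC cookie: drop it from the
 * hashtable, remove its hardware rule and free the bookkeeping.
 */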
int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_flow_table *tc = &priv->fts.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow->rule);

        kfree(flow);

        return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

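/* Set up the cookie-keyed hashtable used to track offloaded filters. */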
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_flow_table *tc = &priv->fts.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

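/* rhashtable_free_and_destroy() callback: release one remaining flow. */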
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow->rule);
        kfree(flow);
}

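/* Flush all offloaded filters and destroy the TC flow table, if any. */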
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_flow_table *tc = &priv->fts.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
                mlx5_destroy_flow_table(priv->fts.tc.t);
                priv->fts.tc.t = NULL;
        }
}