net/mlx5: Put elements related to offloaded TC rule in one struct
[linux-2.6-block.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/flow_dissector.h>
34 #include <net/pkt_cls.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_skbedit.h>
37 #include <linux/mlx5/fs.h>
38 #include <linux/mlx5/device.h>
39 #include <linux/rhashtable.h>
40 #include <net/switchdev.h>
41 #include <net/tc_act/tc_mirred.h>
42 #include <net/tc_act/tc_vlan.h>
43 #include "en.h"
44 #include "en_tc.h"
45 #include "eswitch.h"
46
/* One offloaded TC flower filter.
 * @node:   hashtable linkage in priv->fs.tc.ht
 * @cookie: TC filter cookie, used as the hashtable key
 * @rule:   the flow rule installed in HW for this filter
 * @attr:   eswitch (FDB) attributes; used only for FDB flows, where it
 *          points at extra space allocated right after this struct
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_rule	*rule;
	struct mlx5_esw_flow_attr *attr;
};
53
54 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
55 #define MLX5E_TC_TABLE_NUM_GROUPS 4
56
57 static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
58                                                     struct mlx5_flow_spec *spec,
59                                                     u32 action, u32 flow_tag)
60 {
61         struct mlx5_core_dev *dev = priv->mdev;
62         struct mlx5_flow_destination dest = { 0 };
63         struct mlx5_fc *counter = NULL;
64         struct mlx5_flow_rule *rule;
65         bool table_created = false;
66
67         if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
68                 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
69                 dest.ft = priv->fs.vlan.ft.t;
70         } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
71                 counter = mlx5_fc_create(dev, true);
72                 if (IS_ERR(counter))
73                         return ERR_CAST(counter);
74
75                 dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
76                 dest.counter = counter;
77         }
78
79         if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
80                 priv->fs.tc.t =
81                         mlx5_create_auto_grouped_flow_table(priv->fs.ns,
82                                                             MLX5E_TC_PRIO,
83                                                             MLX5E_TC_TABLE_NUM_ENTRIES,
84                                                             MLX5E_TC_TABLE_NUM_GROUPS,
85                                                             0);
86                 if (IS_ERR(priv->fs.tc.t)) {
87                         netdev_err(priv->netdev,
88                                    "Failed to create tc offload table\n");
89                         rule = ERR_CAST(priv->fs.tc.t);
90                         goto err_create_ft;
91                 }
92
93                 table_created = true;
94         }
95
96         spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
97         rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
98                                   action, flow_tag,
99                                   &dest);
100
101         if (IS_ERR(rule))
102                 goto err_add_rule;
103
104         return rule;
105
106 err_add_rule:
107         if (table_created) {
108                 mlx5_destroy_flow_table(priv->fs.tc.t);
109                 priv->fs.tc.t = NULL;
110         }
111 err_create_ft:
112         mlx5_fc_destroy(dev, counter);
113
114         return rule;
115 }
116
117 static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
118                                                     struct mlx5_flow_spec *spec,
119                                                     struct mlx5_esw_flow_attr *attr)
120 {
121         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
122
123         return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
124 }
125
126 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
127                               struct mlx5_flow_rule *rule)
128 {
129         struct mlx5_fc *counter = NULL;
130
131         counter = mlx5_flow_rule_counter(rule);
132
133         mlx5_del_flow_rule(rule);
134
135         mlx5_fc_destroy(priv->mdev, counter);
136
137         if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
138                 mlx5_destroy_flow_table(priv->fs.tc.t);
139                 priv->fs.tc.t = NULL;
140         }
141 }
142
/* Translate a flower classifier match into an mlx5 flow spec.
 *
 * Fills the outer-headers portion of @spec (both match criteria and
 * match value) from the dissector key/mask pairs carried in @f.
 * Supported keys: control, basic (ethertype/ip_proto), Ethernet
 * addresses, IPv4/IPv6 addresses and TCP/UDP ports.
 *
 * Return: 0 on success, -EOPNOTSUPP if the filter uses any other key,
 * -EINVAL if a ports key is used with a non-TCP/UDP protocol.
 */
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	/* Reject filters that match on anything we cannot offload. */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	/* The control key tells us which address family (if any) the
	 * filter matches on; it selects the IPv4/IPv6 branch below.
	 */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	/* Ethertype and IP protocol; ip_proto is remembered for the
	 * L4 ports branch below.
	 */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	/* Source/destination MAC addresses. */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	/* IPv4 source/destination addresses, written into the shared
	 * src/dst ipv4/ipv6 layout of the match parameters.
	 */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	/* IPv6 source/destination addresses. */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	/* L4 ports; only meaningful for TCP and UDP, anything else
	 * carrying a ports key is rejected.
	 */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
311
312 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
313                                 u32 *action, u32 *flow_tag)
314 {
315         const struct tc_action *a;
316         LIST_HEAD(actions);
317
318         if (tc_no_actions(exts))
319                 return -EINVAL;
320
321         *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
322         *action = 0;
323
324         tcf_exts_to_list(exts, &actions);
325         list_for_each_entry(a, &actions, list) {
326                 /* Only support a single action per rule */
327                 if (*action)
328                         return -EINVAL;
329
330                 if (is_tcf_gact_shot(a)) {
331                         *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
332                         if (MLX5_CAP_FLOWTABLE(priv->mdev,
333                                                flow_table_properties_nic_receive.flow_counter))
334                                 *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
335                         continue;
336                 }
337
338                 if (is_tcf_skbedit_mark(a)) {
339                         u32 mark = tcf_skbedit_mark(a);
340
341                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
342                                 netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
343                                             mark);
344                                 return -EINVAL;
345                         }
346
347                         *flow_tag = mark;
348                         *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
349                         continue;
350                 }
351
352                 return -EINVAL;
353         }
354
355         return 0;
356 }
357
358 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
359                                 struct mlx5_esw_flow_attr *attr)
360 {
361         const struct tc_action *a;
362         LIST_HEAD(actions);
363
364         if (tc_no_actions(exts))
365                 return -EINVAL;
366
367         memset(attr, 0, sizeof(*attr));
368         attr->in_rep = priv->ppriv;
369
370         tcf_exts_to_list(exts, &actions);
371         list_for_each_entry(a, &actions, list) {
372                 /* Only support a single action per rule */
373                 if (attr->action)
374                         return -EINVAL;
375
376                 if (is_tcf_gact_shot(a)) {
377                         attr->action = MLX5_FLOW_CONTEXT_ACTION_DROP |
378                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
379                         continue;
380                 }
381
382                 if (is_tcf_mirred_redirect(a)) {
383                         int ifindex = tcf_mirred_ifindex(a);
384                         struct net_device *out_dev;
385                         struct mlx5e_priv *out_priv;
386
387                         out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
388
389                         if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
390                                 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
391                                        priv->netdev->name, out_dev->name);
392                                 return -EINVAL;
393                         }
394
395                         attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
396                         out_priv = netdev_priv(out_dev);
397                         attr->out_rep = out_priv->ppriv;
398                         continue;
399                 }
400
401                 return -EINVAL;
402         }
403         return 0;
404 }
405
406 int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
407                            struct tc_cls_flower_offload *f)
408 {
409         struct mlx5e_tc_table *tc = &priv->fs.tc;
410         int err = 0;
411         bool fdb_flow = false;
412         u32 flow_tag, action;
413         struct mlx5e_tc_flow *flow;
414         struct mlx5_flow_spec *spec;
415         struct mlx5_flow_rule *old = NULL;
416         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
417
418         if (esw && esw->mode == SRIOV_OFFLOADS)
419                 fdb_flow = true;
420
421         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
422                                       tc->ht_params);
423         if (flow) {
424                 old = flow->rule;
425         } else {
426                 if (fdb_flow)
427                         flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
428                                        GFP_KERNEL);
429                 else
430                         flow = kzalloc(sizeof(*flow), GFP_KERNEL);
431         }
432
433         spec = mlx5_vzalloc(sizeof(*spec));
434         if (!spec || !flow) {
435                 err = -ENOMEM;
436                 goto err_free;
437         }
438
439         flow->cookie = f->cookie;
440
441         err = parse_cls_flower(priv, spec, f);
442         if (err < 0)
443                 goto err_free;
444
445         if (fdb_flow) {
446                 flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
447                 err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
448                 if (err < 0)
449                         goto err_free;
450                 flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
451         } else {
452                 err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
453                 if (err < 0)
454                         goto err_free;
455                 flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
456         }
457
458         if (IS_ERR(flow->rule)) {
459                 err = PTR_ERR(flow->rule);
460                 goto err_free;
461         }
462
463         err = rhashtable_insert_fast(&tc->ht, &flow->node,
464                                      tc->ht_params);
465         if (err)
466                 goto err_del_rule;
467
468         if (old)
469                 mlx5e_tc_del_flow(priv, old);
470
471         goto out;
472
473 err_del_rule:
474         mlx5_del_flow_rule(flow->rule);
475
476 err_free:
477         if (!old)
478                 kfree(flow);
479 out:
480         kvfree(spec);
481         return err;
482 }
483
484 int mlx5e_delete_flower(struct mlx5e_priv *priv,
485                         struct tc_cls_flower_offload *f)
486 {
487         struct mlx5e_tc_flow *flow;
488         struct mlx5e_tc_table *tc = &priv->fs.tc;
489
490         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
491                                       tc->ht_params);
492         if (!flow)
493                 return -EINVAL;
494
495         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
496
497         mlx5e_tc_del_flow(priv, flow->rule);
498
499         kfree(flow);
500
501         return 0;
502 }
503
504 int mlx5e_stats_flower(struct mlx5e_priv *priv,
505                        struct tc_cls_flower_offload *f)
506 {
507         struct mlx5e_tc_table *tc = &priv->fs.tc;
508         struct mlx5e_tc_flow *flow;
509         struct tc_action *a;
510         struct mlx5_fc *counter;
511         LIST_HEAD(actions);
512         u64 bytes;
513         u64 packets;
514         u64 lastuse;
515
516         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
517                                       tc->ht_params);
518         if (!flow)
519                 return -EINVAL;
520
521         counter = mlx5_flow_rule_counter(flow->rule);
522         if (!counter)
523                 return 0;
524
525         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
526
527         tcf_exts_to_list(f->exts, &actions);
528         list_for_each_entry(a, &actions, list)
529                 tcf_action_stats_update(a, bytes, packets, lastuse);
530
531         return 0;
532 }
533
/* Hashtable parameters for offloaded flows: keyed by the 64-bit TC
 * filter cookie stored in struct mlx5e_tc_flow.
 */
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
540
541 int mlx5e_tc_init(struct mlx5e_priv *priv)
542 {
543         struct mlx5e_tc_table *tc = &priv->fs.tc;
544
545         tc->ht_params = mlx5e_tc_flow_ht_params;
546         return rhashtable_init(&tc->ht, &tc->ht_params);
547 }
548
549 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
550 {
551         struct mlx5e_tc_flow *flow = ptr;
552         struct mlx5e_priv *priv = arg;
553
554         mlx5e_tc_del_flow(priv, flow->rule);
555         kfree(flow);
556 }
557
558 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
559 {
560         struct mlx5e_tc_table *tc = &priv->fs.tc;
561
562         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
563
564         if (!IS_ERR_OR_NULL(tc->t)) {
565                 mlx5_destroy_flow_table(tc->t);
566                 tc->t = NULL;
567         }
568 }