/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)\

#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};
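/* FTEs are looked up by their match value inside a flow group, and flow
 * groups are looked up by their match criteria (mask) inside a flow table;
 * both lookups go through resizable hash tables using the parameters below.
 */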
static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires
 * holding the FTE lock for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);
static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}
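/* The nested helpers below take the node's rwsem with a lockdep class that
 * reflects its depth in the steering tree (grandparent/parent/child, e.g.
 * table/group/FTE), so the same lock type can be held at several levels
 * without false lockdep warnings. Each helper also takes a reference on
 * the node it locks.
 */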
static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node)
{
	if (node) {
		down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_write(&node->lock);
}
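/* Dropping the last reference triggers a two-stage teardown: the hardware
 * object is destroyed first (del_hw_func), then the software node is
 * unlinked and freed (del_sw_func) under the parent's write lock. The
 * parent's reference is dropped afterwards, so teardown can cascade up
 * the tree.
 */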
static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only the root namespace doesn't have a parent and
			 * we just need to free its node.
			 */
			down_write_ref_node(parent_node);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node);
}

static int tree_remove_node(struct fs_node *node)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node);
	return 0;
}
static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}
static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}
static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);

	if (node->active) {
		err = root->cmds->destroy_flow_table(dev, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}
static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask;
	struct mlx5_core_dev *dev = get_dev(node);
	int err;
	bool update_fte = false;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			      BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		update_fte = true;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
		update_fte = true;
	}
out:
	root = find_root(&ft->node);
	if (update_fte && fte->dests_size) {
		err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
		if (err)
			mlx5_core_warn(dev,
				       "%s can't del rule fg id=%d fte_index=%d\n",
				       __func__, fg->id, fte->index);
	}
	kfree(rule);
}
static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(dev, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}
static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}
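/* FTE indexes are carved out of the group's [start_index, start_index +
 * max_ftes) range: the ida allocator below hands out a group-relative
 * offset and the absolute index is stored in the FTE.
 */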
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				u32 *match_value,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}
static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}
	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}
static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}
/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start (closest from right), else we search for the
 * last flow table in the root sub-tree till start (closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}
/* If reverse is false then return the first flow table in the next priority
 * after prio in the tree, else return the last flow table in the previous
 * priority before prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}
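/* Re-point every flow table in prio at ft as its next (miss) table, so
 * that packets which match nothing in this priority continue to ft.
 */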
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(dev, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
				     modify_mask, fte);
	up_write_ref_node(&fte->node);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}
/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}
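/* Create a flow table: allocate the software object, create the table in
 * firmware (chained to the next table in line), connect it into the prio
 * chain and, if it became the lowest-level table, update the root table.
 */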
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
					    ft->type, ft->level, log_table_sz,
					    next_ft, &ft->id, ft->flags);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	down_write_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level = level;
	ft_attr.prio = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio = prio;
	ft_attr.level = level;
	ft_attr.flags = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
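/* A minimal usage sketch (hypothetical caller, for illustration only;
 * error handling is elided and the prio/size/group numbers are arbitrary):
 *
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	ft = mlx5_create_auto_grouped_flow_table(ns, 0, 1024, 16, 0, 0);
 *
 * Flow groups are then carved out on demand by mlx5_add_flow_rules()
 * according to each rule's match criteria.
 */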
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		tree_put_node(&fg->node);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}
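/* A flow handle bundles one rule per destination. Existing rules whose
 * destination already matches are reused by taking a reference; only the
 * newly allocated ones mark the FTE as needing a firmware update.
 */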
static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}
/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(get_dev(&ft->node),
					     ft, fg, fte);
	else
		err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
					     modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}
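/* Autogrouped tables carve their FTE index space into equal slices, one
 * per required group plus spare room; a new group takes the first gap that
 * can hold a slice, keeping the group list sorted by start_index.
 */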
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/* ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	ft->autogroup.num_groups++;

out:
	return fg;
}
static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}
static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_ENCAP |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if (flow_act->has_flow_tag &&
	    fte->action.flow_tag != flow_act->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->action.flow_tag,
			       flow_act->flow_tag);
		return -EEXIST;
	}

	return 0;
}
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    u32 *match_value,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}
struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
{
	struct mlx5_flow_rule *dst;
	struct fs_fte *fte;

	fs_get_obj(fte, handle->rule[0]->node.parent);

	fs_for_each_dst(dst, fte) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			return dst->dest_attr.counter;
	}

	return NULL;
}

static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
{
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
		return !counter;

	if (!counter)
		return false;

	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(dest->counter, action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level)))
		return false;
	return true;
}
struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head  list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}
static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}
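/* Optimistic concurrency: group and table nodes carry a version counter
 * that is bumped on every insertion. The sum of the matching groups'
 * versions is sampled before dropping the read locks; if it changed by the
 * time the write locks are taken, the lookup is retried.
 */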
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version;
	int err;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

	list_for_each_entry(iter, match_head, list) {
		nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
	}

search_again_locked:
	version = matched_fgs_get_version(match_head);
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
						 rhash_fte);
		if (!fte_tmp || !tree_get_node(&fte_tmp->node))
			continue;

		nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
		if (!take_write) {
			list_for_each_entry(iter, match_head, list)
				up_read_ref_node(&iter->g->node);
		} else {
			list_for_each_entry(iter, match_head, list)
				up_write_ref_node(&iter->g->node);
		}

		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node);
		tree_put_node(&fte_tmp->node);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

	/* No group with matching fte found. Try to add a new fte to any
	 * matching fg.
	 */

	if (!take_write) {
		list_for_each_entry(iter, match_head, list)
			up_read_ref_node(&iter->g->node);
		list_for_each_entry(iter, match_head, list)
			nested_down_write_ref_node(&iter->g->node,
						   FS_LOCK_PARENT);
		take_write = true;
	}

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, for case the new FTE with the
	 * same values was added while the fgs weren't locked
	 */
	if (version != matched_fgs_get_version(match_head))
		goto search_again_locked;

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;
		err = insert_fte(g, fte);
		if (err) {
			if (err == -ENOSPC)
				continue;
			list_for_each_entry(iter, match_head, list)
				up_write_ref_node(&iter->g->node);
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		list_for_each_entry(iter, match_head, list)
			up_write_ref_node(&iter->g->node);
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node);
		tree_put_node(&fte->node);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	list_for_each_entry(iter, match_head, list)
		up_write_ref_node(&iter->g->node);
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}
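/* Rule insertion first searches the matching groups under read locks and
 * only upgrades to the table write lock when a new group must be created;
 * -EAGAIN or a table version change sends it back to the lookup step.
 */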
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node);
		return rule;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node);
	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
			   dest_num, fte);
	up_write_ref_node(&fte->node);
	tree_put_node(&fte->node);
	tree_put_node(&g->node);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node);
	tree_put_node(&g->node);
	return ERR_PTR(err);
}
static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (num_dest)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			num_dest = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	int i;

	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node);
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);
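/* A minimal add/del sketch (hypothetical caller, for illustration only;
 * "tirn" is an assumed, already-created TIR number and error handling is
 * elided):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_spec *spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 *	struct mlx5_flow_handle *handle;
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	kvfree(spec);
 *	...
 *	mlx5_del_flow_rules(handle);
 *
 * An empty spec matches everything its auto-created group's criteria allow.
 */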
/* Assuming prio->node.children (flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}
/* Connect the flow table from the previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		prio = type;
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		if (steering->egress_root_ns)
			return &steering->egress_root_ns->ns;
		else
			return NULL;
	default:
		return NULL;
	}

	root_ns = steering->root_ns;
	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = FS_TYPE_PRIO;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}
static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}
static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}

	return true;
}
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}
static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}

	return 0;
}
static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
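/* The anchor is a last, single-entry table placed in the final priority of
 * the NIC RX namespace, so every priority chain has a terminating table to
 * miss into.
 */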
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}
static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}
static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node);
		tree_remove_node(node);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}
static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
}

static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
}
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_tx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
	if (IS_ERR(prio))
		goto out_err;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
	if (IS_ERR(prio))
		goto out_err;

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	return PTR_ERR(prio);
}
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
					       sizeof(*steering->esw_egress_root_ns),
					       GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	return err;
}

static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
						sizeof(*steering->esw_ingress_root_ns),
						GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	return err;
}
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
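/* Top-level init: set up flow counter stats and the fg/fte caches, then
 * create each root namespace (NIC RX, FDB, vport ACLs, sniffer RX/TX,
 * egress) only if the corresponding firmware capability is reported.
 */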
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);