// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"
#define DR_RULE_MAX_STES_OPTIMIZED 5
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
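/* DR_RULE_MAX_STE_CHAIN_OPTIMIZED sizes the on-stack hw_ste buffer used in
 * dr_rule_create_rule_nic(): rule chains that need at most
 * DR_RULE_MAX_STES_OPTIMIZED match STEs (plus room for action STEs) avoid a
 * heap allocation; longer chains fall back to kzalloc().
 */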
static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
				       enum mlx5dr_domain_nic_type nic_type,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);

	ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(last_ste),
						  ste_info_last, send_list, true);

	return 0;
}
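/* Set the miss address of the last STE in a chain to the matcher's end
 * anchor, unless a miss address was already set on this hw_ste.
 */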
static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
					   struct mlx5dr_matcher_rx_tx *nic_matcher,
					   u8 *hw_ste)
{
	struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
	u64 icm_addr;

	if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
		return;

	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
}
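/* A "collision" hash table is a single-entry table that never grows. It is
 * chained behind an existing hash entry when two rules hash to the same
 * index, and it is also reused to host the extra STEs needed for rule
 * actions.
 */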
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}
static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		mlx5dr_ste_free(ste, matcher, nic_matcher);
		return NULL;
	}

	return ste;
}
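/* Apply one queued STE update: sync the cached copy of the hw_ste and post
 * the write to ICM through the send engine.
 */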
static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy data to ste, only reduced size or control, the last 16B (mask)
	 * is already written to the hw.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
		       ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);

	mlx5dr_send_info_free(ste_info);

	return ret;
}
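/* Flush all pending STE writes to ICM. The final rule update is sent in
 * reverse order (see dr_rule_create_rule_nic()) so that the STEs created
 * last are written before the earlier STEs that point at them.
 */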
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}
static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
			return ste;
	}

	return NULL;
}
static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous from the list */
	ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					  new_ste, mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to update duplicate entry\n");
		mlx5dr_ste_free(new_ste, matcher, nic_matcher);
		return NULL;
	}

	return new_ste;
}
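/* During rehash, each used entry of the old table is copied into the new
 * table; the helpers below preserve the chain linkage of the copied STE
 * (next_htbl, pointing_ste, refcount and the rule's last-member pointer).
 */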
static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link old STEs rule to the new ste */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = mlx5dr_send_info_alloc(dmn,
						  nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher, nic_matcher, cur_ste,
						  new_htbl, update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	return -EINVAL;
}
static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->chunk->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher, nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl, update_list);
		if (err)
			goto clean_copy;

		/* In order to decrease the number of allocated ste_send_info
		 * structs, send the current table row now.
		 */
		err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
		if (err) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
			goto clean_copy;
		}
	}

clean_copy:
	return err;
}
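/* Rehash flow: allocate a larger table, copy every used entry of the
 * current table into it and write it to HW before the previous STE (or the
 * matcher anchor) is repointed at it, so the old table stays valid until
 * the new one is fully populated.
 */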
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to the hw is done in regular order of rehash_table_send_list,
	 * in order to have the origin data written before the miss address of
	 * collision entries, if any exist.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_new_htbl;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor, anchors size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to call dr_ste_set_hit_addr on the hw_ste here
		 * (48B long), since it only touches the first 32B.
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->hw_ste_arr,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(ste_to_update),
						  ste_info, update_list, false);

	return new_htbl;

free_new_htbl:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		mlx5dr_send_info_free(del_ste_info);
	}

	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	mlx5dr_send_info_free(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}
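/* Grow to the next ICM chunk size, capped at the domain's max SW ICM size;
 * returns NULL when the table is already at the maximum and cannot grow.
 */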
static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk->size)
		return NULL; /* Skip rehash, we are already at the max size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}
static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					new_ste, miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	mlx5dr_send_info_free(ste_info);
	return NULL;
}
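/* Each rule holds a reference on every action it uses; the action member
 * list tracks those references so they can be dropped on rule destruction.
 */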
static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}

static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}
void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	/* Updating the rule member is usually done for the last STE, or during
	 * rule creation to recover from mid-creation failure (for this purpose
	 * the force flag is used).
	 */
	if (ste->next_htbl && !force)
		return;

	/* Update is required since each rule keeps track of its last STE */
	ste->rule_rx_tx = nic_rule;
	nic_rule->last_rule_ste = ste;
}
static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
	struct mlx5dr_ste *first_ste;

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
				     struct mlx5dr_ste, miss_list_node);

	return first_ste->htbl->pointing_ste;
}
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes)
{
	bool first = false;

	*num_of_stes = 0;

	if (!curr_ste)
		return -ENOENT;

	/* Iterate from last to first */
	while (!first) {
		first = curr_ste->ste_chain_location == 1;
		ste_arr[*num_of_stes] = curr_ste;
		*num_of_stes += 1;
		curr_ste = dr_rule_get_pointed_ste(curr_ste);
	}

	return 0;
}
static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 bits = 0;

	while (byte_mask) {
		byte_mask = byte_mask & (byte_mask - 1);
		bits++;
	}

	return bits;
}
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}
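/* Actions that do not fit in the match STEs are placed in extra STEs that
 * are chained after the last match STE, each hosted in its own one-entry
 * (collision-style) hash table.
 */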
static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz: the actions fit in
	 *    the existing STEs.
	 * 2. num_of_builders is less than new_hw_ste_arr_sz: new STEs were
	 *    added to support the actions.
	 */
	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* When freeing an STE we go over its miss list, so add this STE to the list */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
							 nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point current ste to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}
static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;

	/* Take ref on table, only on first time this ste is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	ste->ste_chain_location = ste_location;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	mlx5dr_send_info_free(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}
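/* Insert one STE of the rule chain into cur_htbl: use the hashed entry if
 * it is free, reuse a duplicate from the miss list, grow the table when it
 * is too crowded, or append a collision entry to the miss list otherwise.
 */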
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->chunk->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this ste is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is the last STE in the chain and has the same
			 * tag, all the previous STEs are the same; if so, this
			 * rule is a duplicate.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize the hash table */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Release in dr_rule_create_rule()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk->size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}

	return ste;
}
static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 i;

	for (i = s_idx; i < e_idx; i++) {
		if (value[i] & ~mask[i]) {
			pr_info("Rule parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}
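/* Validate that the rule's match value only sets bits that are covered by
 * the matcher's mask, per match criteria area, and copy the value into the
 * match_param layout.
 */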
static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		s_idx = offsetof(struct mlx5dr_match_param, misc5);
		e_idx = min(s_idx + sizeof(param->misc5), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	return true;
}
static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	/* Check if this nic rule was actually created, or whether it was
	 * skipped and only the other type of the RX/TX nic rule was created.
	 */
	if (!nic_rule->last_rule_ste)
		return 0;

	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);

	nic_rule->nic_matcher->rules--;
	if (!nic_rule->nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
						   nic_rule->nic_matcher);

	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}

static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}
static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	mlx5dr_dbg_rule_del(rule);

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		dr_rule_destroy_rule_nic(rule, &rule->rx);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		dr_rule_destroy_rule_nic(rule, &rule->tx);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		dr_rule_destroy_rule_fdb(rule);
		break;
	default:
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);
	return 0;
}
static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
		return DR_RULE_IPV6;

	return DR_RULE_IPV4;
}
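/* On an FDB domain a rule may be relevant to only one side: traffic coming
 * from the uplink is matched only on RX, and traffic originating from a
 * local vport is matched only on TX, so the other nic_rule is skipped.
 */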
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_domain_nic_type nic_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
			return true;

		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
			return true;
	}

	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
		return true;

	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
		return true;

	return false;
}
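/* Build one nic (RX or TX) half of the rule: select the STE builders for
 * the packet's IP version, fill the hw_ste array with match tags and
 * actions, then walk the matcher's hash tables inserting one STE per
 * builder and chaining any action STEs at the end, and finally flush the
 * pending writes to HW.
 */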
static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	bool hw_ste_arr_is_opt;
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		return ret;

	hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
	if (likely(hw_ste_arr_is_opt)) {
		hw_ste_arr = hw_ste_arr_optimized;
	} else {
		hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
				     DR_STE_SIZE, GFP_KERNEL);
		if (!hw_ste_arr)
			return -ENOMEM;
	}

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the ste array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto remove_from_nic_tbl;

	/* Set the actions values/addresses inside the ste array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto remove_from_nic_tbl;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs, and build dr_ste accordingly.
	 * The loop covers only the builders, which are equal to or fewer than
	 * the number of STEs, since some actions may live in additional STEs.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed applying actions\n");
		goto free_rule;
	}

	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending ste!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	nic_matcher->rules++;

	mlx5dr_domain_nic_unlock(nic_dmn);

	if (unlikely(!hw_ste_arr_is_opt))
		kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info's */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		mlx5dr_send_info_free(ste_info);
	}

remove_from_nic_tbl:
	if (!nic_matcher->rules)
		mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);

free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);

	if (unlikely(!hw_ste_arr_is_opt))
		kfree(hw_ste_arr);

	return ret;
}
static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	/* Copy match_param since it will be consumed during the first
	 * nic_rule insertion.
	 */
	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (ret)
		goto destroy_rule_nic_rx;

	return 0;

destroy_rule_nic_rx:
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}
static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	INIT_LIST_HEAD(&rule->dbg_node);
	mlx5dr_dbg_rule_add(rule);
	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	ret = dr_rule_destroy_rule(rule);
	if (ret)
		return ret;

	refcount_dec(&matcher->refcount);
	return 0;
}