/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "mlx5_core.h"
42 int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
43 struct mlx5_flow_table *ft, u32 underlay_qpn,
46 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
47 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
49 if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
53 MLX5_SET(set_flow_table_root_in, in, opcode,
54 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
55 MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
58 MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
59 MLX5_SET(set_flow_table_root_in, in, table_id, 0);
61 MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
62 MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
65 MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
67 MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
68 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
71 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
74 int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
76 enum fs_flow_table_op_mod op_mod,
77 enum fs_flow_table_type type, unsigned int level,
78 unsigned int log_size, struct mlx5_flow_table
79 *next_ft, unsigned int *table_id, u32 flags)
81 int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
82 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
83 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
86 MLX5_SET(create_flow_table_in, in, opcode,
87 MLX5_CMD_OP_CREATE_FLOW_TABLE);
89 MLX5_SET(create_flow_table_in, in, table_type, type);
90 MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
91 MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
93 MLX5_SET(create_flow_table_in, in, vport_number, vport);
94 MLX5_SET(create_flow_table_in, in, other_vport, 1);
97 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
99 MLX5_SET(create_flow_table_in, in, flow_table_context.encap_en,
103 case FS_FT_OP_MOD_NORMAL:
105 MLX5_SET(create_flow_table_in, in,
106 flow_table_context.table_miss_action, 1);
107 MLX5_SET(create_flow_table_in, in,
108 flow_table_context.table_miss_id, next_ft->id);
112 case FS_FT_OP_MOD_LAG_DEMUX:
113 MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
115 MLX5_SET(create_flow_table_in, in,
116 flow_table_context.lag_master_next_table_id,
121 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
123 *table_id = MLX5_GET(create_flow_table_out, out,
128 int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
129 struct mlx5_flow_table *ft)
131 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
134 MLX5_SET(destroy_flow_table_in, in, opcode,
135 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
136 MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
137 MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
139 MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
140 MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
143 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
146 int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
147 struct mlx5_flow_table *ft,
148 struct mlx5_flow_table *next_ft)
150 u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
151 u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
153 MLX5_SET(modify_flow_table_in, in, opcode,
154 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
155 MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
156 MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
158 if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
159 MLX5_SET(modify_flow_table_in, in, modify_field_select,
160 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
162 MLX5_SET(modify_flow_table_in, in,
163 flow_table_context.lag_master_next_table_id, next_ft->id);
165 MLX5_SET(modify_flow_table_in, in,
166 flow_table_context.lag_master_next_table_id, 0);
170 MLX5_SET(modify_flow_table_in, in, vport_number,
172 MLX5_SET(modify_flow_table_in, in, other_vport, 1);
174 MLX5_SET(modify_flow_table_in, in, modify_field_select,
175 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
177 MLX5_SET(modify_flow_table_in, in,
178 flow_table_context.table_miss_action, 1);
179 MLX5_SET(modify_flow_table_in, in,
180 flow_table_context.table_miss_id,
183 MLX5_SET(modify_flow_table_in, in,
184 flow_table_context.table_miss_action, 0);
188 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
191 int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
192 struct mlx5_flow_table *ft,
194 unsigned int *group_id)
196 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
197 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
200 MLX5_SET(create_flow_group_in, in, opcode,
201 MLX5_CMD_OP_CREATE_FLOW_GROUP);
202 MLX5_SET(create_flow_group_in, in, table_type, ft->type);
203 MLX5_SET(create_flow_group_in, in, table_id, ft->id);
205 MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
206 MLX5_SET(create_flow_group_in, in, other_vport, 1);
209 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
211 *group_id = MLX5_GET(create_flow_group_out, out,
216 int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
217 struct mlx5_flow_table *ft,
218 unsigned int group_id)
220 u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
221 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
223 MLX5_SET(destroy_flow_group_in, in, opcode,
224 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
225 MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
226 MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
227 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
229 MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
230 MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
233 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
236 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
237 int opmod, int modify_mask,
238 struct mlx5_flow_table *ft,
242 unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
243 fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
244 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
245 struct mlx5_flow_rule *dst;
246 void *in_flow_context;
247 void *in_match_value;
252 in = kvzalloc(inlen, GFP_KERNEL);
256 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
257 MLX5_SET(set_fte_in, in, op_mod, opmod);
258 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
259 MLX5_SET(set_fte_in, in, table_type, ft->type);
260 MLX5_SET(set_fte_in, in, table_id, ft->id);
261 MLX5_SET(set_fte_in, in, flow_index, fte->index);
263 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
264 MLX5_SET(set_fte_in, in, other_vport, 1);
267 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
268 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
269 MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
270 MLX5_SET(flow_context, in_flow_context, action, fte->action);
271 MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
272 MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
273 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
275 memcpy(in_match_value, &fte->val, sizeof(fte->val));
277 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
278 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
281 list_for_each_entry(dst, &fte->node.children, node.list) {
284 if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
287 MLX5_SET(dest_format_struct, in_dests, destination_type,
288 dst->dest_attr.type);
289 if (dst->dest_attr.type ==
290 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
291 id = dst->dest_attr.ft->id;
293 id = dst->dest_attr.tir_num;
295 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
296 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
300 MLX5_SET(flow_context, in_flow_context, destination_list_size,
304 if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
305 int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
306 log_max_flow_counter,
310 list_for_each_entry(dst, &fte->node.children, node.list) {
311 if (dst->dest_attr.type !=
312 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
315 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
316 dst->dest_attr.counter->id);
317 in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
320 if (list_size > max_list_size) {
325 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
329 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
/* Create a new flow table entry: SET_FLOW_TABLE_ENTRY with op_mod 0
 * and no modify mask.  Returns 0 on success or a negative errno.
 */
int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
			struct mlx5_flow_table *ft,
			unsigned group_id,
			struct fs_fte *fte)
{
	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
343 int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
344 struct mlx5_flow_table *ft,
350 int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
351 flow_table_properties_nic_receive.
357 return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
360 int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
361 struct mlx5_flow_table *ft,
364 u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
365 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
367 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
368 MLX5_SET(delete_fte_in, in, table_type, ft->type);
369 MLX5_SET(delete_fte_in, in, table_id, ft->id);
370 MLX5_SET(delete_fte_in, in, flow_index, index);
372 MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
373 MLX5_SET(delete_fte_in, in, other_vport, 1);
376 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
379 int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
381 u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
382 u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
385 MLX5_SET(alloc_flow_counter_in, in, opcode,
386 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
388 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
390 *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
394 int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
396 u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
397 u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
399 MLX5_SET(dealloc_flow_counter_in, in, opcode,
400 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
401 MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
402 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
405 int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
406 u64 *packets, u64 *bytes)
408 u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
409 MLX5_ST_SZ_BYTES(traffic_counter)] = {0};
410 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
414 MLX5_SET(query_flow_counter_in, in, opcode,
415 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
416 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
417 MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
418 err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
422 stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
423 *packets = MLX5_GET64(traffic_counter, stats, packets);
424 *bytes = MLX5_GET64(traffic_counter, stats, octets);
428 struct mlx5_cmd_fc_bulk {
435 struct mlx5_cmd_fc_bulk *
436 mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
438 struct mlx5_cmd_fc_bulk *b;
440 MLX5_ST_SZ_BYTES(query_flow_counter_out) +
441 MLX5_ST_SZ_BYTES(traffic_counter) * num;
443 b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
/* Free a bulk query context allocated by mlx5_cmd_fc_bulk_alloc(). */
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
	kfree(b);
}
460 mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
462 u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
464 MLX5_SET(query_flow_counter_in, in, opcode,
465 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
466 MLX5_SET(query_flow_counter_in, in, op_mod, 0);
467 MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
468 MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
469 return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
472 void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
473 struct mlx5_cmd_fc_bulk *b, u32 id,
474 u64 *packets, u64 *bytes)
476 int index = id - b->id;
479 if (index < 0 || index >= b->num) {
480 mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
481 id, b->id, b->id + b->num - 1);
485 stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
486 flow_statistics[index]);
487 *packets = MLX5_GET64(traffic_counter, stats, packets);
488 *bytes = MLX5_GET64(traffic_counter, stats, octets);
491 int mlx5_encap_alloc(struct mlx5_core_dev *dev,
497 int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
498 u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
499 void *encap_header_in;
505 if (size > max_encap_size) {
506 mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
507 size, max_encap_size);
511 in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
516 encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
517 header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
518 inlen = header - (void *)in + size;
520 memset(in, 0, inlen);
521 MLX5_SET(alloc_encap_header_in, in, opcode,
522 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
523 MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
524 MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
525 memcpy(header, encap_header, size);
527 memset(out, 0, sizeof(out));
528 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
530 *encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
535 void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
537 u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
538 u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
540 memset(in, 0, sizeof(in));
541 MLX5_SET(dealloc_encap_header_in, in, opcode,
542 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
543 MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
545 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
548 int mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
549 u8 namespace, u8 num_actions,
550 void *modify_actions, u32 *modify_header_id)
552 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)];
553 int max_actions, actions_size, inlen, err;
559 case MLX5_FLOW_NAMESPACE_FDB:
560 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
561 table_type = FS_FT_FDB;
563 case MLX5_FLOW_NAMESPACE_KERNEL:
564 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
565 table_type = FS_FT_NIC_RX;
571 if (num_actions > max_actions) {
572 mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
573 num_actions, max_actions);
577 actions_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto) * num_actions;
578 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
580 in = kzalloc(inlen, GFP_KERNEL);
584 MLX5_SET(alloc_modify_header_context_in, in, opcode,
585 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
586 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
587 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
589 actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
590 memcpy(actions_in, modify_actions, actions_size);
592 memset(out, 0, sizeof(out));
593 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
595 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
600 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev, u32 modify_header_id)
602 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)];
603 u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)];
605 memset(in, 0, sizeof(in));
606 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
607 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
608 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
611 mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));