/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/flow_table.h>
#include "en.h"
struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
	u16			max_inline;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
			MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);

	if (port_state == VPORT_STATE_UP)
		netif_carrier_on(priv->netdev);
	else
		netif_carrier_off(priv->netdev);
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_vport_stats *s = &priv->stats.vport;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u64 tx_offload_none;
	int i, j;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return;

	/* Collect first the SW counters and then HW for consistency */
	s->tso_packets		= 0;
	s->tso_bytes		= 0;
	s->tx_queue_stopped	= 0;
	s->tx_queue_wake	= 0;
	s->tx_queue_dropped	= 0;
	tx_offload_none		= 0;
	s->lro_packets		= 0;
	s->lro_bytes		= 0;
	s->rx_csum_none		= 0;
	s->rx_wqe_err		= 0;
	for (i = 0; i < priv->params.num_channels; i++) {
		rq_stats = &priv->channel[i]->rq.stats;

		s->lro_packets	+= rq_stats->lro_packets;
		s->lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_wqe_err	+= rq_stats->wqe_err;

		for (j = 0; j < priv->params.num_tc; j++) {
			sq_stats = &priv->channel[i]->sq[j].stats;

			s->tso_packets		+= sq_stats->tso_packets;
			s->tso_bytes		+= sq_stats->tso_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			tx_offload_none		+= sq_stats->csum_offload_none;
		}
	}

	/* HW counters */
	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	memset(out, 0, outlen);

	if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	s->rx_error_packets =
		MLX5_GET_CTR(out, received_errors.packets);
	s->rx_error_bytes =
		MLX5_GET_CTR(out, received_errors.octets);
	s->tx_error_packets =
		MLX5_GET_CTR(out, transmit_errors.packets);
	s->tx_error_bytes =
		MLX5_GET_CTR(out, transmit_errors.octets);

	s->rx_unicast_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets);
	s->rx_unicast_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets);
	s->tx_unicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
	s->tx_unicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets);

	s->rx_multicast_packets =
		MLX5_GET_CTR(out, received_eth_multicast.packets);
	s->rx_multicast_bytes =
		MLX5_GET_CTR(out, received_eth_multicast.octets);
	s->tx_multicast_packets =
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
	s->tx_multicast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets);

	s->rx_broadcast_packets =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);
	s->rx_broadcast_bytes =
		MLX5_GET_CTR(out, received_eth_broadcast.octets);
	s->tx_broadcast_packets =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
	s->tx_broadcast_bytes =
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	s->rx_packets =
		s->rx_unicast_packets +
		s->rx_multicast_packets +
		s->rx_broadcast_packets;
	s->rx_bytes =
		s->rx_unicast_bytes +
		s->rx_multicast_bytes +
		s->rx_broadcast_bytes;
	s->tx_packets =
		s->tx_unicast_packets +
		s->tx_multicast_packets +
		s->tx_broadcast_packets;
	s->tx_bytes =
		s->tx_unicast_bytes +
		s->tx_multicast_bytes +
		s->tx_broadcast_bytes;

	/* Update calculated offload counters */
	s->tx_csum_offload = s->tx_packets - tx_offload_none;
	s->rx_csum_good    = s->rx_packets - s->rx_csum_none;

free_out:
	kvfree(out);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		mlx5e_update_stats(priv);
		schedule_delayed_work(dwork,
				      msecs_to_jiffies(
					      MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void __mlx5e_async_event(struct mlx5e_priv *priv,
				enum mlx5_dev_event event)
{
	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		schedule_work(&priv->update_carrier_work);
		break;

	default:
		break;
	}
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;

	spin_lock(&priv->async_events_spinlock);
	if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
		__mlx5e_async_event(priv, event);
	spin_unlock(&priv->async_events_spinlock);
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	spin_lock_irq(&priv->async_events_spinlock);
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
	spin_unlock_irq(&priv->async_events_spinlock);
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
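
/*
 * Worked example (illustrative, not from the original source): with the
 * default Ethernet MTU of 1500 bytes, MLX5E_SW2HW_MTU(1500) =
 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) = 1522 bytes
 * on the wire side; MLX5E_HW2SW_MTU() is the exact inverse.
 */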
static int mlx5e_create_rq(struct mlx5e_channel *c,
			   struct mlx5e_rq_param *param,
			   struct mlx5e_rq *rq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int wq_sz;
	int err;
	int i;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
			       cpu_to_node(c->cpu));
	if (!rq->skb) {
		err = -ENOMEM;
		goto err_rq_wq_destroy;
	}

	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
					     MLX5E_SW2HW_MTU(priv->netdev->mtu);
	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

		wqe->data.lkey = c->mkey_be;
		wqe->data.byte_count =
			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
	}

	rq->netdev  = c->netdev;
	rq->channel = c;

	return 0;

err_rq_wq_destroy:
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	kfree(rq->skb);
	mlx5_wq_destroy(&rq->wq_ctrl);
}
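
/*
 * The CREATE_RQ firmware command takes the RQ context followed by the
 * physical-address (PAS) array of the work-queue buffer, so the command
 * size below is MLX5_ST_SZ_BYTES(create_rq_in) plus one u64 per buffer
 * page. E.g. (illustrative numbers) a WQ buffer spanning 4 pages adds
 * 4 * 8 = 32 bytes of PAS entries to the base command layout.
 */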
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_rq(mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_wq_ll *wq = &rq->wq;
	int i;

	for (i = 0; i < 1000; i++) {
		if (wq->cur_sz >= priv->params.min_rx_wqes)
			return 0;

		msleep(20);
	}

	return -ETIMEDOUT;
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_create_rq(c, param, rq);
	if (err)
		return err;

	err = mlx5e_enable_rq(rq, param);
	if (err)
		goto err_destroy_rq;

	err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_disable_rq;

	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

	return 0;

err_disable_rq:
	mlx5e_disable_rq(rq);
err_destroy_rq:
	mlx5e_destroy_rq(rq);

	return err;
}
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */

	mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
	while (!mlx5_wq_ll_is_empty(&rq->wq))
		msleep(20);

	/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
	napi_synchronize(&rq->channel->napi);

	mlx5e_disable_rq(rq);
	mlx5e_destroy_rq(rq);
}
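
/*
 * Per-SQ SKB and DMA-mapping bookkeeping follows. The DMA fifo is sized
 * at wq_sz * MLX5_SEND_WQEBB_NUM_DS entries; since the WQ size is a
 * power of two, df_sz is too, and (df_sz - 1) serves as a cheap
 * ring-index mask. E.g. (illustrative) wq_sz = 1024 with 4 data
 * segments per WQEBB gives df_sz = 4096 and dma_fifo_mask = 0xfff.
 */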
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
	kfree(sq->skb);
	kfree(sq->dma_fifo);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
	sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
				    numa);

	if (!sq->skb || !sq->dma_fifo) {
		mlx5e_free_sq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}
static int mlx5e_create_sq(struct mlx5e_channel *c,
			   int tc,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_sq *sq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *sqc = param->sqc;
	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
	int txq_ix;
	int err;

	err = mlx5_alloc_map_uar(mdev, &sq->uar);
	if (err)
		return err;

	param->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
				 &sq->wq_ctrl);
	if (err)
		goto err_unmap_free_uar;

	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
	sq->uar_map     = sq->uar.map;
	sq->uar_bf_map  = sq->uar.bf_map;
	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
	sq->max_inline  = param->max_inline;

	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	txq_ix  = c->ix + tc * priv->params.num_channels;
	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);

	sq->channel = c;
	sq->tc      = tc;
	sq->mkey_be = c->mkey_be;
	/* leave room so a maximal multi-WQEBB post never wraps mid-WQE */
	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
	priv->txq_to_sq_map[txq_ix] = sq;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &sq->uar);

	return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;

	mlx5e_free_sq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
	mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * sq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq  = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));

	MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
	MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
	MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
	MLX5_SET(sqc, sqc, tis_lst_sz, 1);
	MLX5_SET(sqc, sqc, flush_in_error_en, 1);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq, wq, uar_page, sq->uar.index);
	MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
					MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&sq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
	MLX5_SET(sqc, sqc, state, next_state);

	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
			 int tc,
			 struct mlx5e_sq_param *param,
			 struct mlx5e_sq *sq)
{
	int err;

	err = mlx5e_create_sq(c, tc, param, sq);
	if (err)
		return err;

	err = mlx5e_enable_sq(sq, param);
	if (err)
		goto err_destroy_sq;

	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
	if (err)
		goto err_disable_sq;

	set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);

	return 0;

err_disable_sq:
	mlx5e_disable_sq(sq);
err_destroy_sq:
	mlx5e_destroy_sq(sq);

	return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
	clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
	napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
	netif_tx_disable_queue(sq->txq);

	/* ensure hw is notified of all pending wqes */
	if (mlx5e_sq_has_room_for(sq, 1))
		mlx5e_send_nop(sq, true);

	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
	while (sq->cc != sq->pc) /* wait till sq is empty */
		msleep(20);

	/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
	napi_synchronize(&sq->channel->napi);

	mlx5e_disable_sq(sq);
	mlx5e_destroy_sq(sq);
}
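
/*
 * Completion queue setup follows. Note that the CQ doorbell record is
 * two consecutive dwords: set_ci_db (consumer index) at db.db[0] and
 * arm_db at db.db[1], which is why mlx5e_create_cq() below points the
 * two doorbells one entry apart within the same record.
 */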
static int mlx5e_create_cq(struct mlx5e_channel *c,
			   struct mlx5e_cq_param *param,
			   struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;
	mcq->uar        = &priv->cq_uar;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->channel = c;

	return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_channel *c = cq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 u16 moderation_usecs,
			 u16 moderation_frames)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					     moderation_usecs,
					     moderation_frames);
	if (err)
		goto err_disable_cq;

	return 0;

err_disable_cq:
	mlx5e_disable_cq(cq);

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_channel_param *cparam)
{
	struct mlx5e_priv *priv = c->priv;
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
				    priv->params.tx_cq_moderation_usec,
				    priv->params.tx_cq_moderation_pkts);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_sq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
				      int num_channels)
{
	int i;

	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
		c->tc_to_txq_map[i] = c->ix + i * num_channels;
}
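
/*
 * TXQs are laid out channel-major: txq = ix + tc * num_channels. For
 * example (illustrative), with 4 channels and 2 TCs, channel 1 owns
 * txq 1 (tc 0) and txq 5 (tc 1).
 */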
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv    = priv;
	c->ix      = ix;
	c->cpu     = cpu;
	c->pdev    = &priv->mdev->pdev->dev;
	c->netdev  = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mr.key);
	c->num_tc  = priv->params.num_tc;

	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	netif_napi_del(&c->napi);
	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);

	mlx5e_build_common_cq_param(priv, param);
}

static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_channel_param *cparam)
{
	memset(cparam, 0, sizeof(*cparam));

	mlx5e_build_rq_param(priv, &cparam->rq);
	mlx5e_build_sq_param(priv, &cparam->sq);
	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);
	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	if (!priv->channel || !priv->txq_to_sq_map)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);

	return err;
}

static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->params.num_channels; i++)
		mlx5e_close_channel(priv->channel[i]);

	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);
}
static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	memset(in, 0, sizeof(in));

	MLX5_SET(tisc, tisc, prio, tc);
	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}

static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
{
	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_open_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++) {
		err = mlx5e_open_tis(priv, tc);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_tis(priv, tc);

	return err;
}

static void mlx5e_close_tises(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->params.num_tc; tc++)
		mlx5e_close_tis(priv, tc);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}
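
/*
 * Reverse the low 'size' bits of 'a'. For example (illustrative),
 * mlx5e_bits_invert(0b001, 3) = 0b100 = 4. mlx5e_fill_rqt_rqns() below
 * uses this to permute indirection-table slots when the XOR hash
 * function is selected, spreading adjacent entries across channels.
 */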
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
				enum mlx5e_rqt_ix rqt_ix)
{
	int log_sz;
	int i;

	switch (rqt_ix) {
	case MLX5E_INDIRECTION_RQT:
		log_sz = priv->params.rx_hash_log_tbl_sz;
		for (i = 0; i < (1 << log_sz); i++) {
			int ix = i;

			if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, log_sz);

			ix = ix % priv->params.num_channels;
			MLX5_SET(rqtc, rqtc, rq_num[i],
				 priv->channel[ix]->rq.rqn);
		}

		break;

	default: /* MLX5E_SINGLE_RQ_RQT */
		MLX5_SET(rqtc, rqtc, rq_num[0],
			 priv->channel[0]->rq.rqn);

		break;
	}
}
static int mlx5e_open_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *rqtc;
	int inlen;
	int err;
	int log_sz;
	int sz;

	log_sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 0 :
		  priv->params.rx_hash_log_tbl_sz;
	sz = 1 << log_sz;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

	kvfree(in);

	return err;
}

static void mlx5e_close_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

#define ROUGH_MAX_L2_L3_HDR_SZ 256

#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	if (priv->params.lro_en) {
		MLX5_SET(tirc, tirc, lro_enable_mask,
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
		/* lro_max_ip_payload_size is in units of 256 bytes (>> 8) */
		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
			 (priv->params.lro_wqe_sz -
			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
			 MLX5_CAP_ETH(priv->mdev,
				      lro_timer_supported_periods[3]));
	}

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

	switch (tt) {
	case MLX5E_TT_ANY:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
		break;
	default:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
		MLX5_SET(tirc, tirc, rx_hash_fn,
			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
						     rx_hash_toeplitz_key);
			size_t len = MLX5_FLD_SZ_BYTES(tirc,
						       rx_hash_toeplitz_key);

			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
			netdev_rss_key_fill(rss_key, len);
		}
		break;
	}

	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	}
}
static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *tirc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	mlx5e_build_tir_ctx(priv, tirc, tt);

	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);

	kvfree(in);

	return err;
}

static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
{
	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_open_tirs(struct mlx5e_priv *priv)
{
	int err;
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		err = mlx5e_open_tir(priv, i);
		if (err)
			goto err_close_tirs;
	}

	return 0;

err_close_tirs:
	for (i--; i >= 0; i--)
		mlx5e_close_tir(priv, i);

	return err;
}

static void mlx5e_close_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++)
		mlx5e_close_tir(priv, i);
}
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int hw_mtu;
	int err;

	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
	if (err)
		return err;

	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);

	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);

	return 0;
}
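
/*
 * Device open path. The setup order below matters: TISes and channels
 * must exist before the RQTs that point at the channel RQs, the RQTs
 * before the TIRs that reference them, and the TIRs before the flow
 * table rules that steer traffic into them. The error unwind at the
 * bottom of mlx5e_open_locked() runs in exactly the reverse order.
 */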
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int num_txqs;
	int err;

	num_txqs = priv->params.num_channels * priv->params.num_tc;
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

	err = mlx5e_set_dev_port_mtu(netdev);
	if (err)
		return err;

	err = mlx5e_open_tises(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
			   __func__, err);
		return err;
	}

	err = mlx5e_open_channels(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
			   __func__, err);
		goto err_close_tises;
	}

	err = mlx5e_open_rqt(priv, MLX5E_INDIRECTION_RQT);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_rqt(INDIR) failed, %d\n",
			   __func__, err);
		goto err_close_channels;
	}

	err = mlx5e_open_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_rqt(SINGLE) failed, %d\n",
			   __func__, err);
		goto err_close_rqt_indir;
	}

	err = mlx5e_open_tirs(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
			   __func__, err);
		goto err_close_rqt_single;
	}

	err = mlx5e_open_flow_table(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
			   __func__, err);
		goto err_close_tirs;
	}

	err = mlx5e_add_all_vlan_rules(priv);
	if (err) {
		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
			   __func__, err);
		goto err_close_flow_table;
	}

	mlx5e_init_eth_addr(priv);

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_update_carrier(priv);
	mlx5e_set_rx_mode_core(priv);

	schedule_delayed_work(&priv->update_stats_work, 0);

	return 0;

err_close_flow_table:
	mlx5e_close_flow_table(priv);

err_close_tirs:
	mlx5e_close_tirs(priv);

err_close_rqt_single:
	mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);

err_close_rqt_indir:
	mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);

err_close_channels:
	mlx5e_close_channels(priv);

err_close_tises:
	mlx5e_close_tises(priv);

	return err;
}
static int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_set_rx_mode_core(priv);
	mlx5e_del_all_vlan_rules(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_close_flow_table(priv);
	mlx5e_close_tirs(priv);
	mlx5e_close_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	mlx5e_close_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_close_channels(priv);
	mlx5e_close_tises(priv);

	return 0;
}
static int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;

	stats->rx_packets = vstats->rx_packets;
	stats->rx_bytes   = vstats->rx_bytes;
	stats->tx_packets = vstats->tx_packets;
	stats->tx_bytes   = vstats->tx_bytes;
	stats->multicast  = vstats->rx_multicast_packets +
			    vstats->tx_multicast_packets;
	stats->tx_errors  = vstats->tx_error_packets;
	stats->rx_errors  = vstats->rx_error_packets;
	stats->tx_dropped = vstats->tx_queue_dropped;
	stats->rx_crc_errors = 0;
	stats->rx_length_errors = 0;

	return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	schedule_work(&priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	schedule_work(&priv->set_rx_mode_work);

	return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	netdev_features_t changes = features ^ netdev->features;
	int err = 0;

	mutex_lock(&priv->state_lock);

	if (changes & NETIF_F_LRO) {
		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);

		if (was_opened)
			mlx5e_close_locked(priv->netdev);

		priv->params.lro_en = !!(features & NETIF_F_LRO);

		if (was_opened)
			err = mlx5e_open_locked(priv->netdev);
	}

	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			mlx5e_enable_vlan_filter(priv);
		else
			mlx5e_disable_vlan_filter(priv);
	}

	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool was_opened;
	int max_mtu;
	int err = 0;

	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

	if (new_mtu > max_mtu) {
		netdev_err(netdev,
			   "%s: Bad MTU (%d) > (%d) Max\n",
			   __func__, new_mtu, max_mtu);
		return -EINVAL;
	}

	mutex_lock(&priv->state_lock);

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(netdev);

	netdev->mtu = new_mtu;

	if (was_opened)
		err = mlx5e_open_locked(netdev);

	mutex_unlock(&priv->state_lock);

	return err;
}
static struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -ENOTSUPP;

	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -ENOTSUPP;
	}

	return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_comp_vectors)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	priv->params.log_rq_size =
		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	priv->params.rx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	priv->params.min_rx_wqes =
		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
	/* the indirection table must cover all completion vectors */
	priv->params.rx_hash_log_tbl_sz =
		(order_base_2(num_comp_vectors) >
		 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
		order_base_2(num_comp_vectors) :
		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
	priv->params.num_tc = 1;
	priv->params.default_vlan_prio = 0;
	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

	/* LRO is off by default; the cap check is short-circuited away */
	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
	priv->params.lro_wqe_sz =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->params.num_channels = num_comp_vectors;
	priv->default_vlan_prio = priv->params.default_vlan_prio;

	spin_lock_init(&priv->async_events_spinlock);
	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}
static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (priv->params.num_tc > 1)
		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;

	netdev->netdev_ops = &mlx5e_netdev_ops;
	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
			     struct mlx5_core_mr *mr)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	int err;

	in = mlx5_vzalloc(sizeof(*in));
	if (!in)
		return -ENOMEM;

	in->seg.flags = MLX5_PERM_LOCAL_WRITE |
			MLX5_PERM_LOCAL_READ |
			MLX5_ACCESS_MODE_PA;
	in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
				    NULL);

	kvfree(in);

	return err;
}
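
/*
 * Note on the memory key above: MLX5_ACCESS_MODE_PA with MLX5_MKEY_LEN64
 * suggests a physical-address key spanning the whole address space, so
 * RQ/SQ buffers can be posted with a single lkey instead of per-buffer
 * memory registrations (an interpretation of the flags, not a statement
 * from the original source).
 */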
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;
	int ncv = mdev->priv.eq_table.num_comp_vectors;
	int err;

	if (mlx5e_check_required_hca_cap(mdev))
		return NULL;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

	mlx5e_build_netdev_priv(mdev, netdev, ncv);
	mlx5e_build_netdev(netdev);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
	if (err) {
		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
		goto err_free_netdev;
	}

	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
	if (err) {
		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
		goto err_unmap_free_uar;
	}

	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
	if (err) {
		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
		goto err_dealloc_pd;
	}

	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
	if (err) {
		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
		goto err_dealloc_transport_domain;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_destroy_mkey;
	}

	mlx5e_enable_async_events(priv);

	return priv;

err_destroy_mkey:
	mlx5_core_destroy_mkey(mdev, &priv->mr);

err_dealloc_transport_domain:
	mlx5_dealloc_transport_domain(mdev, priv->tdn);

err_dealloc_pd:
	mlx5_core_dealloc_pd(mdev, priv->pdn);

err_unmap_free_uar:
	mlx5_unmap_free_uar(mdev, &priv->cq_uar);

err_free_netdev:
	free_netdev(netdev);

	return NULL;
}
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	unregister_netdev(netdev);
	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	mlx5e_disable_async_events(priv);
	flush_scheduled_work();
	free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_create_netdev,
	.remove    = mlx5e_destroy_netdev,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};
void mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}