// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2020 Mellanox Technologies

#include "en/fs_tt_redirect.h"
#include <linux/list.h>
#include <linux/spinlock.h>
struct mlx5e_ptp_fs {
	struct mlx5_flow_handle *l2_rule;
	struct mlx5_flow_handle *udp_v4_rule;
	struct mlx5_flow_handle *udp_v6_rule;
	bool valid;
};
struct mlx5e_ptp_params {
	struct mlx5e_params params;
	struct mlx5e_sq_param txq_sq_param;
	struct mlx5e_rq_param rq_param;
};
struct mlx5e_ptp_port_ts_cqe_tracker {
	u8 metadata_id;
	bool inuse : 1;
	struct list_head entry;
};
struct mlx5e_ptp_port_ts_cqe_list {
	struct mlx5e_ptp_port_ts_cqe_tracker *nodes;
	struct list_head tracker_list_head;
	/* Sync list operations in xmit and napi_poll contexts */
	spinlock_t tracker_list_lock;
};
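
/* Mark a metadata index as in flight and append its tracker to the FIFO
 * list. Entries leave the list either when the port timestamp CQE arrives
 * or when they are swept as undelivered by
 * mlx5e_ptpsq_mark_ts_cqes_undelivered().
 */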
static void
mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(tracker->inuse);
	tracker->inuse = true;
	spin_lock(&list->tracker_list_lock);
	list_add_tail(&tracker->entry, &list->tracker_list_head);
	spin_unlock(&list->tracker_list_lock);
}
static void
mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metadata)
{
	struct mlx5e_ptp_port_ts_cqe_tracker *tracker = &list->nodes[metadata];

	WARN_ON_ONCE(!tracker->inuse);
	tracker->inuse = false;
	spin_lock(&list->tracker_list_lock);
	list_del(&tracker->entry);
	spin_unlock(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
{
	mlx5e_ptp_port_ts_cqe_list_add(ptpsq->ts_cqe_pending_list, metadata);
}
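
/* Both the DMA (CQE) timestamp and the port timestamp of a PTP packet are
 * stashed in skb->cb; the timestamp is reported to the socket only once
 * both halves have been recorded.
 */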
struct mlx5e_skb_cb_hwtstamp {
	ktime_t cqe_hwtstamp;
	ktime_t port_hwtstamp;
};
void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
{
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
	return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
}
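
/* Report the port timestamp to the socket error queue, unless it diverges
 * from the CQE DMA timestamp by more than 1/128 second, in which case the
 * report is aborted and accounted in the CQ stats.
 */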
static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
				     struct mlx5e_ptp_cq_stats *cq_stats)
{
	struct skb_shared_hwtstamps hwts = {};
	ktime_t diff;

	diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
		   mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);

	/* Maximal allowed diff is 1 / 128 second */
	if (diff > (NSEC_PER_SEC >> 7)) {
		cq_stats->abort++;
		cq_stats->abort_abs_diff_ns += diff;
		return;
	}

	hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
	skb_tstamp_tx(skb, &hwts);
}
void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
				   ktime_t hwtstamp,
				   struct mlx5e_ptp_cq_stats *cq_stats)
{
	switch (hwtstamp_type) {
	case (MLX5E_SKB_CB_CQE_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
		break;
	case (MLX5E_SKB_CB_PORT_HWTSTAMP):
		mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
		break;
	}

	/* Once both CQEs have arrived, check and report the port tstamp, and
	 * clear skb cb as the skb is about to be released.
	 */
	if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
	    !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
		return;

	mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
	memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
}
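
/* The metadata map holds the skb of each in-flight PTP packet, indexed by
 * the metadata id carried in its WQE and echoed back in the port timestamp
 * CQE.
 */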
static struct sk_buff *
mlx5e_ptp_metadata_map_lookup(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	return map->data[metadata];
}
static struct sk_buff *
mlx5e_ptp_metadata_map_remove(struct mlx5e_ptp_metadata_map *map, u16 metadata)
{
	struct sk_buff *skb;

	skb = map->data[metadata];
	map->data[metadata] = NULL;

	return skb;
}
static bool mlx5e_ptp_metadata_map_unhealthy(struct mlx5e_ptp_metadata_map *map)
{
	/* Consider the map unhealthy once more than 15/16 of its capacity cannot be reclaimed. */
	return map->undelivered_counter > (map->capacity >> 4) * 15;
}
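
/* Sweep the pending-CQE list in FIFO order and mark as undelivered every
 * entry whose DMA timestamp precedes the just-received port timestamp by
 * more than the timeout. The sweep stops at the first entry still within
 * the timeout window.
 */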
static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
						 ktime_t port_tstamp)
{
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list = ptpsq->ts_cqe_pending_list;
	ktime_t timeout = ns_to_ktime(MLX5E_PTP_TS_CQE_UNDELIVERED_TIMEOUT);
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;

	spin_lock(&cqe_list->tracker_list_lock);
	list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
		struct sk_buff *skb =
			mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
		ktime_t dma_tstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;

		if (!dma_tstamp ||
		    ktime_after(ktime_add(dma_tstamp, timeout), port_tstamp))
			break;

		metadata_map->undelivered_counter++;
		WARN_ON_ONCE(!pos->inuse);
		pos->inuse = false;
		list_del(&pos->entry);
	}
	spin_unlock(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
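
/* Handle one port timestamp CQE: resolve the skb by metadata id, report the
 * timestamp unless the CQE carries an error, and recycle the metadata entry.
 */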
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
				    struct mlx5_cqe64 *cqe,
				    u8 *md_buff, u8 *md_buff_sz,
				    int budget)
{
	struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
	u8 metadata_id = PTP_WQE_CTR2IDX(be16_to_cpu(cqe->wqe_counter));
	bool is_err_cqe = !!MLX5E_RX_ERR_CQE(cqe);
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct sk_buff *skb;
	ktime_t hwtstamp;

	if (likely(pending_cqe_list->nodes[metadata_id].inuse)) {
		mlx5e_ptp_port_ts_cqe_list_remove(pending_cqe_list, metadata_id);
	} else {
		/* Reclaim space in the unlikely event CQE was delivered after
		 * marking it late.
		 */
		ptpsq->metadata_map.undelivered_counter--;
		ptpsq->cq_stats->late_cqe++;
	}

	skb = mlx5e_ptp_metadata_map_remove(&ptpsq->metadata_map, metadata_id);

	if (unlikely(is_err_cqe)) {
		ptpsq->cq_stats->err_cqe++;
		goto out;
	}

	hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
	mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
				      hwtstamp, ptpsq->cq_stats);
	ptpsq->cq_stats->cqe++;

	mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
	napi_consume_skb(skb, budget);
	md_buff[(*md_buff_sz)++] = metadata_id;
	if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
	    !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
		queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
	int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
	u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
	u8 metadata_buff_sz = 0;
	struct mlx5_cqwq *cqwq;
	struct mlx5_cqe64 *cqe;
	int work_done = 0;

	cqwq = &cq->wq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(cqwq);
	if (!cqe)
		return false;

	do {
		mlx5_cqwq_pop(cqwq);

		mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
					metadata_buff, &metadata_buff_sz, napi_budget);
	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

	mlx5_cqwq_update_db_record(cqwq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	while (metadata_buff_sz > 0)
		mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
					     metadata_buff[--metadata_buff_sz]);

	mlx5e_txqsq_wake(&ptpsq->txqsq);

	return work_done == budget;
}
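
/* NAPI handler for the PTP channel: poll the TX and timestamp CQs of every
 * TC and the RX CQ, then rearm all CQs once no work is left.
 */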
static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_ptp *c = container_of(napi, struct mlx5e_ptp, napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_rq *rq = &c->rq;
	bool busy = false;
	int work_done = 0;
	int i;

	rcu_read_lock();

	ch_stats->poll++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
			busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state) && likely(budget)) {
		work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
		busy |= work_done == budget;
		busy |= INDIRECT_CALL_2(rq->post_wqes,
					mlx5e_post_rx_mpwqes,
					mlx5e_post_rx_wqes,
					rq);
	}

	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

	ch_stats->arm++;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (i = 0; i < c->num_tc; i++) {
			mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
			mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
		}
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();
	return work_done;
}
static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param,
				 struct mlx5e_txqsq *sq, int tc,
				 struct mlx5e_ptpsq *ptpsq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5_wq_cyc *wq = &sq->wq;
	int err;
	int node;

	sq->pdev = c->pdev;
	sq->clock = &mdev->clock;
	sq->mkey_be = c->mkey_be;
	sq->netdev = c->netdev;
	sq->priv = c->priv;
	sq->mdev = mdev;
	sq->ch_ix = MLX5E_PTP_CHANNEL_IX;
	sq->txq_ix = txq_ix;
	sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;
	sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	sq->stats = &c->priv->ptp_stats.sq[tc];
	sq->ptpsq = ptpsq;
	INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
	if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
	sq->stop_room = param->stop_room;
	sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev);

	node = dev_to_node(mlx5_core_dma_dev(mdev));

	param->wq.db_numa_node = node;
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
	if (err)
		return err;
	wq->db = &wq->db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, node);
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
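
/* Allocate the per-SQ timestamping databases: the pending-CQE tracker list,
 * the free-metadata FIFO and the metadata-to-skb map. All three are sized by
 * the smaller of the SQ size and the metadata range the device can echo back
 * in the wqe_counter field of the port timestamp CQE.
 */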
static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
{
	struct mlx5e_ptp_metadata_fifo *metadata_freelist = &ptpsq->metadata_freelist;
	struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
	struct mlx5e_ptp_port_ts_cqe_list *cqe_list;
	int db_sz;
	int md;

	cqe_list = kvzalloc_node(sizeof(*ptpsq->ts_cqe_pending_list), GFP_KERNEL, numa);
	if (!cqe_list)
		return -ENOMEM;
	ptpsq->ts_cqe_pending_list = cqe_list;

	db_sz = min_t(u32, mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq),
		      1 << MLX5_CAP_GEN_2(ptpsq->txqsq.mdev,
					  ts_cqe_metadata_size2wqe_counter));
	ptpsq->ts_cqe_ctr_mask = db_sz - 1;

	cqe_list->nodes = kvzalloc_node(array_size(db_sz, sizeof(*cqe_list->nodes)),
					GFP_KERNEL, numa);
	if (!cqe_list->nodes)
		goto free_cqe_list;
	INIT_LIST_HEAD(&cqe_list->tracker_list_head);
	spin_lock_init(&cqe_list->tracker_list_lock);

	metadata_freelist->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_freelist->data)),
			      GFP_KERNEL, numa);
	if (!metadata_freelist->data)
		goto free_cqe_list_nodes;
	metadata_freelist->mask = ptpsq->ts_cqe_ctr_mask;

	for (md = 0; md < db_sz; ++md) {
		cqe_list->nodes[md].metadata_id = md;
		metadata_freelist->data[md] = md;
	}
	metadata_freelist->pc = db_sz;

	metadata_map->data =
		kvzalloc_node(array_size(db_sz, sizeof(*metadata_map->data)),
			      GFP_KERNEL, numa);
	if (!metadata_map->data)
		goto free_metadata_freelist;
	metadata_map->capacity = db_sz;

	return 0;

free_metadata_freelist:
	kvfree(metadata_freelist->data);
free_cqe_list_nodes:
	kvfree(cqe_list->nodes);
free_cqe_list:
	kvfree(cqe_list);
	return -ENOMEM;
}
static void mlx5e_ptp_drain_metadata_map(struct mlx5e_ptp_metadata_map *map)
{
	int idx;

	for (idx = 0; idx < map->capacity; ++idx) {
		struct sk_buff *skb = map->data[idx];

		dev_kfree_skb_any(skb);
	}
}
static void mlx5e_ptp_free_traffic_db(struct mlx5e_ptpsq *ptpsq)
{
	mlx5e_ptp_drain_metadata_map(&ptpsq->metadata_map);
	kvfree(ptpsq->metadata_map.data);
	kvfree(ptpsq->metadata_freelist.data);
	kvfree(ptpsq->ts_cqe_pending_list->nodes);
	kvfree(ptpsq->ts_cqe_pending_list);
}
static void mlx5e_ptpsq_unhealthy_work(struct work_struct *work)
{
	struct mlx5e_ptpsq *ptpsq =
		container_of(work, struct mlx5e_ptpsq, report_unhealthy_work);

	mlx5e_reporter_tx_ptpsq_unhealthy(ptpsq);
}
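
/* Open a PTP SQ and its databases. csp.ts_cqe_to_dest_cqn directs the port
 * timestamp CQEs of this SQ to the dedicated timestamp CQ.
 */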
static int mlx5e_ptp_open_txqsq(struct mlx5e_ptp *c, u32 tisn,
				int txq_ix, struct mlx5e_ptp_params *cparams,
				int tc, struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
	struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
				    txqsq, tc, ptpsq);
	if (err)
		return err;

	csp.tisn = tisn;
	csp.tis_lst_sz = 1;
	csp.cqn = txqsq->cq.mcq.cqn;
	csp.wq_ctrl = &txqsq->wq_ctrl;
	csp.min_inline_mode = txqsq->min_inline_mode;
	csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;

	err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
	if (err)
		goto err_free_txqsq;

	err = mlx5e_ptp_alloc_traffic_db(ptpsq, dev_to_node(mlx5_core_dma_dev(c->mdev)));
	if (err)
		goto err_free_txqsq;

	INIT_WORK(&ptpsq->report_unhealthy_work, mlx5e_ptpsq_unhealthy_work);

	return 0;

err_free_txqsq:
	mlx5e_free_txqsq(txqsq);

	return err;
}
static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
{
	struct mlx5e_txqsq *sq = &ptpsq->txqsq;
	struct mlx5_core_dev *mdev = sq->mdev;

	if (current_work() != &ptpsq->report_unhealthy_work)
		cancel_work_sync(&ptpsq->report_unhealthy_work);
	mlx5e_ptp_free_traffic_db(ptpsq);
	cancel_work_sync(&sq->recover_work);
	mlx5e_ptp_destroy_sq(mdev, sq->sqn);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	u8 num_tc = mlx5e_get_dcb_num_tc(params);
	int ix_base;
	int err;
	int tc;

	ix_base = num_tc * params->num_channels;

	for (tc = 0; tc < num_tc; tc++) {
		int txq_ix = ix_base + tc;
		u32 tisn;

		tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
					      c->lag_port, tc);
		err = mlx5e_ptp_open_txqsq(c, tisn, txq_ix, cparams, tc, &c->ptpsq[tc]);
		if (err)
			goto close_txqsq;
	}

	return 0;

close_txqsq:
	for (--tc; tc >= 0; tc--)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);

	return err;
}
static void mlx5e_ptp_close_txqsqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
}
static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_params *params = &cparams->params;
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	u8 num_tc;
	int err;
	int tc;

	num_tc = mlx5e_get_dcb_num_tc(params);

	ccp.netdev = c->netdev;
	ccp.wq = c->priv->wq;
	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->txq_sq_param.cqp;

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;

		err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_txqsq_cq;
	}

	for (tc = 0; tc < num_tc; tc++) {
		struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
		struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];

		err = mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
		if (err)
			goto out_err_ts_cq;

		ptpsq->cq_stats = &c->priv->ptp_stats.cq[tc];
	}

	return 0;

out_err_ts_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
	tc = num_tc;
out_err_txqsq_cq:
	for (--tc; tc >= 0; tc--)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);

	return err;
}
static int mlx5e_ptp_open_rx_cq(struct mlx5e_ptp *c,
				struct mlx5e_ptp_params *cparams)
{
	struct mlx5e_create_cq_param ccp = {};
	struct dim_cq_moder ptp_moder = {};
	struct mlx5e_cq_param *cq_param;
	struct mlx5e_cq *cq = &c->rq.cq;

	ccp.netdev = c->netdev;
	ccp.wq = c->priv->wq;
	ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev));
	ccp.ch_stats = c->stats;
	ccp.napi = &c->napi;
	ccp.ix = MLX5E_PTP_CHANNEL_IX;

	cq_param = &cparams->rq_param.cqp;

	return mlx5e_open_cq(c->mdev, ptp_moder, cq_param, &ccp, cq);
}
static void mlx5e_ptp_close_tx_cqs(struct mlx5e_ptp *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].ts_cq);

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
}
static void mlx5e_ptp_build_sq_param(struct mlx5_core_dev *mdev,
				     struct mlx5e_params *params,
				     struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq;

	mlx5e_build_sq_param_common(mdev, param);

	wq = MLX5_ADDR_OF(sqc, sqc, wq);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
static void mlx5e_ptp_build_rq_param(struct mlx5_core_dev *mdev,
				     struct net_device *netdev,
				     struct mlx5e_ptp_params *ptp_params)
{
	struct mlx5e_rq_param *rq_params = &ptp_params->rq_param;
	struct mlx5e_params *params = &ptp_params->params;

	params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
	mlx5e_init_rq_type_params(mdev, params);
	params->sw_mtu = netdev->max_mtu;
	mlx5e_build_rq_param(mdev, params, NULL, rq_params);
}
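
/* The PTP channel reuses the regular channel parameters, except that the SQ
 * size is clamped so that every WQE index fits in the metadata range the
 * device can report back in a port timestamp CQE.
 */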
static void mlx5e_ptp_build_params(struct mlx5e_ptp *c,
				   struct mlx5e_ptp_params *cparams,
				   struct mlx5e_params *orig)
{
	struct mlx5e_params *params = &cparams->params;

	params->tx_min_inline_mode = orig->tx_min_inline_mode;
	params->num_channels = orig->num_channels;
	params->hard_mtu = orig->hard_mtu;
	params->sw_mtu = orig->sw_mtu;
	params->mqprio = orig->mqprio;

	/* SQ */
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		params->log_sq_size =
			min(MLX5_CAP_GEN_2(c->mdev, ts_cqe_metadata_size2wqe_counter),
			    MLX5E_PTP_MAX_LOG_SQ_SIZE);
		params->log_sq_size = min(params->log_sq_size, orig->log_sq_size);
		mlx5e_ptp_build_sq_param(c->mdev, params, &cparams->txq_sq_param);
	}

	/* RQ */
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		params->vlan_strip_disable = orig->vlan_strip_disable;
		mlx5e_ptp_build_rq_param(c->mdev, c->netdev, cparams);
	}
}
static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	struct mlx5e_priv *priv = c->priv;
	int err;

	rq->wq_type = params->rq_wq_type;
	rq->pdev = c->pdev;
	rq->netdev = priv->netdev;
	rq->priv = priv;
	rq->clock = &mdev->clock;
	rq->tstamp = &priv->tstamp;
	rq->mdev = mdev;
	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	rq->stats = &c->priv->ptp_stats.rq;
	rq->ix = MLX5E_PTP_CHANNEL_IX;
	rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
	err = mlx5e_rq_set_handlers(rq, params, false);
	if (err)
		return err;

	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
static int mlx5e_ptp_open_rq(struct mlx5e_ptp *c, struct mlx5e_params *params,
			     struct mlx5e_rq_param *rq_param)
{
	int node = dev_to_node(c->mdev->device);
	u16 q_counter;
	int err, sd_ix;

	err = mlx5e_init_ptp_rq(c, params, &c->rq);
	if (err)
		return err;

	sd_ix = mlx5_sd_ch_ix_get_dev_ix(c->mdev, MLX5E_PTP_CHANNEL_IX);
	q_counter = c->priv->q_counter[sd_ix];
	return mlx5e_open_rq(params, rq_param, NULL, node, q_counter, &c->rq);
}
static int mlx5e_ptp_open_queues(struct mlx5e_ptp *c,
				 struct mlx5e_ptp_params *cparams)
{
	int err;

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		err = mlx5e_ptp_open_tx_cqs(c, cparams);
		if (err)
			return err;

		err = mlx5e_ptp_open_txqsqs(c, cparams);
		if (err)
			goto close_tx_cqs;
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		err = mlx5e_ptp_open_rx_cq(c, cparams);
		if (err)
			goto close_txqsq;

		err = mlx5e_ptp_open_rq(c, &cparams->params, &cparams->rq_param);
		if (err)
			goto close_rx_cq;
	}
	return 0;

close_rx_cq:
	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_close_cq(&c->rq.cq);
close_txqsq:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_txqsqs(c);
close_tx_cqs:
	if (test_bit(MLX5E_PTP_STATE_TX, c->state))
		mlx5e_ptp_close_tx_cqs(c);

	return err;
}
static void mlx5e_ptp_close_queues(struct mlx5e_ptp *c)
{
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_close_rq(&c->rq);
		mlx5e_close_cq(&c->rq.cq);
	}
	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		mlx5e_ptp_close_txqsqs(c);
		mlx5e_ptp_close_tx_cqs(c);
	}
}
static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
{
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS))
		__set_bit(MLX5E_PTP_STATE_TX, c->state);

	if (params->ptp_rx)
		__set_bit(MLX5E_PTP_STATE_RX, c->state);

	return bitmap_empty(c->state, MLX5E_PTP_STATE_NUM_STATES) ? -EINVAL : 0;
}
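
/* RX steering for the PTP channel: redirect PTP event traffic (PTP_EV_PORT
 * UDP over IPv4/IPv6, plus L2 ETH_P_1588 frames) to the PTP TIR, so that
 * PTP packets land on the dedicated RQ.
 */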
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!ptp_fs->valid)
		return;

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->l2_rule);
	mlx5e_fs_tt_redirect_any_destroy(fs);

	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
	mlx5e_fs_tt_redirect_udp_destroy(fs);
	ptp_fs->valid = false;
}
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
	u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
	struct mlx5e_flow_steering *fs = priv->fs;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ptp_fs *ptp_fs;
	int err;

	ptp_fs = mlx5e_fs_get_ptp(fs);
	if (ptp_fs->valid)
		return 0;

	err = mlx5e_fs_tt_redirect_udp_create(fs);
	if (err)
		return err;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV4_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_udp;
	}
	ptp_fs->udp_v4_rule = rule;

	rule = mlx5e_fs_tt_redirect_udp_add_rule(fs, MLX5_TT_IPV6_UDP,
						 tirn, PTP_EV_PORT);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_udp_v4_rule;
	}
	ptp_fs->udp_v6_rule = rule;

	err = mlx5e_fs_tt_redirect_any_create(fs);
	if (err)
		goto out_destroy_udp_v6_rule;

	rule = mlx5e_fs_tt_redirect_any_add_rule(fs, tirn, ETH_P_1588);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto out_destroy_fs_any;
	}
	ptp_fs->l2_rule = rule;
	ptp_fs->valid = true;

	return 0;

out_destroy_fs_any:
	mlx5e_fs_tt_redirect_any_destroy(fs);
out_destroy_udp_v6_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v6_rule);
out_destroy_udp_v4_rule:
	mlx5e_fs_tt_redirect_del_rule(ptp_fs->udp_v4_rule);
out_destroy_fs_udp:
	mlx5e_fs_tt_redirect_udp_destroy(fs);

	return err;
}
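
/* Open the PTP channel: a single channel with one SQ per TC (when TX port
 * timestamping is enabled) and one RQ (when PTP RX steering is enabled),
 * driven by its own NAPI context.
 */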
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
		   u8 lag_port, struct mlx5e_ptp **cp)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ptp_params *cparams;
	struct mlx5e_ptp *c;
	int err;

	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
	if (!c || !cparams) {
		err = -ENOMEM;
		goto err_free;
	}

	c->priv = priv;
	c->mdev = priv->mdev;
	c->tstamp = &priv->tstamp;
	c->pdev = mlx5_core_dma_dev(priv->mdev);
	c->netdev = priv->netdev;
	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
	c->num_tc = mlx5e_get_dcb_num_tc(params);
	c->stats = &priv->ptp_stats.ch;
	c->lag_port = lag_port;

	err = mlx5e_ptp_set_state(c, params);
	if (err)
		goto err_free;

	netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll);

	mlx5e_ptp_build_params(c, cparams, params);

	err = mlx5e_ptp_open_queues(c, cparams);
	if (unlikely(err))
		goto err_napi_del;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		priv->rx_ptp_opened = true;

	*cp = c;

	kvfree(cparams);

	return 0;

err_napi_del:
	netif_napi_del(&c->napi);
err_free:
	kvfree(cparams);
	kvfree(c);
	return err;
}
void mlx5e_ptp_close(struct mlx5e_ptp *c)
{
	mlx5e_ptp_close_queues(c);
	netif_napi_del(&c->napi);

	kvfree(c);
}
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
{
	int tc;

	napi_enable(&c->napi);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
	}
	if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		mlx5e_ptp_rx_set_fs(c->priv);
		mlx5e_activate_rq(&c->rq);
	}
	mlx5e_trigger_napi_sched(&c->napi);
}
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
{
	int tc;

	if (test_bit(MLX5E_PTP_STATE_RX, c->state))
		mlx5e_deactivate_rq(&c->rq);

	if (test_bit(MLX5E_PTP_STATE_TX, c->state)) {
		for (tc = 0; tc < c->num_tc; tc++)
			mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
	}

	napi_disable(&c->napi);
}
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn)
{
	if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state))
		return -EINVAL;

	*rqn = c->rq.rqn;
	return 0;
}
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs;

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return 0;

	ptp_fs = kzalloc(sizeof(*ptp_fs), GFP_KERNEL);
	if (!ptp_fs)
		return -ENOMEM;
	mlx5e_fs_set_ptp(fs, ptp_fs);

	return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_flow_steering *fs,
			  const struct mlx5e_profile *profile)
{
	struct mlx5e_ptp_fs *ptp_fs = mlx5e_fs_get_ptp(fs);

	if (!mlx5e_profile_feature_cap(profile, PTP_RX))
		return;

	mlx5e_ptp_rx_unset_fs(fs);
	kfree(ptp_fs);
}
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set)
{
	struct mlx5e_ptp *c = priv->channels.ptp;

	if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
		return 0;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	if (set) {
		if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) {
			netdev_WARN_ONCE(priv->netdev, "Don't try to add PTP RX-FS rules");
			return -EINVAL;
		}
		return mlx5e_ptp_rx_set_fs(priv);
	}
	/* set == false */
	if (c && test_bit(MLX5E_PTP_STATE_RX, c->state)) {
		netdev_WARN_ONCE(priv->netdev, "Don't try to remove PTP RX-FS rules");
		return -EINVAL;
	}
	mlx5e_ptp_rx_unset_fs(priv->fs);
	return 0;
}