// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>

#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
#include "en/htb.h"

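/* Bundles the arguments that mlx5e_qos_open_queues() passes to
 * mlx5e_open_qos_sq() for each HTB leaf via mlx5e_open_qos_sq_cb_wrapper()
 * below.
 */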
struct qos_sq_callback_params {
	struct mlx5e_priv *priv;
	struct mlx5e_channels *chs;
};

int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
	if (nbytes < BYTES_IN_MBIT) {
		qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
			 nbytes, BYTES_IN_MBIT);
		return -EINVAL;
	}

	return 0;
}

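/* Unit note with a worked example (illustrative, not from the original
 * source): BYTES_IN_MBIT is 1 Mbit/sec expressed in bytes/sec, i.e. 125000.
 * A requested ceiling of 125,000,000 bytes/sec therefore becomes
 * div_u64(125000000, 125000) = 1000 Mbit/sec in the conversion below.
 */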
static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
{
	return div_u64(nbytes, BYTES_IN_MBIT);
}

int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
	return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}

u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
	/* These channel params are safe to access from the datapath, because:
	 * 1. This function is called only after checking selq->htb_maj_id != 0,
	 *    and the number of queues can't change while HTB offload is active.
	 * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
	 *    mlx5e_select_queue to finish while holding priv->state_lock,
	 *    preventing other code from changing the number of queues.
	 */
	bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);

	return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}

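/* Worked example (illustrative): with num_channels = 4, no PTP channel and
 * 2 DCB TCs, the regular txqs occupy indices 0..7, so QoS queue qid 5 maps
 * to txq (4 + 0) * 2 + 5 = 13. The QoS SQs themselves are spread
 * round-robin over the channels: node qid 5 lives on channel 5 % 4 = 1, in
 * per-channel slot 5 / 4 = 1, which is the lookup mlx5e_get_qos_sq() below
 * performs.
 */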
static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
	struct mlx5e_params *params = &priv->channels.params;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_channel *c;
	int ix;

	ix = qid % params->num_channels;
	qid /= params->num_channels;
	c = priv->channels.c[ix];

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	return mlx5e_state_dereference(priv, qos_sqs[qid]);
}

int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
		      u16 node_qid, u32 hw_id)
{
	struct mlx5e_create_cq_param ccp = {};
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_sq_param param_sq;
	struct mlx5e_cq_param param_cq;
	int txq_ix, ix, qid, err = 0;
	struct mlx5e_params *params;
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	u32 tisn;

	params = &chs->params;
	txq_ix = mlx5e_qid_from_qos(chs, node_qid);

	WARN_ON(node_qid > priv->htb_max_qos_sqs);
	if (node_qid == priv->htb_max_qos_sqs) {
		struct mlx5e_sq_stats *stats, **stats_list = NULL;

		if (priv->htb_max_qos_sqs == 0) {
			stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
					      sizeof(*stats_list), GFP_KERNEL);
			if (!stats_list)
				return -ENOMEM;
		}
		stats = kzalloc(sizeof(*stats), GFP_KERNEL);
		if (!stats) {
			kvfree(stats_list);
			return -ENOMEM;
		}
		if (stats_list)
			WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
		WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
		/* Order htb_max_qos_sqs increment after writing the array pointer.
		 * Pairs with smp_load_acquire in en_stats.c.
		 */
		smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
	}

	ix = node_qid % params->num_channels;
	qid = node_qid / params->num_channels;
	c = chs->c[ix];

	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	sq = kzalloc(sizeof(*sq), GFP_KERNEL);
	if (!sq)
		return -ENOMEM;

	mlx5e_build_create_cq_param(&ccp, c);

	memset(&param_sq, 0, sizeof(param_sq));
	memset(&param_cq, 0, sizeof(param_cq));
	mlx5e_build_sq_param(c->mdev, params, &param_sq);
	mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
	err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
	if (err)
		goto err_free_sq;

	tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
				      c->lag_port, 0);
	err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
			       priv->htb_qos_sq_stats[node_qid]);
	if (err)
		goto err_close_cq;

	rcu_assign_pointer(qos_sqs[qid], sq);

	return 0;

err_close_cq:
	mlx5e_close_cq(&sq->cq);
err_free_sq:
	kfree(sq);
	return err;
}

static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
{
	struct qos_sq_callback_params *cb_params = data;

	return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
}

int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
{
	struct mlx5e_priv *priv = data;
	struct mlx5e_txqsq *sq;
	u16 qid;

	sq = mlx5e_get_qos_sq(priv, node_qid);

	qid = mlx5e_qid_from_qos(&priv->channels, node_qid);

	/* If it's a new queue, it will be marked as started at this point.
	 * Stop it before updating txq2sq.
	 */
	mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));

	priv->txq2sq[qid] = sq;

	/* Make the change to txq2sq visible before the queue is started.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();

	qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
	mlx5e_activate_txqsq(sq);

	return 0;
}

void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_txqsq *sq;

	sq = mlx5e_get_qos_sq(priv, qid);
	if (!sq) /* Handle the case when the SQ failed to open. */
		return;

	qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
	mlx5e_deactivate_txqsq(sq);

	priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;

	/* Make the change to txq2sq visible before the queue is started again.
	 * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
	 * which pairs with this barrier.
	 */
	smp_wmb();
}

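/* Teardown order in the close paths below: the SQ pointer is detached under
 * priv->state_lock first, then synchronize_rcu() guarantees that no
 * concurrent NAPI poll can still observe the SQ before it and its CQ are
 * destroyed.
 */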
void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_params *params;
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int ix;

	params = &priv->channels.params;

	ix = qid % params->num_channels;
	qid /= params->num_channels;
	c = priv->channels.c[ix];
	qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
	sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
	if (!sq) /* Handle the case when the SQ failed to open. */
		return;

	synchronize_rcu(); /* Sync with NAPI. */

	mlx5e_close_txqsq(sq);
	mlx5e_close_cq(&sq->cq);
	kfree(sq);
}

void mlx5e_qos_close_queues(struct mlx5e_channel *c)
{
	struct mlx5e_txqsq __rcu **qos_sqs;
	int i;

	qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
	if (!qos_sqs)
		return;
	synchronize_rcu(); /* Sync with NAPI. */

	for (i = 0; i < c->qos_sqs_size; i++) {
		struct mlx5e_txqsq *sq;

		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
		if (!sq) /* Handle the case when the SQ failed to open. */
			continue;

		mlx5e_close_txqsq(sq);
		mlx5e_close_cq(&sq->cq);
		kfree(sq);
	}

	kvfree(qos_sqs);
}

void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_qos_close_queues(chs->c[i]);
}

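/* Sizing example (illustrative): with 256 supported leaf nodes and 6
 * channels, each channel gets DIV_ROUND_UP(256, 6) = 43 qos_sqs slots.
 */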
int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	u16 qos_sqs_size;
	int i;

	qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);

	for (i = 0; i < chs->num; i++) {
		struct mlx5e_txqsq **sqs;

		sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
		if (!sqs)
			goto err_free;

		WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
		smp_wmb(); /* Pairs with mlx5e_napi_poll. */
		rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
	}

	return 0;

err_free:
	while (--i >= 0) {
		struct mlx5e_txqsq **sqs;

		sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
					  lockdep_is_held(&priv->state_lock));

		synchronize_rcu(); /* Sync with NAPI. */
		kvfree(sqs);
	}

	return -ENOMEM;
}

int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
	struct qos_sq_callback_params callback_params;
	int err;

	err = mlx5e_qos_alloc_queues(priv, chs);
	if (err)
		return err;

	callback_params.priv = priv;
	callback_params.chs = chs;

	err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
	if (err) {
		mlx5e_qos_close_all_queues(chs);
		return err;
	}

	return 0;
}

void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
	mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}

void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
	struct mlx5e_params *params = &c->priv->channels.params;
	struct mlx5e_txqsq __rcu **qos_sqs;
	int i;

	qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
	if (!qos_sqs)
		return;

	for (i = 0; i < c->qos_sqs_size; i++) {
		u16 qid = params->num_channels * i + c->ix;
		struct mlx5e_txqsq *sq;

		sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
		if (!sq) /* Handle the case when the SQ failed to open. */
			continue;

		qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
		mlx5e_deactivate_txqsq(sq);

		/* The queue is disabled, no synchronization with datapath is needed. */
		c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
	}
}

void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_qos_deactivate_queues(chs->c[i]);
}

void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
	qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
	netdev_tx_reset_queue(txq);
	netif_tx_start_queue(txq);
}

void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

	if (!qdisc)
		return;

	spin_lock_bh(qdisc_lock(qdisc));
	qdisc_reset(qdisc);
	spin_unlock_bh(qdisc_lock(qdisc));
}

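/* This handler is reached through ndo_setup_tc. An illustrative (not
 * authoritative) tc invocation that exercises TC_HTB_CREATE and
 * TC_HTB_LEAF_ALLOC_QUEUE, assuming an mlx5e netdev named eth0:
 *   tc qdisc replace dev eth0 root handle 1: htb offload
 *   tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit ceil 200mbit
 */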
int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
	struct mlx5e_htb *htb = priv->htb;
	int res;

	if (!htb && htb_qopt->command != TC_HTB_CREATE)
		return -EINVAL;

	if (htb_qopt->prio || htb_qopt->quantum) {
		NL_SET_ERR_MSG_MOD(htb_qopt->extack,
				   "prio and quantum parameters are not supported by device with HTB offload enabled.");
		return -EOPNOTSUPP;
	}

	switch (htb_qopt->command) {
	case TC_HTB_CREATE:
		if (!mlx5_qos_is_supported(priv->mdev)) {
			NL_SET_ERR_MSG_MOD(htb_qopt->extack,
					   "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
			return -EOPNOTSUPP;
		}
		priv->htb = mlx5e_htb_alloc();
		htb = priv->htb;
		if (!htb)
			return -ENOMEM;
		res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
		if (res) {
			mlx5e_htb_free(htb);
			priv->htb = NULL;
		}
		return res;
	case TC_HTB_DESTROY:
		mlx5e_htb_cleanup(htb);
		mlx5e_htb_free(htb);
		priv->htb = NULL;
		return 0;
	case TC_HTB_LEAF_ALLOC_QUEUE:
		res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
						 htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
		if (res < 0)
			return res;
		htb_qopt->qid = res;
		return 0;
	case TC_HTB_LEAF_TO_INNER:
		return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
					       htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
	case TC_HTB_LEAF_DEL:
		return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
	case TC_HTB_LEAF_DEL_LAST:
	case TC_HTB_LEAF_DEL_LAST_FORCE:
		return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
					       htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
					       htb_qopt->extack);
	case TC_HTB_NODE_MODIFY:
		return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
					     htb_qopt->extack);
	case TC_HTB_LEAF_QUERY_QUEUE:
		res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
		if (res < 0)
			return res;
		htb_qopt->qid = res;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

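/* HW rate limiting for mqprio. The leaf nodes below are created from the
 * max_rate array of an mqprio qdisc in channel mode; an illustrative (not
 * authoritative) invocation, assuming a netdev named eth0:
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 queues 1@0 1@1 \
 *      hw 1 mode channel shaper bw_rlimit max_rate 1Gbit 2Gbit
 */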
struct mlx5e_mqprio_rl {
	struct mlx5_core_dev *mdev;
	u32 root_id;
	u32 *leaves_id;
	u8 num_tc;
};

struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
{
	return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
}

void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
{
	kvfree(rl);
}

int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
			 u64 max_rate[])
{
	int err;
	int tc;

	if (!mlx5_qos_is_supported(mdev)) {
		qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
		return -EOPNOTSUPP;
	}
	if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
		return -EINVAL;

	rl->mdev = mdev;
	rl->num_tc = num_tc;
	rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
	if (!rl->leaves_id)
		return -ENOMEM;

	err = mlx5_qos_create_root_node(mdev, &rl->root_id);
	if (err)
		goto err_free_leaves;

	qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);

	for (tc = 0; tc < num_tc; tc++) {
		u32 max_average_bw;

		max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
		err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
						&rl->leaves_id[tc]);
		if (err)
			goto err_destroy_leaves;

		qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
			tc, rl->leaves_id[tc], max_average_bw);
	}

	return 0;

err_destroy_leaves:
	while (--tc >= 0)
		mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
	mlx5_qos_destroy_node(mdev, rl->root_id);
err_free_leaves:
	kvfree(rl->leaves_id);
	return err;
}

void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
{
	int tc;

	for (tc = 0; tc < rl->num_tc; tc++)
		mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
	mlx5_qos_destroy_node(rl->mdev, rl->root_id);
	kvfree(rl->leaves_id);
}

int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
{
	if (tc >= rl->num_tc)
		return -EINVAL;

	*hw_id = rl->leaves_id[tc];
	return 0;
}