// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
#include <net/sch_generic.h>

#include <net/pkt_cls.h>
#include "en.h"
#include "params.h"
#include "../qos.h"
#include "en/htb.h"

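/* Context passed through mlx5e_htb_enumerate_leaves to the callback that
 * opens a QoS SQ for each HTB leaf node.
 */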
struct qos_sq_callback_params {
        struct mlx5e_priv *priv;
        struct mlx5e_channels *chs;
};

int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
{
        if (nbytes < BYTES_IN_MBIT) {
                qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
                         nbytes, BYTES_IN_MBIT);
                return -EINVAL;
        }
        return 0;
}

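/* Convert a rate in bytes/sec to the Mbit/sec units used by the QoS
 * scheduling nodes (see mlx5e_mqprio_rl_init below).
 */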
static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
{
        return div_u64(nbytes, BYTES_IN_MBIT);
}

int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
        return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
}

/* TX datapath API */

u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
{
        /* These channel params are safe to access from the datapath, because:
         * 1. This function is called only after checking selq->htb_maj_id != 0,
         *    and the number of queues can't change while HTB offload is active.
         * 2. When selq->htb_maj_id becomes 0, synchronize_rcu waits for
         *    mlx5e_select_queue to finish while holding priv->state_lock,
         *    preventing other code from changing the number of queues.
         */
        bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);

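        /* The first (num_channels + is_ptp) * num_tc txqs belong to the regular
         * SQs (and the PTP SQs, if enabled); QoS SQs are appended after them.
         * For example, with 8 channels, PTP enabled and 3 TCs, QoS qid 0 maps
         * to txq 27.
         */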
        return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid;
}

/* SQ lifecycle */

static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
{
        struct mlx5e_params *params = &priv->channels.params;
        struct mlx5e_txqsq __rcu **qos_sqs;
        struct mlx5e_channel *c;
        int ix;

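        /* QoS SQs are striped across channels: the channel index is
         * qid % num_channels, and the slot within that channel's qos_sqs
         * array is qid / num_channels.
         */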
        ix = qid % params->num_channels;
        qid /= params->num_channels;
        c = priv->channels.c[ix];

        qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
        return mlx5e_state_dereference(priv, qos_sqs[qid]);
}

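/* Open one QoS SQ for HTB leaf node node_qid, attached to the hardware QoS
 * node hw_id, together with its CQ, and publish it in the owning channel's
 * qos_sqs array.
 */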
int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
                      u16 node_qid, u32 hw_id)
{
        struct mlx5e_create_cq_param ccp = {};
        struct mlx5e_txqsq __rcu **qos_sqs;
        struct mlx5e_sq_param param_sq;
        struct mlx5e_cq_param param_cq;
        int txq_ix, ix, qid, err = 0;
        struct mlx5e_params *params;
        struct mlx5e_channel *c;
        struct mlx5e_txqsq *sq;
        u32 tisn;

        params = &chs->params;

        txq_ix = mlx5e_qid_from_qos(chs, node_qid);

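        /* Stats for QoS SQs are allocated lazily: the pointer array is sized
         * for the maximum number of leaf nodes on first use, and a stats
         * struct is allocated the first time a given node_qid is opened.
         * Entries are never freed here, so counters survive queue restarts.
         */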
        WARN_ON(node_qid > priv->htb_max_qos_sqs);
        if (node_qid == priv->htb_max_qos_sqs) {
                struct mlx5e_sq_stats *stats, **stats_list = NULL;

                if (priv->htb_max_qos_sqs == 0) {
                        stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
                                              sizeof(*stats_list),
                                              GFP_KERNEL);
                        if (!stats_list)
                                return -ENOMEM;
                }
                stats = kzalloc(sizeof(*stats), GFP_KERNEL);
                if (!stats) {
                        kvfree(stats_list);
                        return -ENOMEM;
                }
                if (stats_list)
                        WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
                WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
                /* Order htb_max_qos_sqs increment after writing the array pointer.
                 * Pairs with smp_load_acquire in en_stats.c.
                 */
                smp_store_release(&priv->htb_max_qos_sqs, priv->htb_max_qos_sqs + 1);
        }

        ix = node_qid % params->num_channels;
        qid = node_qid / params->num_channels;
        c = chs->c[ix];

        qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
        sq = kzalloc(sizeof(*sq), GFP_KERNEL);
        if (!sq)
                return -ENOMEM;

        mlx5e_build_create_cq_param(&ccp, c);

        memset(&param_sq, 0, sizeof(param_sq));
        memset(&param_cq, 0, sizeof(param_cq));
        mlx5e_build_sq_param(c->mdev, params, &param_sq);
        mlx5e_build_tx_cq_param(c->mdev, params, &param_cq);
        err = mlx5e_open_cq(c->mdev, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
        if (err)
                goto err_free_sq;

        tisn = mlx5e_profile_get_tisn(c->mdev, c->priv, c->priv->profile,
                                      c->lag_port, 0);
        err = mlx5e_open_txqsq(c, tisn, txq_ix, params, &param_sq, sq, 0, hw_id,
                               priv->htb_qos_sq_stats[node_qid]);
        if (err)
                goto err_close_cq;

        rcu_assign_pointer(qos_sqs[qid], sq);

        return 0;

err_close_cq:
        mlx5e_close_cq(&sq->cq);
err_free_sq:
        kfree(sq);
        return err;
}

static int mlx5e_open_qos_sq_cb_wrapper(void *data, u16 node_qid, u32 hw_id)
{
        struct qos_sq_callback_params *cb_params = data;

        return mlx5e_open_qos_sq(cb_params->priv, cb_params->chs, node_qid, hw_id);
}

int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
{
        struct mlx5e_priv *priv = data;
        struct mlx5e_txqsq *sq;
        u16 qid;

        sq = mlx5e_get_qos_sq(priv, node_qid);

        qid = mlx5e_qid_from_qos(&priv->channels, node_qid);

        /* If it's a new queue, it will be marked as started at this point.
         * Stop it before updating txq2sq.
         */
        mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));

        priv->txq2sq[qid] = sq;

        /* Make the change to txq2sq visible before the queue is started.
         * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
         * which pairs with this barrier.
         */
        smp_wmb();

        qos_dbg(sq->mdev, "Activate QoS SQ qid %u\n", node_qid);
        mlx5e_activate_txqsq(sq);

        return 0;
}

void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
        struct mlx5e_txqsq *sq;

        sq = mlx5e_get_qos_sq(priv, qid);
        if (!sq) /* Handle the case when the SQ failed to open. */
                return;

        qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
        mlx5e_deactivate_txqsq(sq);

        priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;

        /* Make the change to txq2sq visible before the queue is started again.
         * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
         * which pairs with this barrier.
         */
        smp_wmb();
}

void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
        struct mlx5e_txqsq __rcu **qos_sqs;
        struct mlx5e_params *params;
        struct mlx5e_channel *c;
        struct mlx5e_txqsq *sq;
        int ix;

        params = &priv->channels.params;

        ix = qid % params->num_channels;
        qid /= params->num_channels;
        c = priv->channels.c[ix];
        qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
        sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
        if (!sq) /* Handle the case when the SQ failed to open. */
                return;

        synchronize_rcu(); /* Sync with NAPI. */

        mlx5e_close_txqsq(sq);
        mlx5e_close_cq(&sq->cq);
        kfree(sq);
}

void mlx5e_qos_close_queues(struct mlx5e_channel *c)
{
        struct mlx5e_txqsq __rcu **qos_sqs;
        int i;

        qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
        if (!qos_sqs)
                return;
        synchronize_rcu(); /* Sync with NAPI. */

        for (i = 0; i < c->qos_sqs_size; i++) {
                struct mlx5e_txqsq *sq;

                sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
                if (!sq) /* Handle the case when the SQ failed to open. */
                        continue;

                mlx5e_close_txqsq(sq);
                mlx5e_close_cq(&sq->cq);
                kfree(sq);
        }

        kvfree(qos_sqs);
}

void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
{
        int i;

        for (i = 0; i < chs->num; i++)
                mlx5e_qos_close_queues(chs->c[i]);
}

int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
        u16 qos_sqs_size;
        int i;

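        /* Spread the maximum possible number of QoS SQs evenly across the
         * channels; each channel gets an array of qos_sqs_size SQ pointers.
         */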
        qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);

        for (i = 0; i < chs->num; i++) {
                struct mlx5e_txqsq **sqs;

                sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
                if (!sqs)
                        goto err_free;

                WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
                smp_wmb(); /* Pairs with mlx5e_napi_poll. */
                rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
        }

        return 0;

err_free:
        while (--i >= 0) {
                struct mlx5e_txqsq **sqs;

                sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
                                          lockdep_is_held(&priv->state_lock));

                synchronize_rcu(); /* Sync with NAPI. */
                kvfree(sqs);
        }
        return -ENOMEM;
}

int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
        struct qos_sq_callback_params callback_params;
        int err;

        err = mlx5e_qos_alloc_queues(priv, chs);
        if (err)
                return err;

        callback_params.priv = priv;
        callback_params.chs = chs;

        err = mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_open_qos_sq_cb_wrapper, &callback_params);
        if (err) {
                mlx5e_qos_close_all_queues(chs);
                return err;
        }

        return 0;
}

void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
{
        mlx5e_htb_enumerate_leaves(priv->htb, mlx5e_activate_qos_sq, priv);
}

void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
        struct mlx5e_params *params = &c->priv->channels.params;
        struct mlx5e_txqsq __rcu **qos_sqs;
        int i;

        qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
        if (!qos_sqs)
                return;

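        /* Per-channel slot i corresponds to node qid num_channels * i + c->ix
         * (the inverse of the striping done in mlx5e_open_qos_sq).
         */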
        for (i = 0; i < c->qos_sqs_size; i++) {
                u16 qid = params->num_channels * i + c->ix;
                struct mlx5e_txqsq *sq;

                sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
                if (!sq) /* Handle the case when the SQ failed to open. */
                        continue;

                qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
                mlx5e_deactivate_txqsq(sq);

                /* The queue is disabled, no synchronization with datapath is needed. */
                c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
        }
}

void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
{
        int i;

        for (i = 0; i < chs->num; i++)
                mlx5e_qos_deactivate_queues(chs->c[i]);
}

void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
{
        qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
        netdev_tx_reset_queue(txq);
        netif_tx_start_queue(txq);
}

void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
{
        struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
        struct Qdisc *qdisc = dev_queue->qdisc_sleeping;

        if (!qdisc)
                return;

        spin_lock_bh(qdisc_lock(qdisc));
        qdisc_reset(qdisc);
        spin_unlock_bh(qdisc_lock(qdisc));
}

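/* Entry point for HTB offload commands issued by the tc qdisc layer. */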
int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_qopt)
{
        struct mlx5e_htb *htb = priv->htb;
        int res;

        if (!htb && htb_qopt->command != TC_HTB_CREATE)
                return -EINVAL;

        if (htb_qopt->prio || htb_qopt->quantum) {
                NL_SET_ERR_MSG_MOD(htb_qopt->extack,
                                   "prio and quantum parameters are not supported by device with HTB offload enabled.");
                return -EOPNOTSUPP;
        }

        switch (htb_qopt->command) {
        case TC_HTB_CREATE:
                if (!mlx5_qos_is_supported(priv->mdev)) {
                        NL_SET_ERR_MSG_MOD(htb_qopt->extack,
                                           "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
                        return -EOPNOTSUPP;
                }
                priv->htb = mlx5e_htb_alloc();
                htb = priv->htb;
                if (!htb)
                        return -ENOMEM;
                res = mlx5e_htb_init(htb, htb_qopt, priv->netdev, priv->mdev, &priv->selq, priv);
                if (res) {
                        mlx5e_htb_free(htb);
                        priv->htb = NULL;
                }
                return res;
        case TC_HTB_DESTROY:
                mlx5e_htb_cleanup(htb);
                mlx5e_htb_free(htb);
                priv->htb = NULL;
                return 0;
        case TC_HTB_LEAF_ALLOC_QUEUE:
                res = mlx5e_htb_leaf_alloc_queue(htb, htb_qopt->classid, htb_qopt->parent_classid,
                                                 htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
                if (res < 0)
                        return res;
                htb_qopt->qid = res;
                return 0;
        case TC_HTB_LEAF_TO_INNER:
                return mlx5e_htb_leaf_to_inner(htb, htb_qopt->parent_classid, htb_qopt->classid,
                                               htb_qopt->rate, htb_qopt->ceil, htb_qopt->extack);
        case TC_HTB_LEAF_DEL:
                return mlx5e_htb_leaf_del(htb, &htb_qopt->classid, htb_qopt->extack);
        case TC_HTB_LEAF_DEL_LAST:
        case TC_HTB_LEAF_DEL_LAST_FORCE:
                return mlx5e_htb_leaf_del_last(htb, htb_qopt->classid,
                                               htb_qopt->command == TC_HTB_LEAF_DEL_LAST_FORCE,
                                               htb_qopt->extack);
        case TC_HTB_NODE_MODIFY:
                return mlx5e_htb_node_modify(htb, htb_qopt->classid, htb_qopt->rate, htb_qopt->ceil,
                                             htb_qopt->extack);
        case TC_HTB_LEAF_QUERY_QUEUE:
                res = mlx5e_htb_get_txq_by_classid(htb, htb_qopt->classid);
                if (res < 0)
                        return res;
                htb_qopt->qid = res;
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

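/* Rate-limiting state for mqprio channel mode: one root scheduling node with
 * one rate-limited leaf node per TC.
 */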
struct mlx5e_mqprio_rl {
        struct mlx5_core_dev *mdev;
        u32 root_id;
        u32 *leaves_id;
        u8 num_tc;
};

struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
{
        return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
}

void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
{
        kvfree(rl);
}

int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
                         u64 max_rate[])
{
        int err;
        int tc;

        if (!mlx5_qos_is_supported(mdev)) {
                qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
                return -EOPNOTSUPP;
        }
        if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
                return -EINVAL;

        rl->mdev = mdev;
        rl->num_tc = num_tc;
        rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
        if (!rl->leaves_id)
                return -ENOMEM;

        err = mlx5_qos_create_root_node(mdev, &rl->root_id);
        if (err)
                goto err_free_leaves;

        qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);

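        /* Create one leaf scheduling node per TC under the root, using the
         * TC's max_rate (converted to Mbit/sec) as its bandwidth ceiling.
         */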
        for (tc = 0; tc < num_tc; tc++) {
                u32 max_average_bw;

                max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
                err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
                                                &rl->leaves_id[tc]);
                if (err)
                        goto err_destroy_leaves;

                qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
                        tc, rl->leaves_id[tc], max_average_bw);
        }
        return 0;

err_destroy_leaves:
        while (--tc >= 0)
                mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
        mlx5_qos_destroy_node(mdev, rl->root_id);
err_free_leaves:
        kvfree(rl->leaves_id);
        return err;
}

void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
{
        int tc;

        for (tc = 0; tc < rl->num_tc; tc++)
                mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
        mlx5_qos_destroy_node(rl->mdev, rl->root_id);
        kvfree(rl->leaves_id);
}

int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
{
        if (tc >= rl->num_tc)
                return -EINVAL;

        *hw_id = rl->leaves_id[tc];
        return 0;
}