// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

#include "hnae.h"
#include "hns_dsaf_mac.h"
#include "hns_dsaf_main.h"
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"

#define AE_NAME_PORT_ID_IDX 6

static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);

	return vf_cb->mac_cb;
}

static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
{
	return container_of(dev, struct dsaf_device, ae_dev);
}

static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
{
	int ppe_index;
	struct ppe_common_cb *ppe_comm;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);

	ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
	ppe_index = vf_cb->port_index;

	return &ppe_comm->ppe_cb[ppe_index];
}

static int hns_ae_get_q_num_per_vf(
	struct dsaf_device *dsaf_dev, int port)
{
	return dsaf_dev->rcb_common[0]->max_q_per_vf;
}

static int hns_ae_get_vf_num_per_port(
	struct dsaf_device *dsaf_dev, int port)
{
	return dsaf_dev->rcb_common[0]->max_vfn;
}

static struct ring_pair_cb *hns_ae_get_base_ring_pair(
	struct dsaf_device *dsaf_dev, int port)
{
	struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
	int q_num = rcb_comm->max_q_per_vf;
	int vf_num = rcb_comm->max_vfn;

	return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
}

static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
{
	return container_of(q, struct ring_pair_cb, q);
}

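/*
 * hns_ae_get_handle - allocate and initialise an hnae_handle for a port.
 * Claims the first unused group of ring pairs behind the port, wires the
 * queues into the handle and records the MAC/DSAF context in the vf_cb
 * that wraps the handle.
 */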
static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
					     u32 port_id)
{
	int vfnum_per_port;
	int qnum_per_vf;
	int i;
	struct dsaf_device *dsaf_dev;
	struct hnae_handle *ae_handle;
	struct ring_pair_cb *ring_pair_cb;
	struct hnae_vf_cb *vf_cb;

	dsaf_dev = hns_ae_get_dsaf_dev(dev);

	ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
	vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
	qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);

	vf_cb = kzalloc(sizeof(*vf_cb) +
			qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
	if (unlikely(!vf_cb)) {
		dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
		ae_handle = ERR_PTR(-ENOMEM);
		goto handle_err;
	}
	ae_handle = &vf_cb->ae_handle;
	/* ae_handle Init */
	ae_handle->owner_dev = dsaf_dev->dev;
	ae_handle->dev = dev;
	ae_handle->q_num = qnum_per_vf;
	ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;

	/* find ring pair, and set vf id */
	for (ae_handle->vf_id = 0;
	     ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
		if (!ring_pair_cb->used_by_vf)
			break;
		ring_pair_cb += qnum_per_vf;
	}
	if (ae_handle->vf_id >= vfnum_per_port) {
		dev_err(dsaf_dev->dev, "malloc queue fail!\n");
		ae_handle = ERR_PTR(-EINVAL);
		goto vf_id_err;
	}

	ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
	for (i = 0; i < qnum_per_vf; i++) {
		ae_handle->qs[i] = &ring_pair_cb->q;
		ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
		ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];

		ring_pair_cb->used_by_vf = 1;
		ring_pair_cb++;
	}

	vf_cb->dsaf_dev = dsaf_dev;
	vf_cb->port_index = port_id;
	vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];

	ae_handle->phy_if = vf_cb->mac_cb->phy_if;
	ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
	ae_handle->if_support = vf_cb->mac_cb->if_support;
	ae_handle->port_type = vf_cb->mac_cb->mac_type;
	ae_handle->media_type = vf_cb->mac_cb->media_type;
	ae_handle->dport_id = port_id;

	return ae_handle;
vf_id_err:
	kfree(vf_cb);
handle_err:
	return ae_handle;
}

static void hns_ae_put_handle(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	int i;

	for (i = 0; i < handle->q_num; i++)
		hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;

	kfree(vf_cb);
}

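/*
 * hns_ae_wait_flow_down - wait for in-flight traffic to drain.
 * Walks the tx path from the RCB rings through the PPE fifo, the DSAF
 * fabric and finally the MAC fifo, returning an error as soon as any
 * stage fails to go idle.
 */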
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
{
	struct dsaf_device *dsaf_dev;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_vf_cb *vf_cb;
	int ret;
	int i;

	for (i = 0; i < handle->q_num; i++) {
		ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
		if (ret)
			return ret;
	}

	ppe_cb = hns_get_ppe_cb(handle);
	ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
	if (ret)
		return ret;

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return -EINVAL;
	ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
	if (ret)
		return ret;

	vf_cb = hns_ae_get_vf_cb(handle);
	ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
	if (ret)
		return ret;

	mdelay(10);
	return 0;
}

static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
{
	int q_num = handle->q_num;
	int i;

	for (i = 0; i < q_num; i++)
		hns_rcb_ring_enable_hw(handle->qs[i], val);
}

static void hns_ae_init_queue(struct hnae_queue *q)
{
	struct ring_pair_cb *ring =
		container_of(q, struct ring_pair_cb, q);

	hns_rcb_init_hw(ring);
}

static void hns_ae_fini_queue(struct hnae_queue *q)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_rcb_reset_ring_hw(q);
}

static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
{
	int ret;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (!p || !is_valid_ether_addr((const u8 *)p)) {
		dev_err(handle->owner_dev, "is not valid ether addr !\n");
		return -EADDRNOTAVAIL;
	}

	ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
	if (ret != 0) {
		dev_err(handle->owner_dev,
			"set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_ae_add_uc_address(struct hnae_handle *handle,
				 const unsigned char *addr)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return -ENOSPC;

	return hns_mac_add_uc_addr(mac_cb, handle->vf_id, addr);
}

static int hns_ae_rm_uc_address(struct hnae_handle *handle,
				const unsigned char *addr)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return -ENOSPC;

	return hns_mac_rm_uc_addr(mac_cb, handle->vf_id, addr);
}

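/*
 * hns_ae_set_multicast_one - add one multicast address on a service port.
 * The address is programmed twice: once against the MAC's own port id and
 * once against the inner port number resolved for this vf.
 */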
static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
{
	int ret;
	char *mac_addr = (char *)addr;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	u8 port_num;

	assert(mac_cb);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return 0;

	ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true);
	if (ret) {
		dev_err(handle->owner_dev,
			"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
			mac_addr, mac_cb->mac_id, ret);
		return ret;
	}

	ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
	if (ret)
		return ret;

	ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
	if (ret)
		dev_err(handle->owner_dev,
			"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
			mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);

	return ret;
}

static int hns_ae_clr_multicast(struct hnae_handle *handle)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	if (mac_cb->mac_type != HNAE_PORT_SERVICE)
		return 0;

	return hns_mac_clr_multicast(mac_cb, handle->vf_id);
}

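/*
 * hns_ae_set_mtu - change the port MTU.
 * On V2 hardware the rx buffer size is derived from the new MTU (2K or
 * 4K); V1 keeps the buffer size configured for the dsaf device.  Only if
 * the MAC accepts the new MTU are the rx rings reprogrammed with the new
 * buffer size.
 */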
static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct hnae_queue *q;
	u32 rx_buf_size;
	int i, ret;

	/* when buf_size is 2048, max mtu is 6K as rx ring max bd num is 3 */
	if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
		if (new_mtu <= BD_SIZE_2048_MAX_MTU)
			rx_buf_size = 2048;
		else
			rx_buf_size = 4096;
	} else {
		rx_buf_size = mac_cb->dsaf_dev->buf_size;
	}

	ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);

	if (!ret) {
		/* reinit ring buf_size */
		for (i = 0; i < handle->q_num; i++) {
			q = handle->qs[i];
			q->rx_ring.buf_size = rx_buf_size;
			hns_rcb_set_rx_ring_bs(q, rx_buf_size);
		}
	}

	return ret;
}

static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	hns_ppe_set_tso_enable(ppe_cb, enable);
}

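/*
 * hns_ae_start - bring the port up: enable broadcast for the vf, clear any
 * stale ring interrupts (V1 or V2 flavour), enable all ring pairs and then
 * start the MAC.
 */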
static int hns_ae_start(struct hnae_handle *handle)
{
	int ret;
	int k;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
	if (ret)
		return ret;

	for (k = 0; k < handle->q_num; k++) {
		if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
			hns_rcb_int_clr_hw(handle->qs[k],
					   RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
		else
			hns_rcbv2_int_clr_hw(handle->qs[k],
					     RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
	}
	hns_ae_ring_enable_all(handle, 1);
	msleep(100);

	hns_mac_start(mac_cb);

	return 0;
}

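/*
 * hns_ae_stop - quiesce the port: wait for the tx fbds to drain, stop the
 * MAC, disable the ring pairs, then wait for the rx fbds before disabling
 * broadcast for the vf.
 */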
static void hns_ae_stop(struct hnae_handle *handle)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	/* just clean tx fbd, no need to clean rx fbd yet */
	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);

	msleep(20);

	hns_mac_stop(mac_cb);

	usleep_range(10000, 20000);

	hns_ae_ring_enable_all(handle, 0);

	/* clean rx fbd. */
	hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);

	(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
}

static void hns_ae_reset(struct hnae_handle *handle)
{
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
		hns_mac_reset(vf_cb->mac_cb);
		hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
	}
}

static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
	u32 flag;

	if (is_tx_ring(ring))
		flag = RCB_INT_FLAG_TX;
	else
		flag = RCB_INT_FLAG_RX;

	hns_rcb_int_ctrl_hw(ring->q, flag, mask);
}

static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
{
	u32 flag;

	if (is_tx_ring(ring))
		flag = RCB_INT_FLAG_TX;
	else
		flag = RCB_INT_FLAG_RX;

	hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
}

static int hns_ae_get_link_status(struct hnae_handle *handle)
{
	u32 link_status;
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	hns_mac_get_link_status(mac_cb, &link_status);

	return !!link_status;
}

static int hns_ae_get_mac_info(struct hnae_handle *handle,
			       u8 *auto_neg, u16 *speed, u8 *duplex)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
}

static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
				    int duplex)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	return hns_mac_need_adjust_link(mac_cb, speed, duplex);
}

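/*
 * hns_ae_adjust_link - apply a new speed/duplex to the MAC.
 * V1 hardware is reconfigured directly; V2 first disables the rx path and
 * waits for in-flight packets to drain, and simply re-enables rx without
 * adjusting the link if the drain fails.
 */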
static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
			       int duplex)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	switch (mac_cb->dsaf_dev->dsaf_ver) {
	case AE_VERSION_1:
		hns_mac_adjust_link(mac_cb, speed, duplex);
		break;

	case AE_VERSION_2:
		/* chip needs to clear all packets inside */
		hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
		if (hns_ae_wait_flow_down(handle)) {
			hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
			break;
		}

		hns_mac_adjust_link(mac_cb, speed, duplex);
		hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
		break;

	default:
		break;
	}
}

static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
					u32 *uplimit)
{
	*uplimit = HNS_RCB_RING_MAX_PENDING_BD;
}

static void hns_ae_get_pauseparam(struct hnae_handle *handle,
				  u32 *auto_neg, u32 *rx_en, u32 *tx_en)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;

	hns_mac_get_autoneg(mac_cb, auto_neg);

	hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);

	/* Service port's pause feature is provided by DSAF, not mac */
	if (handle->port_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
}

static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
{
	assert(handle);

	return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
}

static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);

	hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
	hns_mac_set_promisc(mac_cb, (u8)!!en);
}

static int hns_ae_get_autoneg(struct hnae_handle *handle)
{
	u32 auto_neg;

	assert(handle);

	hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);

	return auto_neg;
}

static int hns_ae_set_pauseparam(struct hnae_handle *handle,
				 u32 autoneg, u32 rx_en, u32 tx_en)
{
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
	int ret;

	ret = hns_mac_set_autoneg(mac_cb, autoneg);
	if (ret)
		return ret;

	/* Service port's pause feature is provided by DSAF, not mac */
	if (handle->port_type == HNAE_PORT_SERVICE) {
		ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
						   mac_cb->mac_id, rx_en);
		if (ret)
			return ret;
		rx_en = 0;
	}
	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
}

static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
				      u32 *tx_usecs, u32 *rx_usecs)
{
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);

	*tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
					       ring_pair->port_id_in_comm);
	*rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
					       ring_pair->port_id_in_comm);
}

static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
					    u32 *tx_frames, u32 *rx_frames)
{
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG)
		*tx_frames = hns_rcb_get_rx_coalesced_frames(
			ring_pair->rcb_common, ring_pair->port_id_in_comm);
	else
		*tx_frames = hns_rcb_get_tx_coalesced_frames(
			ring_pair->rcb_common, ring_pair->port_id_in_comm);
	*rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
						     ring_pair->port_id_in_comm);
}

static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
				     u32 timeout)
{
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);

	return hns_rcb_set_coalesce_usecs(
		ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
}

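/*
 * hns_ae_set_coalesce_frames - configure interrupt frame coalescing.
 * V1 and debug ports only expose the rx counter, so tx and rx must be set
 * to the same value; V2 service ports program tx and rx separately but
 * only accept tx_frames == 1.
 */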
static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
				      u32 tx_frames, u32 rx_frames)
{
	int ret;
	struct ring_pair_cb *ring_pair =
		container_of(handle->qs[0], struct ring_pair_cb, q);
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG) {
		if (tx_frames != rx_frames)
			return -EINVAL;
		return hns_rcb_set_rx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, rx_frames);
	} else {
		if (tx_frames != 1)
			return -EINVAL;
		ret = hns_rcb_set_tx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, tx_frames);
		if (ret)
			return ret;

		return hns_rcb_set_rx_coalesced_frames(
			ring_pair->rcb_common,
			ring_pair->port_id_in_comm, rx_frames);
	}
}

static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
				      u32 *tx_frames_low, u32 *rx_frames_low,
				      u32 *tx_frames_high, u32 *rx_frames_high,
				      u32 *tx_usecs_low, u32 *rx_usecs_low,
				      u32 *tx_usecs_high, u32 *rx_usecs_high)
{
	struct dsaf_device *dsaf_dev;

	assert(handle);

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	*tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
	*rx_frames_low = HNS_RCB_RX_FRAMES_LOW;

	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
	    handle->port_type == HNAE_PORT_DEBUG)
		*tx_frames_high =
			(dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
			HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
	else
		*tx_frames_high = 1;

	*rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
		HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
	*tx_usecs_low = HNS_RCB_TX_USECS_LOW;
	*rx_usecs_low = HNS_RCB_RX_USECS_LOW;
	*tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
	*rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
}

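/*
 * hns_ae_update_stats - refresh hardware counters and fold them into
 * net_device_stats: per-ring RCB counters, PPE drop/error counters, DSAF
 * per-port counters (service ports only, for both the uplink and the
 * downlink inode) and MAC error counters.
 */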
static void hns_ae_update_stats(struct hnae_handle *handle,
				struct net_device_stats *net_stats)
{
	int port;
	int idx;
	struct dsaf_device *dsaf_dev;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct hnae_queue *queue;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
	u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
	u64 rx_missed_errors = 0;

	dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	if (!dsaf_dev)
		return;
	port = vf_cb->port_index;
	ppe_cb = hns_get_ppe_cb(handle);
	mac_cb = hns_get_mac_cb(handle);

	for (idx = 0; idx < handle->q_num; idx++) {
		queue = handle->qs[idx];
		hns_rcb_update_stats(queue);

		tx_bytes += queue->tx_ring.stats.tx_bytes;
		tx_packets += queue->tx_ring.stats.tx_pkts;
		rx_bytes += queue->rx_ring.stats.rx_bytes;
		rx_packets += queue->rx_ring.stats.rx_pkts;

		rx_errors += queue->rx_ring.stats.err_pkt_len
				+ queue->rx_ring.stats.l2_err
				+ queue->rx_ring.stats.l3l4_csum_err;
	}

	hns_ppe_update_stats(ppe_cb);
	rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
	tx_errors += ppe_cb->hw_stats.tx_err_checksum
		+ ppe_cb->hw_stats.tx_err_fifo_empty;

	if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
		hns_dsaf_update_stats(dsaf_dev, port);
		/* for port upline direction, i.e., rx. */
		rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
		rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;

		/* for port downline direction, i.e., tx. */
		port = port + DSAF_PPE_INODE_BASE;
		hns_dsaf_update_stats(dsaf_dev, port);
		tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
		tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
		tx_dropped += dsaf_dev->hw_stats[port].crc_false;
		tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
		tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
		tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
	}

	hns_mac_update_stats(mac_cb);
	rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;

	tx_errors += mac_cb->hw_stats.tx_bad_pkts
		+ mac_cb->hw_stats.tx_fragment_err
		+ mac_cb->hw_stats.tx_jabber_err
		+ mac_cb->hw_stats.tx_underrun_err
		+ mac_cb->hw_stats.tx_crc_err;

	net_stats->tx_bytes = tx_bytes;
	net_stats->tx_packets = tx_packets;
	net_stats->rx_bytes = rx_bytes;
	net_stats->rx_dropped = 0;
	net_stats->rx_packets = rx_packets;
	net_stats->rx_errors = rx_errors;
	net_stats->tx_errors = tx_errors;
	net_stats->tx_dropped = tx_dropped;
	net_stats->rx_missed_errors = rx_missed_errors;
	net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
	net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
	net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
	net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
	net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
}

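/*
 * hns_ae_get_stats - fill the ethtool statistics buffer.  The layout must
 * match hns_ae_get_strings()/hns_ae_get_sset_count(): per-ring RCB stats
 * first, then PPE, MAC and, for service ports, the DSAF block.
 */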
static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
{
	int idx;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	u64 *p = data;
	struct hnae_vf_cb *vf_cb;

	if (!handle || !data) {
		pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
		return;
	}

	vf_cb = hns_ae_get_vf_cb(handle);
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	for (idx = 0; idx < handle->q_num; idx++) {
		hns_rcb_get_stats(handle->qs[idx], p);
		p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
	}

	hns_ppe_get_stats(ppe_cb, p);
	p += hns_ppe_get_sset_count((int)ETH_SS_STATS);

	hns_mac_get_stats(mac_cb, p);
	p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);

	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
}

static void hns_ae_get_strings(struct hnae_handle *handle,
			       u32 stringset, u8 *data)
{
	int port;
	int idx;
	struct hns_mac_cb *mac_cb;
	struct hns_ppe_cb *ppe_cb;
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
	u8 *p = data;
	struct hnae_vf_cb *vf_cb;

	assert(handle);

	vf_cb = hns_ae_get_vf_cb(handle);
	port = vf_cb->port_index;
	mac_cb = hns_get_mac_cb(handle);
	ppe_cb = hns_get_ppe_cb(handle);

	for (idx = 0; idx < handle->q_num; idx++) {
		hns_rcb_get_strings(stringset, p, idx);
		p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
	}

	hns_ppe_get_strings(ppe_cb, stringset, p);
	p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);

	hns_mac_get_strings(mac_cb, stringset, p);
	p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);

	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}

static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
	u32 sset_count = 0;
	struct hns_mac_cb *mac_cb;
	struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);

	assert(handle);

	mac_cb = hns_get_mac_cb(handle);

	sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
	sset_count += hns_ppe_get_sset_count(stringset);
	sset_count += hns_mac_get_sset_count(mac_cb, stringset);

	if (mac_cb->mac_type == HNAE_PORT_SERVICE)
		sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);

	return sset_count;
}

static int hns_ae_config_loopback(struct hnae_handle *handle,
				  enum hnae_loop loop, int en)
{
	int ret;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
	struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;

	switch (loop) {
	case MAC_INTERNALLOOP_PHY:
		ret = 0;
		break;
	case MAC_INTERNALLOOP_SERDES:
		ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
							     !!en);
		break;
	case MAC_INTERNALLOOP_MAC:
		ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static void hns_ae_update_led_status(struct hnae_handle *handle)
{
	struct hns_mac_cb *mac_cb;

	assert(handle);
	mac_cb = hns_get_mac_cb(handle);
	if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
		return;

	hns_set_led_opt(mac_cb);
}

static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
				  enum hnae_led_state status)
{
	struct hns_mac_cb *mac_cb;

	assert(handle);

	mac_cb = hns_get_mac_cb(handle);

	return hns_cpld_led_set_id(mac_cb, status);
}

static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
	u32 *p = data;
	int i;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	hns_ppe_get_regs(ppe_cb, p);
	p += hns_ppe_get_regs_count();

	hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
	p += hns_rcb_get_common_regs_count();

	for (i = 0; i < handle->q_num; i++) {
		hns_rcb_get_ring_regs(handle->qs[i], p);
		p += hns_rcb_get_ring_regs_count();
	}

	hns_mac_get_regs(vf_cb->mac_cb, p);
	p += hns_mac_get_regs_count(vf_cb->mac_cb);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
		hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
}

static int hns_ae_get_regs_len(struct hnae_handle *handle)
{
	u32 total_num;
	struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);

	total_num = hns_ppe_get_regs_count();
	total_num += hns_rcb_get_common_regs_count();
	total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
	total_num += hns_mac_get_regs_count(vf_cb->mac_cb);

	if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
		total_num += hns_dsaf_get_regs_count();

	return total_num;
}

static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle)
{
	return HNS_PPEV2_RSS_KEY_SIZE;
}

static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle)
{
	return HNS_PPEV2_RSS_IND_TBL_SIZE;
}

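/*
 * hns_ae_get_rss/hns_ae_set_rss - ethtool RSS accessors.  The PPE keeps a
 * shadow copy of the hash key and the indirection table in ppe_cb; that is
 * what is reported back to the user and what gets written to the hardware.
 */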
static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	/* currently we support only one type of hash function: Toeplitz */
	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	/* get the RSS Key required by the user */
	if (key)
		memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);

	/* update the current hash->queue mappings from the shadow RSS table */
	if (indir)
		memcpy(indir, ppe_cb->rss_indir_table,
		       HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));

	return 0;
}

static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);

	/* set the RSS Hash Key if specified by the user */
	if (key) {
		memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
		hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
	}

	if (indir) {
		/* update the shadow RSS table with user specified qids */
		memcpy(ppe_cb->rss_indir_table, indir,
		       HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));

		/* now update the hardware */
		hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
	}

	return 0;
}

static struct hnae_ae_ops hns_dsaf_ops = {
	.get_handle = hns_ae_get_handle,
	.put_handle = hns_ae_put_handle,
	.init_queue = hns_ae_init_queue,
	.fini_queue = hns_ae_fini_queue,
	.start = hns_ae_start,
	.stop = hns_ae_stop,
	.reset = hns_ae_reset,
	.toggle_ring_irq = hns_ae_toggle_ring_irq,
	.get_status = hns_ae_get_link_status,
	.get_info = hns_ae_get_mac_info,
	.adjust_link = hns_ae_adjust_link,
	.need_adjust_link = hns_ae_need_adjust_link,
	.set_loopback = hns_ae_config_loopback,
	.get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
	.get_pauseparam = hns_ae_get_pauseparam,
	.set_autoneg = hns_ae_set_autoneg,
	.get_autoneg = hns_ae_get_autoneg,
	.set_pauseparam = hns_ae_set_pauseparam,
	.get_coalesce_usecs = hns_ae_get_coalesce_usecs,
	.get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
	.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
	.set_coalesce_frames = hns_ae_set_coalesce_frames,
	.get_coalesce_range = hns_ae_get_coalesce_range,
	.set_promisc_mode = hns_ae_set_promisc_mode,
	.set_mac_addr = hns_ae_set_mac_address,
	.add_uc_addr = hns_ae_add_uc_address,
	.rm_uc_addr = hns_ae_rm_uc_address,
	.set_mc_addr = hns_ae_set_multicast_one,
	.clr_mc_addr = hns_ae_clr_multicast,
	.set_mtu = hns_ae_set_mtu,
	.update_stats = hns_ae_update_stats,
	.set_tso_stats = hns_ae_set_tso_stats,
	.get_stats = hns_ae_get_stats,
	.get_strings = hns_ae_get_strings,
	.get_sset_count = hns_ae_get_sset_count,
	.update_led_status = hns_ae_update_led_status,
	.set_led_id = hns_ae_cpld_set_led_id,
	.get_regs = hns_ae_get_regs,
	.get_regs_len = hns_ae_get_regs_len,
	.get_rss_key_size = hns_ae_get_rss_key_size,
	.get_rss_indir_size = hns_ae_get_rss_indir_size,
	.get_rss = hns_ae_get_rss,
	.set_rss = hns_ae_set_rss
};

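/*
 * hns_dsaf_ae_init - register the dsaf device with the hnae framework.
 * Picks the V1 or V2 ring-irq toggle op for hns_dsaf_ops, gives the AE
 * device a unique name and hands it to hnae_ae_register().
 */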
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
{
	struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
	static atomic_t id = ATOMIC_INIT(-1);

	switch (dsaf_dev->dsaf_ver) {
	case AE_VERSION_1:
		hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq;
		break;
	case AE_VERSION_2:
		hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq;
		break;
	default:
		break;
	}

	snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
		 (int)atomic_inc_return(&id));
	ae_dev->ops = &hns_dsaf_ops;
	ae_dev->dev = dsaf_dev->dev;

	return hnae_ae_register(ae_dev, THIS_MODULE);
}

void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
{
	hnae_ae_unregister(&dsaf_dev->ae_dev);
}