Commit | Line | Data |
---|---|---|
5a6d7c9d SG |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Marvell OcteonTx2 RVU Ethernet driver | |
3 | * | |
4 | * Copyright (C) 2020 Marvell International Ltd. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | ||
11 | #include <linux/interrupt.h> | |
12 | #include <linux/pci.h> | |
86d74760 | 13 | #include <net/tso.h> |
5a6d7c9d SG |
14 | |
15 | #include "otx2_reg.h" | |
16 | #include "otx2_common.h" | |
05fcc9e0 SG |
17 | #include "otx2_struct.h" |
18 | ||
d45d8979 CJ |
19 | static void otx2_nix_rq_op_stats(struct queue_stats *stats, |
20 | struct otx2_nic *pfvf, int qidx) | |
21 | { | |
22 | u64 incr = (u64)qidx << 32; | |
23 | u64 *ptr; | |
24 | ||
25 | ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS); | |
26 | stats->bytes = otx2_atomic64_add(incr, ptr); | |
27 | ||
28 | ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS); | |
29 | stats->pkts = otx2_atomic64_add(incr, ptr); | |
30 | } | |
31 | ||
32 | static void otx2_nix_sq_op_stats(struct queue_stats *stats, | |
33 | struct otx2_nic *pfvf, int qidx) | |
34 | { | |
35 | u64 incr = (u64)qidx << 32; | |
36 | u64 *ptr; | |
37 | ||
38 | ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS); | |
39 | stats->bytes = otx2_atomic64_add(incr, ptr); | |
40 | ||
41 | ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS); | |
42 | stats->pkts = otx2_atomic64_add(incr, ptr); | |
43 | } | |
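/* How the OP reads above work (inferred from the code, not HW docs):
 * the queue index is packed into bits <63:32> of the value passed to
 * otx2_atomic64_add(), e.g. qidx = 3 gives an operand of
 * 0x300000000ULL, and the atomic add on the NIX_LF_*_OP_* register
 * returns the selected queue's byte/packet counter.
 */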
44 | ||
45 | void otx2_update_lmac_stats(struct otx2_nic *pfvf) | |
46 | { | |
47 | struct msg_req *req; | |
48 | ||
49 | if (!netif_running(pfvf->netdev)) | |
50 | return; | |
51 | ||
4c3212f5 | 52 | mutex_lock(&pfvf->mbox.lock); |
d45d8979 CJ |
53 | req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox); |
54 | if (!req) { | |
4c3212f5 | 55 | mutex_unlock(&pfvf->mbox.lock); |
d45d8979 CJ |
56 | return; |
57 | } | |
58 | ||
59 | otx2_sync_mbox_msg(&pfvf->mbox); | |
4c3212f5 | 60 | mutex_unlock(&pfvf->mbox.lock); |
d45d8979 CJ |
61 | } |
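/* The sequence above is the canonical mailbox pattern used throughout
 * this file: take mbox.lock, allocate a message in the shared region,
 * fill it in, send it synchronously to the AF with
 * otx2_sync_mbox_msg(), then unlock. An allocation failure means the
 * shared region is full; callers either bail out (as here) or flush
 * with otx2_sync_mbox_msg() and retry (see otx2_set_rss_table()).
 */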
62 | ||
63 | int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx) | |
64 | { | |
65 | struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx]; | |
66 | ||
67 | if (!pfvf->qset.rq) | |
68 | return 0; | |
69 | ||
70 | otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx); | |
71 | return 1; | |
72 | } | |
73 | ||
74 | int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx) | |
75 | { | |
76 | struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx]; | |
77 | ||
78 | if (!pfvf->qset.sq) | |
79 | return 0; | |
80 | ||
81 | otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx); | |
82 | return 1; | |
83 | } | |
84 | ||
e239d0c7 G |
85 | void otx2_get_dev_stats(struct otx2_nic *pfvf) |
86 | { | |
87 | struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats; | |
88 | ||
89 | #define OTX2_GET_RX_STATS(reg) \ | |
90 | otx2_read64(pfvf, NIX_LF_RX_STATX(reg)) | |
91 | #define OTX2_GET_TX_STATS(reg) \ | |
92 | otx2_read64(pfvf, NIX_LF_TX_STATX(reg)) | |
93 | ||
94 | dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS); | |
95 | dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP); | |
96 | dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST); | |
97 | dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST); | |
98 | dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST); | |
99 | dev_stats->rx_frames = dev_stats->rx_bcast_frames + | |
100 | dev_stats->rx_mcast_frames + | |
101 | dev_stats->rx_ucast_frames; | |
102 | ||
103 | dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS); | |
104 | dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP); | |
105 | dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST); | |
106 | dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST); | |
107 | dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST); | |
108 | dev_stats->tx_frames = dev_stats->tx_bcast_frames + | |
109 | dev_stats->tx_mcast_frames + | |
110 | dev_stats->tx_ucast_frames; | |
111 | } | |
112 | ||
113 | void otx2_get_stats64(struct net_device *netdev, | |
114 | struct rtnl_link_stats64 *stats) | |
115 | { | |
116 | struct otx2_nic *pfvf = netdev_priv(netdev); | |
117 | struct otx2_dev_stats *dev_stats; | |
118 | ||
119 | otx2_get_dev_stats(pfvf); | |
120 | ||
121 | dev_stats = &pfvf->hw.dev_stats; | |
122 | stats->rx_bytes = dev_stats->rx_bytes; | |
123 | stats->rx_packets = dev_stats->rx_frames; | |
124 | stats->rx_dropped = dev_stats->rx_drops; | |
125 | stats->multicast = dev_stats->rx_mcast_frames; | |
126 | ||
127 | stats->tx_bytes = dev_stats->tx_bytes; | |
128 | stats->tx_packets = dev_stats->tx_frames; | |
129 | stats->tx_dropped = dev_stats->tx_drops; | |
130 | } | |
3184fb5b | 131 | EXPORT_SYMBOL(otx2_get_stats64); |
e239d0c7 | 132 | |
34bfe0eb SG |
133 | /* Sync MAC address with RVU AF */ |
134 | static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac) | |
135 | { | |
136 | struct nix_set_mac_addr *req; | |
137 | int err; | |
138 | ||
4c3212f5 | 139 | mutex_lock(&pfvf->mbox.lock); |
34bfe0eb SG |
140 | req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox); |
141 | if (!req) { | |
4c3212f5 | 142 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
143 | return -ENOMEM; |
144 | } | |
145 | ||
146 | ether_addr_copy(req->mac_addr, mac); | |
147 | ||
148 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
4c3212f5 | 149 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
150 | return err; |
151 | } | |
152 | ||
153 | static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf, | |
154 | struct net_device *netdev) | |
155 | { | |
156 | struct nix_get_mac_addr_rsp *rsp; | |
157 | struct mbox_msghdr *msghdr; | |
158 | struct msg_req *req; | |
159 | int err; | |
160 | ||
4c3212f5 | 161 | mutex_lock(&pfvf->mbox.lock); |
34bfe0eb SG |
162 | req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox); |
163 | if (!req) { | |
4c3212f5 | 164 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
165 | return -ENOMEM; |
166 | } | |
167 | ||
168 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
169 | if (err) { | |
4c3212f5 | 170 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
171 | return err; |
172 | } | |
173 | ||
174 | msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); | |
08ff7818 | 175 | if (IS_ERR(msghdr)) { |
4c3212f5 | 176 | mutex_unlock(&pfvf->mbox.lock); |
08ff7818 | 177 | return PTR_ERR(msghdr); |
34bfe0eb SG |
178 | } |
179 | rsp = (struct nix_get_mac_addr_rsp *)msghdr; | |
180 | ether_addr_copy(netdev->dev_addr, rsp->mac_addr); | |
4c3212f5 | 181 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
182 | |
183 | return 0; | |
184 | } | |
185 | ||
186 | int otx2_set_mac_address(struct net_device *netdev, void *p) | |
187 | { | |
188 | struct otx2_nic *pfvf = netdev_priv(netdev); | |
189 | struct sockaddr *addr = p; | |
190 | ||
191 | if (!is_valid_ether_addr(addr->sa_data)) | |
192 | return -EADDRNOTAVAIL; | |
193 | ||
fd9d7859 | 194 | if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { |
34bfe0eb | 195 | memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); |
fd9d7859 HK |
196 | /* update dmac field in vlan offload rule */ |
197 | if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) | |
198 | otx2_install_rxvlan_offload_flow(pfvf); | |
199 | } else { | |
34bfe0eb | 200 | return -EPERM; |
fd9d7859 | 201 | } |
34bfe0eb SG |
202 | |
203 | return 0; | |
204 | } | |
3184fb5b | 205 | EXPORT_SYMBOL(otx2_set_mac_address); |
34bfe0eb SG |
206 | |
207 | int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu) | |
208 | { | |
209 | struct nix_frs_cfg *req; | |
210 | int err; | |
211 | ||
4c3212f5 | 212 | mutex_lock(&pfvf->mbox.lock); |
34bfe0eb SG |
213 | req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox); |
214 | if (!req) { | |
4c3212f5 | 215 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
216 | return -ENOMEM; |
217 | } | |
218 | ||
34bfe0eb SG |
219 | pfvf->max_frs = mtu + OTX2_ETH_HLEN; |
220 | req->maxlen = pfvf->max_frs; | |
221 | ||
222 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
4c3212f5 | 223 | mutex_unlock(&pfvf->mbox.lock); |
34bfe0eb SG |
224 | return err; |
225 | } | |
226 | ||
75f36270 G |
227 | int otx2_config_pause_frm(struct otx2_nic *pfvf) |
228 | { | |
229 | struct cgx_pause_frm_cfg *req; | |
230 | int err; | |
231 | ||
3184fb5b TD |
232 | if (is_otx2_lbkvf(pfvf->pdev)) |
233 | return 0; | |
234 | ||
4c3212f5 | 235 | mutex_lock(&pfvf->mbox.lock); |
75f36270 | 236 | req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox); |
8a765471 DC |
237 | if (!req) { |
238 | err = -ENOMEM; | |
239 | goto unlock; | |
240 | } | |
75f36270 G |
241 | |
242 | req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED); | |
243 | req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED); | |
244 | req->set = 1; | |
245 | ||
246 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
8a765471 | 247 | unlock: |
4c3212f5 | 248 | mutex_unlock(&pfvf->mbox.lock); |
75f36270 G |
249 | return err; |
250 | } | |
251 | ||
6e92d71b | 252 | int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) |
85069e95 SG |
253 | { |
254 | struct otx2_rss_info *rss = &pfvf->hw.rss_info; | |
255 | struct nix_rss_flowkey_cfg *req; | |
256 | int err; | |
257 | ||
4c3212f5 | 258 | mutex_lock(&pfvf->mbox.lock); |
85069e95 SG |
259 | req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox); |
260 | if (!req) { | |
4c3212f5 | 261 | mutex_unlock(&pfvf->mbox.lock); |
85069e95 SG |
262 | return -ENOMEM; |
263 | } | |
264 | req->mcam_index = -1; /* Default or reserved index */ | |
265 | req->flowkey_cfg = rss->flowkey_cfg; | |
266 | req->group = DEFAULT_RSS_CONTEXT_GROUP; | |
267 | ||
268 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
4c3212f5 | 269 | mutex_unlock(&pfvf->mbox.lock); |
85069e95 SG |
270 | return err; |
271 | } | |
272 | ||
6e92d71b | 273 | int otx2_set_rss_table(struct otx2_nic *pfvf) |
85069e95 SG |
274 | { |
275 | struct otx2_rss_info *rss = &pfvf->hw.rss_info; | |
276 | struct mbox *mbox = &pfvf->mbox; | |
277 | struct nix_aq_enq_req *aq; | |
278 | int idx, err; | |
279 | ||
4c3212f5 | 280 | mutex_lock(&mbox->lock); |
85069e95 SG |
281 | /* Get memory to put this msg */ |
282 | for (idx = 0; idx < rss->rss_size; idx++) { | |
283 | aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); | |
284 | if (!aq) { | |
285 | /* The shared memory buffer can be full. | |
286 | * Flush it and retry | |
287 | */ | |
288 | err = otx2_sync_mbox_msg(mbox); | |
289 | if (err) { | |
4c3212f5 | 290 | mutex_unlock(&mbox->lock); |
85069e95 SG |
291 | return err; |
292 | } | |
293 | aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); | |
294 | if (!aq) { | |
4c3212f5 | 295 | mutex_unlock(&mbox->lock); |
85069e95 SG |
296 | return -ENOMEM; |
297 | } | |
298 | } | |
299 | ||
300 | aq->rss.rq = rss->ind_tbl[idx]; | |
301 | ||
302 | /* Fill AQ info */ | |
303 | aq->qidx = idx; | |
304 | aq->ctype = NIX_AQ_CTYPE_RSS; | |
305 | aq->op = NIX_AQ_INSTOP_INIT; | |
306 | } | |
307 | err = otx2_sync_mbox_msg(mbox); | |
4c3212f5 | 308 | mutex_unlock(&mbox->lock); |
85069e95 SG |
309 | return err; |
310 | } | |
311 | ||
6e92d71b | 312 | void otx2_set_rss_key(struct otx2_nic *pfvf) |
85069e95 SG |
313 | { |
314 | struct otx2_rss_info *rss = &pfvf->hw.rss_info; | |
315 | u64 *key = (u64 *)&rss->key[4]; | |
316 | int idx; | |
317 | ||
318 | /* The 352-bit (44-byte) key is configured as follows: | |
319 | * NIX_LF_RX_SECRETX0 = key<351:288> | |
320 | * NIX_LF_RX_SECRETX1 = key<287:224> | |
321 | * NIX_LF_RX_SECRETX2 = key<223:160> | |
322 | * NIX_LF_RX_SECRETX3 = key<159:96> | |
323 | * NIX_LF_RX_SECRETX4 = key<95:32> | |
324 | * NIX_LF_RX_SECRETX5<63:32> = key<31:0> | |
325 | */ | |
326 | otx2_write64(pfvf, NIX_LF_RX_SECRETX(5), | |
327 | (u64)(*((u32 *)&rss->key)) << 32); | |
328 | idx = sizeof(rss->key) / sizeof(u64); | |
329 | while (idx > 0) { | |
330 | idx--; | |
331 | otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++); | |
332 | } | |
333 | } | |
334 | ||
335 | int otx2_rss_init(struct otx2_nic *pfvf) | |
336 | { | |
337 | struct otx2_rss_info *rss = &pfvf->hw.rss_info; | |
338 | int idx, ret = 0; | |
339 | ||
340 | rss->rss_size = sizeof(rss->ind_tbl); | |
341 | ||
342 | /* Init RSS key if it is not setup already */ | |
343 | if (!rss->enable) | |
344 | netdev_rss_key_fill(rss->key, sizeof(rss->key)); | |
345 | otx2_set_rss_key(pfvf); | |
346 | ||
347 | if (!netif_is_rxfh_configured(pfvf->netdev)) { | |
348 | /* Default indirection table */ | |
349 | for (idx = 0; idx < rss->rss_size; idx++) | |
350 | rss->ind_tbl[idx] = | |
351 | ethtool_rxfh_indir_default(idx, | |
352 | pfvf->hw.rx_queues); | |
353 | } | |
354 | ret = otx2_set_rss_table(pfvf); | |
355 | if (ret) | |
356 | return ret; | |
357 | ||
358 | /* Flowkey or hash config to be used for generating flow tag */ | |
359 | rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg : | |
360 | NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 | | |
361 | NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP | | |
f9e425e9 GC |
362 | NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN | |
363 | NIX_FLOW_KEY_TYPE_IPV4_PROTO; | |
85069e95 SG |
364 | |
365 | ret = otx2_set_flowkey_cfg(pfvf); | |
366 | if (ret) | |
367 | return ret; | |
368 | ||
369 | rss->enable = true; | |
370 | return 0; | |
371 | } | |
372 | ||
dc1a9bf2 SG |
373 | /* Setup UDP segmentation algorithm in HW */ |
374 | static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4) | |
375 | { | |
376 | struct nix_lso_format *field; | |
377 | ||
378 | field = (struct nix_lso_format *)&lso->fields[0]; | |
379 | lso->field_mask = GENMASK(18, 0); | |
380 | ||
381 | /* IP's Length field */ | |
382 | field->layer = NIX_TXLAYER_OL3; | |
383 | /* In IPv4 the length field is at byte offset 2; for IPv6 it is 4 */ | |
384 | field->offset = v4 ? 2 : 4; | |
385 | field->sizem1 = 1; /* i.e. 2 bytes */ | |
386 | field->alg = NIX_LSOALG_ADD_PAYLEN; | |
387 | field++; | |
388 | ||
389 | /* No ID field in IPv6 header */ | |
390 | if (v4) { | |
391 | /* Increment IPID */ | |
392 | field->layer = NIX_TXLAYER_OL3; | |
393 | field->offset = 4; | |
394 | field->sizem1 = 1; /* i.e. 2 bytes */ | |
395 | field->alg = NIX_LSOALG_ADD_SEGNUM; | |
396 | field++; | |
397 | } | |
398 | ||
399 | /* Update length in UDP header */ | |
400 | field->layer = NIX_TXLAYER_OL4; | |
401 | field->offset = 4; | |
402 | field->sizem1 = 1; | |
403 | field->alg = NIX_LSOALG_ADD_PAYLEN; | |
404 | } | |
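/* Summarizing the per-segment updates programmed above: for UDPv4 the
 * HW adds the payload length to the IP total length (OL3, offset 2),
 * adds the segment number to the IP ID (OL3, offset 4), and adds the
 * payload length to the UDP length (OL4, offset 4). For UDPv6 only
 * the IP payload length (OL3, offset 4) and UDP length updates apply.
 */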
405 | ||
406 | /* Setup segmentation algorithms in HW and retrieve algorithm index */ | |
407 | void otx2_setup_segmentation(struct otx2_nic *pfvf) | |
408 | { | |
409 | struct nix_lso_format_cfg_rsp *rsp; | |
410 | struct nix_lso_format_cfg *lso; | |
411 | struct otx2_hw *hw = &pfvf->hw; | |
412 | int err; | |
413 | ||
414 | mutex_lock(&pfvf->mbox.lock); | |
415 | ||
416 | /* UDPv4 segmentation */ | |
417 | lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); | |
418 | if (!lso) | |
419 | goto fail; | |
420 | ||
421 | /* Setup UDP/IP header fields that HW should update per segment */ | |
422 | otx2_setup_udp_segmentation(lso, true); | |
423 | ||
424 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
425 | if (err) | |
426 | goto fail; | |
427 | ||
428 | rsp = (struct nix_lso_format_cfg_rsp *) | |
429 | otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); | |
430 | if (IS_ERR(rsp)) | |
431 | goto fail; | |
432 | ||
433 | hw->lso_udpv4_idx = rsp->lso_format_idx; | |
434 | ||
435 | /* UDPv6 segmentation */ | |
436 | lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox); | |
437 | if (!lso) | |
438 | goto fail; | |
439 | ||
440 | /* Setup UDP/IP header fields that HW should update per segment */ | |
441 | otx2_setup_udp_segmentation(lso, false); | |
442 | ||
443 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
444 | if (err) | |
445 | goto fail; | |
446 | ||
447 | rsp = (struct nix_lso_format_cfg_rsp *) | |
448 | otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr); | |
449 | if (IS_ERR(rsp)) | |
450 | goto fail; | |
451 | ||
452 | hw->lso_udpv6_idx = rsp->lso_format_idx; | |
453 | mutex_unlock(&pfvf->mbox.lock); | |
454 | return; | |
455 | fail: | |
456 | mutex_unlock(&pfvf->mbox.lock); | |
457 | netdev_info(pfvf->netdev, | |
458 | "Failed to get LSO index for UDP GSO offload, disabling\n"); | |
459 | pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4; | |
460 | } | |
461 | ||
04a21ef3 SG |
462 | void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) |
463 | { | |
464 | /* Configure CQE interrupt coalescing parameters | |
465 | * | |
466 | * HW triggers an irq when ECOUNT > cq_ecount_wait, hence | |
467 | * program 1 less than cq_ecount_wait. cq_time_wait is in | |
468 | * usecs; convert it to a count of 100ns ticks. | |
469 | */ | |
470 | otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx), | |
471 | ((u64)(pfvf->hw.cq_time_wait * 10) << 48) | | |
472 | ((u64)pfvf->hw.cq_qcount_wait << 32) | | |
473 | (pfvf->hw.cq_ecount_wait - 1)); | |
474 | } | |
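/* Worked example (values assumed): with cq_time_wait = 10 usecs,
 * 100 ticks of 100ns land in bits <63:48>; cq_qcount_wait sits in
 * bits <47:32>; and cq_ecount_wait = 32 is programmed as 31, so the
 * IRQ fires when 32 or more CQEs are pending.
 */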
475 | ||
7a36e491 | 476 | dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool) |
caa2da34 SG |
477 | { |
478 | dma_addr_t iova; | |
7a36e491 | 479 | u8 *buf; |
caa2da34 | 480 | |
7a36e491 KH |
481 | buf = napi_alloc_frag(pool->rbsize); |
482 | if (unlikely(!buf)) | |
caa2da34 SG |
483 | return -ENOMEM; |
484 | ||
7a36e491 KH |
485 | iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, |
486 | DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); | |
487 | if (unlikely(dma_mapping_error(pfvf->dev, iova))) { | |
488 | page_frag_free(buf); | |
caa2da34 SG |
489 | return -ENOMEM; |
490 | } | |
7a36e491 | 491 | |
caa2da34 SG |
492 | return iova; |
493 | } | |
494 | ||
7a36e491 KH |
495 | static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool) |
496 | { | |
497 | dma_addr_t addr; | |
498 | ||
499 | local_bh_disable(); | |
500 | addr = __otx2_alloc_rbuf(pfvf, pool); | |
501 | local_bh_enable(); | |
502 | return addr; | |
503 | } | |
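/* Note: although typed as dma_addr_t, these helpers return -ENOMEM on
 * failure; callers keep the result in an s64 and treat any value <= 0
 * as an error (see otx2_pool_refill_task() and the aura/pool init
 * paths below).
 */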
504 | ||
4ff7d148 G |
505 | void otx2_tx_timeout(struct net_device *netdev, unsigned int txq) |
506 | { | |
507 | struct otx2_nic *pfvf = netdev_priv(netdev); | |
508 | ||
509 | schedule_work(&pfvf->reset_task); | |
510 | } | |
3184fb5b | 511 | EXPORT_SYMBOL(otx2_tx_timeout); |
4ff7d148 | 512 | |
34bfe0eb SG |
513 | void otx2_get_mac_from_af(struct net_device *netdev) |
514 | { | |
515 | struct otx2_nic *pfvf = netdev_priv(netdev); | |
516 | int err; | |
517 | ||
518 | err = otx2_hw_get_mac_addr(pfvf, netdev); | |
519 | if (err) | |
520 | dev_warn(pfvf->dev, "Failed to read mac from hardware\n"); | |
521 | ||
522 | /* If AF doesn't provide a valid MAC, generate a random one */ | |
523 | if (!is_valid_ether_addr(netdev->dev_addr)) | |
524 | eth_hw_addr_random(netdev); | |
525 | } | |
3184fb5b | 526 | EXPORT_SYMBOL(otx2_get_mac_from_af); |
34bfe0eb | 527 | |
caa2da34 SG |
528 | static int otx2_get_link(struct otx2_nic *pfvf) |
529 | { | |
530 | int link = 0; | |
531 | u16 map; | |
532 | ||
533 | /* cgx lmac link */ | |
534 | if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) { | |
535 | map = pfvf->hw.tx_chan_base & 0x7FF; | |
536 | link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); | |
537 | } | |
538 | /* LBK channel */ | |
8bcf5ced SS |
539 | if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) { |
540 | map = pfvf->hw.tx_chan_base & 0x7FF; | |
541 | link = pfvf->hw.cgx_links | ((map >> 8) & 0xF); | |
542 | } | |
caa2da34 SG |
543 | |
544 | return link; | |
545 | } | |
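/* Illustrative decode (channel value assumed): if tx_chan_base & 0x7FF
 * were 0x230, the two nibbles, which read as CGX and LMAC selectors,
 * would give link = 4 * ((0x230 >> 8) & 0xF) + ((0x230 >> 4) & 0xF)
 * = 4 * 2 + 3 = 11 for the CGX case.
 */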
546 | ||
547 | int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) | |
548 | { | |
549 | struct otx2_hw *hw = &pfvf->hw; | |
550 | struct nix_txschq_config *req; | |
551 | u64 schq, parent; | |
552 | ||
553 | req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); | |
554 | if (!req) | |
555 | return -ENOMEM; | |
556 | ||
557 | req->lvl = lvl; | |
558 | req->num_regs = 1; | |
559 | ||
560 | schq = hw->txschq_list[lvl][0]; | |
561 | /* Set topology and other configuration */ | |
562 | if (lvl == NIX_TXSCH_LVL_SMQ) { | |
563 | req->reg[0] = NIX_AF_SMQX_CFG(schq); | |
b1bc8457 | 564 | req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) | |
34bfe0eb SG |
565 | OTX2_MIN_MTU; |
566 | ||
caa2da34 SG |
567 | req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) | |
568 | (0x2ULL << 36); | |
569 | req->num_regs++; | |
570 | /* MDQ config */ | |
571 | parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0]; | |
572 | req->reg[1] = NIX_AF_MDQX_PARENT(schq); | |
573 | req->regval[1] = parent << 16; | |
574 | req->num_regs++; | |
575 | /* Set DWRR quantum */ | |
576 | req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); | |
577 | req->regval[2] = DFLT_RR_QTM; | |
578 | } else if (lvl == NIX_TXSCH_LVL_TL4) { | |
579 | parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; | |
580 | req->reg[0] = NIX_AF_TL4X_PARENT(schq); | |
581 | req->regval[0] = parent << 16; | |
582 | req->num_regs++; | |
583 | req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); | |
584 | req->regval[1] = DFLT_RR_QTM; | |
585 | } else if (lvl == NIX_TXSCH_LVL_TL3) { | |
586 | parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; | |
587 | req->reg[0] = NIX_AF_TL3X_PARENT(schq); | |
588 | req->regval[0] = parent << 16; | |
589 | req->num_regs++; | |
590 | req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); | |
591 | req->regval[1] = DFLT_RR_QTM; | |
592 | } else if (lvl == NIX_TXSCH_LVL_TL2) { | |
593 | parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; | |
594 | req->reg[0] = NIX_AF_TL2X_PARENT(schq); | |
595 | req->regval[0] = parent << 16; | |
596 | ||
597 | req->num_regs++; | |
598 | req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); | |
599 | req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM; | |
600 | ||
601 | req->num_regs++; | |
602 | req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, | |
603 | otx2_get_link(pfvf)); | |
604 | /* Enable this queue and backpressure */ | |
605 | req->regval[2] = BIT_ULL(13) | BIT_ULL(12); | |
606 | ||
607 | } else if (lvl == NIX_TXSCH_LVL_TL1) { | |
608 | /* Default config for TL1. | |
609 | * For VF this is always ignored. | |
610 | */ | |
611 | ||
612 | /* Set DWRR quantum */ | |
613 | req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); | |
614 | req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; | |
615 | ||
616 | req->num_regs++; | |
617 | req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq); | |
618 | req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1); | |
619 | ||
620 | req->num_regs++; | |
621 | req->reg[2] = NIX_AF_TL1X_CIR(schq); | |
622 | req->regval[2] = 0; | |
623 | } | |
624 | ||
625 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
626 | } | |
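/* The PARENT registers programmed above stitch together the default
 * one-queue-per-level TX hierarchy:
 *
 *   TL1 -> TL2 -> TL3 -> TL4 -> SMQ/MDQ
 *
 * with each SCHEDULE register carrying the DWRR quantum (DFLT_RR_QTM),
 * so all SQs, which otx2_sq_init() maps to the single SMQ, share
 * bandwidth round-robin.
 */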
627 | ||
628 | int otx2_txsch_alloc(struct otx2_nic *pfvf) | |
629 | { | |
630 | struct nix_txsch_alloc_req *req; | |
631 | int lvl; | |
632 | ||
633 | /* Get memory to put this msg */ | |
634 | req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox); | |
635 | if (!req) | |
636 | return -ENOMEM; | |
637 | ||
638 | /* Request one schq per level */ | |
639 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) | |
640 | req->schq[lvl] = 1; | |
641 | ||
642 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
643 | } | |
644 | ||
645 | int otx2_txschq_stop(struct otx2_nic *pfvf) | |
646 | { | |
647 | struct nix_txsch_free_req *free_req; | |
648 | int lvl, schq, err; | |
649 | ||
4c3212f5 | 650 | mutex_lock(&pfvf->mbox.lock); |
caa2da34 SG |
651 | /* Free the transmit schedulers */ |
652 | free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox); | |
653 | if (!free_req) { | |
4c3212f5 | 654 | mutex_unlock(&pfvf->mbox.lock); |
caa2da34 SG |
655 | return -ENOMEM; |
656 | } | |
657 | ||
658 | free_req->flags = TXSCHQ_FREE_ALL; | |
659 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
4c3212f5 | 660 | mutex_unlock(&pfvf->mbox.lock); |
caa2da34 SG |
661 | |
662 | /* Clear the txschq list */ | |
663 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { | |
664 | for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) | |
665 | pfvf->hw.txschq_list[lvl][schq] = 0; | |
666 | } | |
667 | return err; | |
668 | } | |
669 | ||
670 | void otx2_sqb_flush(struct otx2_nic *pfvf) | |
671 | { | |
672 | int qidx, sqe_tail, sqe_head; | |
673 | u64 incr, *ptr, val; | |
b1bc8457 | 674 | int timeout = 1000; |
caa2da34 SG |
675 | |
676 | ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS); | |
677 | for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { | |
678 | incr = (u64)qidx << 32; | |
b1bc8457 | 679 | while (timeout) { |
caa2da34 SG |
680 | val = otx2_atomic64_add(incr, ptr); |
681 | sqe_head = (val >> 20) & 0x3F; | |
682 | sqe_tail = (val >> 28) & 0x3F; | |
683 | if (sqe_head == sqe_tail) | |
684 | break; | |
685 | usleep_range(1, 3); | |
b1bc8457 | 686 | timeout--; |
caa2da34 SG |
687 | } |
688 | } | |
689 | } | |
690 | ||
691 | /* RED and drop levels of CQ on packet reception. | |
692 | * For a CQ, the level is a measure of emptiness (0x0 = full, 255 = empty). | |
693 | */ | |
694 | #define RQ_PASS_LVL_CQ(skid, qsize) ((((skid) + 16) * 256) / (qsize)) | |
695 | #define RQ_DROP_LVL_CQ(skid, qsize) (((skid) * 256) / (qsize)) | |
696 | ||
697 | /* RED and drop levels of AURA for packet reception. | |
698 | * For an AURA, the level is a measure of fullness (0x0 = empty, 255 = full). | |
699 | * E.g. for RQ length 1K and pass/drop levels 204/230: | |
700 | * RED accepts pkts while free pointers are > 102 and <= 205; | |
701 | * pkts are dropped once free pointers fall below 102. | |
702 | */ | |
75f36270 | 703 | #define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */ |
caa2da34 SG |
704 | #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ |
705 | #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ | |
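/* Evaluated with integer division, the AURA thresholds above come out
 * to small "free level" values on the 0-255 scale:
 *   RQ_BP_LVL_AURA   = 255 - 217 = 38
 *   RQ_PASS_LVL_AURA = 255 - 243 = 12
 *   RQ_DROP_LVL_AURA = 255 - 253 = 2
 * For the CQ macros, assuming skid = 600 and qsize = 1024:
 *   RQ_PASS_LVL_CQ = (616 * 256) / 1024 = 154
 *   RQ_DROP_LVL_CQ = (600 * 256) / 1024 = 150
 */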
706 | ||
707 | /* Send skid of 2000 packets required for CQ size of 4K CQEs. */ | |
708 | #define SEND_CQ_SKID 2000 | |
709 | ||
710 | static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) | |
711 | { | |
712 | struct otx2_qset *qset = &pfvf->qset; | |
713 | struct nix_aq_enq_req *aq; | |
714 | ||
715 | /* Get memory to put this msg */ | |
716 | aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); | |
717 | if (!aq) | |
718 | return -ENOMEM; | |
719 | ||
720 | aq->rq.cq = qidx; | |
721 | aq->rq.ena = 1; | |
722 | aq->rq.pb_caching = 1; | |
723 | aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */ | |
724 | aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1; | |
725 | aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */ | |
726 | aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */ | |
4ff7d148 | 727 | aq->rq.qint_idx = 0; |
caa2da34 SG |
728 | aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */ |
729 | aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */ | |
730 | aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); | |
731 | aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); | |
732 | aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA; | |
733 | aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA; | |
734 | ||
735 | /* Fill AQ info */ | |
736 | aq->qidx = qidx; | |
737 | aq->ctype = NIX_AQ_CTYPE_RQ; | |
738 | aq->op = NIX_AQ_INSTOP_INIT; | |
739 | ||
740 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
741 | } | |
742 | ||
743 | static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) | |
744 | { | |
745 | struct otx2_qset *qset = &pfvf->qset; | |
746 | struct otx2_snd_queue *sq; | |
747 | struct nix_aq_enq_req *aq; | |
748 | struct otx2_pool *pool; | |
749 | int err; | |
750 | ||
751 | pool = &pfvf->qset.pool[sqb_aura]; | |
752 | sq = &qset->sq[qidx]; | |
753 | sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128; | |
754 | sq->sqe_cnt = qset->sqe_cnt; | |
755 | ||
756 | err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size); | |
757 | if (err) | |
758 | return err; | |
759 | ||
86d74760 SG |
760 | err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt, |
761 | TSO_HEADER_SIZE); | |
762 | if (err) | |
763 | return err; | |
764 | ||
caa2da34 | 765 | sq->sqe_base = sq->sqe->base; |
3ca6c4c8 SG |
766 | sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL); |
767 | if (!sq->sg) | |
768 | return -ENOMEM; | |
caa2da34 | 769 | |
c9c12d33 AM |
770 | if (pfvf->ptp) { |
771 | err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, | |
772 | sizeof(*sq->timestamps)); | |
773 | if (err) | |
774 | return err; | |
775 | } | |
776 | ||
3ca6c4c8 | 777 | sq->head = 0; |
caa2da34 SG |
778 | sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; |
779 | sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; | |
3ca6c4c8 SG |
780 | /* Set SQE threshold to 10% of total SQEs */ |
781 | sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100; | |
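/* Worked example (sizes assumed, matching the 128-byte SQE / 4KB SQB
 * note in otx2_sq_aura_pool_init()): sqe_per_sqb = 4096/128 - 1 = 31;
 * with sqe_cnt = 4096, num_sqbs = (4096 + 31) / 31 = 133 and
 * sqe_thresh = (133 * 31 * 10) / 100 = 412.
 */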
caa2da34 SG |
782 | sq->aura_id = sqb_aura; |
783 | sq->aura_fc_addr = pool->fc_addr->base; | |
784 | sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx)); | |
785 | sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0)); | |
786 | ||
d45d8979 CJ |
787 | sq->stats.bytes = 0; |
788 | sq->stats.pkts = 0; | |
789 | ||
caa2da34 SG |
790 | /* Get memory to put this msg */ |
791 | aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); | |
792 | if (!aq) | |
793 | return -ENOMEM; | |
794 | ||
795 | aq->sq.cq = pfvf->hw.rx_queues + qidx; | |
796 | aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */ | |
797 | aq->sq.cq_ena = 1; | |
798 | aq->sq.ena = 1; | |
799 | /* Only one SMQ is allocated, map all SQs to that SMQ */ | |
800 | aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; | |
801 | aq->sq.smq_rr_quantum = DFLT_RR_QTM; | |
802 | aq->sq.default_chan = pfvf->hw.tx_chan_base; | |
803 | aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ | |
804 | aq->sq.sqb_aura = sqb_aura; | |
4ff7d148 G |
805 | aq->sq.sq_int_ena = NIX_SQINT_BITS; |
806 | aq->sq.qint_idx = 0; | |
caa2da34 SG |
807 | /* Due to pipelining, a minimum of 2000 unused CQEs must be | |
808 | * maintained on the send CQ to avoid CQ overflow. | |
809 | */ | |
810 | aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt)); | |
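/* Worked example (ring size assumed): with sqe_cnt = 4096,
 * cq_limit = (2000 * 256) / 4096 = 125 on the 0-255 scale.
 */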
811 | ||
812 | /* Fill AQ info */ | |
813 | aq->qidx = qidx; | |
814 | aq->ctype = NIX_AQ_CTYPE_SQ; | |
815 | aq->op = NIX_AQ_INSTOP_INIT; | |
816 | ||
817 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
818 | } | |
819 | ||
820 | static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) | |
821 | { | |
822 | struct otx2_qset *qset = &pfvf->qset; | |
823 | struct nix_aq_enq_req *aq; | |
824 | struct otx2_cq_queue *cq; | |
825 | int err, pool_id; | |
826 | ||
827 | cq = &qset->cq[qidx]; | |
828 | cq->cq_idx = qidx; | |
829 | if (qidx < pfvf->hw.rx_queues) { | |
830 | cq->cq_type = CQ_RX; | |
abe02543 | 831 | cq->cint_idx = qidx; |
caa2da34 SG |
832 | cq->cqe_cnt = qset->rqe_cnt; |
833 | } else { | |
834 | cq->cq_type = CQ_TX; | |
3ca6c4c8 | 835 | cq->cint_idx = qidx - pfvf->hw.rx_queues; |
caa2da34 SG |
836 | cq->cqe_cnt = qset->sqe_cnt; |
837 | } | |
838 | cq->cqe_size = pfvf->qset.xqe_size; | |
839 | ||
840 | /* Allocate memory for CQEs */ | |
841 | err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size); | |
842 | if (err) | |
843 | return err; | |
844 | ||
845 | /* Save CQE CPU base for faster reference */ | |
846 | cq->cqe_base = cq->cqe->base; | |
847 | /* If all RQ auras point to a single pool, then all CQs' | |
848 | * receive buffer pools also point to that same pool. | |
849 | */ | |
850 | pool_id = ((cq->cq_type == CQ_RX) && | |
851 | (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx; | |
852 | cq->rbpool = &qset->pool[pool_id]; | |
4ff7d148 | 853 | cq->refill_task_sched = false; |
caa2da34 SG |
854 | |
855 | /* Get memory to put this msg */ | |
856 | aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); | |
857 | if (!aq) | |
858 | return -ENOMEM; | |
859 | ||
860 | aq->cq.ena = 1; | |
861 | aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4); | |
862 | aq->cq.caching = 1; | |
863 | aq->cq.base = cq->cqe->iova; | |
abe02543 | 864 | aq->cq.cint_idx = cq->cint_idx; |
4ff7d148 G |
865 | aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS; |
866 | aq->cq.qint_idx = 0; | |
caa2da34 SG |
867 | aq->cq.avg_level = 255; |
868 | ||
869 | if (qidx < pfvf->hw.rx_queues) { | |
870 | aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt); | |
871 | aq->cq.drop_ena = 1; | |
75f36270 G |
872 | |
873 | /* Enable receive CQ backpressure */ | |
874 | aq->cq.bp_ena = 1; | |
875 | aq->cq.bpid = pfvf->bpid[0]; | |
876 | ||
877 | /* Set the backpressure level the same as the CQ pass level */ | |
878 | aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt); | |
caa2da34 SG |
879 | } |
880 | ||
881 | /* Fill AQ info */ | |
882 | aq->qidx = qidx; | |
883 | aq->ctype = NIX_AQ_CTYPE_CQ; | |
884 | aq->op = NIX_AQ_INSTOP_INIT; | |
885 | ||
886 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
887 | } | |
888 | ||
4ff7d148 G |
889 | static void otx2_pool_refill_task(struct work_struct *work) |
890 | { | |
891 | struct otx2_cq_queue *cq; | |
892 | struct otx2_pool *rbpool; | |
893 | struct refill_work *wrk; | |
894 | int qidx, free_ptrs = 0; | |
895 | struct otx2_nic *pfvf; | |
896 | s64 bufptr; | |
897 | ||
898 | wrk = container_of(work, struct refill_work, pool_refill_work.work); | |
899 | pfvf = wrk->pf; | |
900 | qidx = wrk - pfvf->refill_wrk; | |
901 | cq = &pfvf->qset.cq[qidx]; | |
902 | rbpool = cq->rbpool; | |
903 | free_ptrs = cq->pool_ptrs; | |
904 | ||
905 | while (cq->pool_ptrs) { | |
7a36e491 | 906 | bufptr = otx2_alloc_rbuf(pfvf, rbpool); |
4ff7d148 G |
907 | if (bufptr <= 0) { |
908 | /* Schedule the WQ again if we failed to free at least half | |
909 | * of the pointers; else NAPI refilling resumes for this RQ. | |
910 | */ | |
911 | if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) { | |
912 | struct delayed_work *dwork; | |
913 | ||
914 | dwork = &wrk->pool_refill_work; | |
915 | schedule_delayed_work(dwork, | |
916 | msecs_to_jiffies(100)); | |
917 | } else { | |
918 | cq->refill_task_sched = false; | |
919 | } | |
920 | return; | |
921 | } | |
922 | otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); | |
923 | cq->pool_ptrs--; | |
924 | } | |
925 | cq->refill_task_sched = false; | |
926 | } | |
927 | ||
caa2da34 SG |
928 | int otx2_config_nix_queues(struct otx2_nic *pfvf) |
929 | { | |
930 | int qidx, err; | |
931 | ||
932 | /* Initialize RX queues */ | |
933 | for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) { | |
934 | u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); | |
935 | ||
936 | err = otx2_rq_init(pfvf, qidx, lpb_aura); | |
937 | if (err) | |
938 | return err; | |
939 | } | |
940 | ||
941 | /* Initialize TX queues */ | |
942 | for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) { | |
943 | u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); | |
944 | ||
945 | err = otx2_sq_init(pfvf, qidx, sqb_aura); | |
946 | if (err) | |
947 | return err; | |
948 | } | |
949 | ||
950 | /* Initialize completion queues */ | |
951 | for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { | |
952 | err = otx2_cq_init(pfvf, qidx); | |
953 | if (err) | |
954 | return err; | |
955 | } | |
956 | ||
4ff7d148 G |
957 | /* Initialize work queue for receive buffer refill */ |
958 | pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt, | |
959 | sizeof(struct refill_work), GFP_KERNEL); | |
960 | if (!pfvf->refill_wrk) | |
961 | return -ENOMEM; | |
962 | ||
963 | for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) { | |
964 | pfvf->refill_wrk[qidx].pf = pfvf; | |
965 | INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work, | |
966 | otx2_pool_refill_task); | |
967 | } | |
caa2da34 SG |
968 | return 0; |
969 | } | |
970 | ||
05fcc9e0 SG |
971 | int otx2_config_nix(struct otx2_nic *pfvf) |
972 | { | |
973 | struct nix_lf_alloc_req *nixlf; | |
974 | struct nix_lf_alloc_rsp *rsp; | |
975 | int err; | |
976 | ||
977 | pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512; | |
978 | ||
979 | /* Get memory to put this msg */ | |
980 | nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox); | |
981 | if (!nixlf) | |
982 | return -ENOMEM; | |
983 | ||
984 | /* Set RQ/SQ/CQ counts */ | |
985 | nixlf->rq_cnt = pfvf->hw.rx_queues; | |
986 | nixlf->sq_cnt = pfvf->hw.tx_queues; | |
987 | nixlf->cq_cnt = pfvf->qset.cq_cnt; | |
85069e95 SG |
988 | nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE; |
989 | nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */ | |
05fcc9e0 SG |
990 | nixlf->xqe_sz = NIX_XQESZ_W16; |
991 | /* We don't know absolute NPA LF idx attached. | |
992 | * AF will replace 'RVU_DEFAULT_PF_FUNC' with | |
993 | * NPA LF attached to this RVU PF/VF. | |
994 | */ | |
995 | nixlf->npa_func = RVU_DEFAULT_PF_FUNC; | |
996 | /* Disable alignment pad, enable L2 length check, | |
997 | * enable L4 TCP/UDP checksum verification. | |
998 | */ | |
999 | nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37); | |
1000 | ||
1001 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1002 | if (err) | |
1003 | return err; | |
1004 | ||
1005 | rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, | |
1006 | &nixlf->hdr); | |
1007 | if (IS_ERR(rsp)) | |
1008 | return PTR_ERR(rsp); | |
1009 | ||
1010 | if (rsp->qints < 1) | |
1011 | return -ENXIO; | |
1012 | ||
1013 | return rsp->hdr.rc; | |
1014 | } | |
1015 | ||
caa2da34 SG |
1016 | void otx2_sq_free_sqbs(struct otx2_nic *pfvf) |
1017 | { | |
1018 | struct otx2_qset *qset = &pfvf->qset; | |
1019 | struct otx2_hw *hw = &pfvf->hw; | |
1020 | struct otx2_snd_queue *sq; | |
1021 | int sqb, qidx; | |
1022 | u64 iova, pa; | |
1023 | ||
1024 | for (qidx = 0; qidx < hw->tx_queues; qidx++) { | |
1025 | sq = &qset->sq[qidx]; | |
1026 | if (!sq->sqb_ptrs) | |
1027 | continue; | |
1028 | for (sqb = 0; sqb < sq->sqb_count; sqb++) { | |
1029 | if (!sq->sqb_ptrs[sqb]) | |
1030 | continue; | |
1031 | iova = sq->sqb_ptrs[sqb]; | |
1032 | pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); | |
1033 | dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size, | |
1034 | DMA_FROM_DEVICE, | |
1035 | DMA_ATTR_SKIP_CPU_SYNC); | |
1036 | put_page(virt_to_page(phys_to_virt(pa))); | |
1037 | } | |
1038 | sq->sqb_count = 0; | |
1039 | } | |
1040 | } | |
1041 | ||
1042 | void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) | |
1043 | { | |
1044 | int pool_id, pool_start = 0, pool_end = 0, size = 0; | |
1045 | u64 iova, pa; | |
1046 | ||
1047 | if (type == AURA_NIX_SQ) { | |
1048 | pool_start = otx2_get_pool_idx(pfvf, type, 0); | |
1049 | pool_end = pool_start + pfvf->hw.sqpool_cnt; | |
1050 | size = pfvf->hw.sqb_size; | |
1051 | } | |
1052 | if (type == AURA_NIX_RQ) { | |
1053 | pool_start = otx2_get_pool_idx(pfvf, type, 0); | |
1054 | pool_end = pfvf->hw.rqpool_cnt; | |
1055 | size = pfvf->rbsize; | |
1056 | } | |
1057 | ||
1058 | /* Free SQB and RQB pointers from the aura pool */ | |
1059 | for (pool_id = pool_start; pool_id < pool_end; pool_id++) { | |
1060 | iova = otx2_aura_allocptr(pfvf, pool_id); | |
1061 | while (iova) { | |
1062 | if (type == AURA_NIX_RQ) | |
1063 | iova -= OTX2_HEAD_ROOM; | |
1064 | ||
1065 | pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); | |
1066 | dma_unmap_page_attrs(pfvf->dev, iova, size, | |
1067 | DMA_FROM_DEVICE, | |
1068 | DMA_ATTR_SKIP_CPU_SYNC); | |
1069 | put_page(virt_to_page(phys_to_virt(pa))); | |
1070 | iova = otx2_aura_allocptr(pfvf, pool_id); | |
1071 | } | |
1072 | } | |
1073 | } | |
1074 | ||
1075 | void otx2_aura_pool_free(struct otx2_nic *pfvf) | |
1076 | { | |
1077 | struct otx2_pool *pool; | |
1078 | int pool_id; | |
1079 | ||
1080 | if (!pfvf->qset.pool) | |
1081 | return; | |
1082 | ||
1083 | for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) { | |
1084 | pool = &pfvf->qset.pool[pool_id]; | |
1085 | qmem_free(pfvf->dev, pool->stack); | |
1086 | qmem_free(pfvf->dev, pool->fc_addr); | |
1087 | } | |
1088 | devm_kfree(pfvf->dev, pfvf->qset.pool); | |
b1bc8457 | 1089 | pfvf->qset.pool = NULL; |
caa2da34 SG |
1090 | } |
1091 | ||
1092 | static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, | |
1093 | int pool_id, int numptrs) | |
1094 | { | |
1095 | struct npa_aq_enq_req *aq; | |
1096 | struct otx2_pool *pool; | |
1097 | int err; | |
1098 | ||
1099 | pool = &pfvf->qset.pool[pool_id]; | |
1100 | ||
1101 | /* Allocate memory for HW to update Aura count. | |
1102 | * Alloc one cache line, so that it fits all FC_STYPE modes. | |
1103 | */ | |
1104 | if (!pool->fc_addr) { | |
1105 | err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN); | |
1106 | if (err) | |
1107 | return err; | |
1108 | } | |
1109 | ||
1110 | /* Initialize this aura's context via AF */ | |
1111 | aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); | |
1112 | if (!aq) { | |
1113 | /* Shared mbox memory buffer is full, flush it and retry */ | |
1114 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1115 | if (err) | |
1116 | return err; | |
1117 | aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); | |
1118 | if (!aq) | |
1119 | return -ENOMEM; | |
1120 | } | |
1121 | ||
1122 | aq->aura_id = aura_id; | |
1123 | /* Will be filled by AF with correct pool context address */ | |
1124 | aq->aura.pool_addr = pool_id; | |
1125 | aq->aura.pool_caching = 1; | |
1126 | aq->aura.shift = ilog2(numptrs) - 8; | |
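/* E.g. (pointer count assumed): numptrs = 1024 gives
 * shift = ilog2(1024) - 8 = 2, which appears to scale the aura's
 * 8-bit levels/thresholds up to the actual pointer count.
 */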
1127 | aq->aura.count = numptrs; | |
1128 | aq->aura.limit = numptrs; | |
1129 | aq->aura.avg_level = 255; | |
1130 | aq->aura.ena = 1; | |
1131 | aq->aura.fc_ena = 1; | |
1132 | aq->aura.fc_addr = pool->fc_addr->iova; | |
1133 | aq->aura.fc_hyst_bits = 0; /* Store count on all updates */ | |
1134 | ||
75f36270 G |
1135 | /* Enable backpressure for RQ aura */ |
1136 | if (aura_id < pfvf->hw.rqpool_cnt) { | |
1137 | aq->aura.bp_ena = 0; | |
1138 | aq->aura.nix0_bpid = pfvf->bpid[0]; | |
1139 | /* Set backpressure level for RQ's Aura */ | |
1140 | aq->aura.bp = RQ_BP_LVL_AURA; | |
1141 | } | |
1142 | ||
caa2da34 SG |
1143 | /* Fill AQ info */ |
1144 | aq->ctype = NPA_AQ_CTYPE_AURA; | |
1145 | aq->op = NPA_AQ_INSTOP_INIT; | |
1146 | ||
1147 | return 0; | |
1148 | } | |
1149 | ||
1150 | static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, | |
1151 | int stack_pages, int numptrs, int buf_size) | |
1152 | { | |
1153 | struct npa_aq_enq_req *aq; | |
1154 | struct otx2_pool *pool; | |
1155 | int err; | |
1156 | ||
1157 | pool = &pfvf->qset.pool[pool_id]; | |
1158 | /* Alloc memory for stack which is used to store buffer pointers */ | |
1159 | err = qmem_alloc(pfvf->dev, &pool->stack, | |
1160 | stack_pages, pfvf->hw.stack_pg_bytes); | |
1161 | if (err) | |
1162 | return err; | |
1163 | ||
1164 | pool->rbsize = buf_size; | |
caa2da34 SG |
1165 | |
1166 | /* Initialize this pool's context via AF */ | |
1167 | aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); | |
1168 | if (!aq) { | |
1169 | /* Shared mbox memory buffer is full, flush it and retry */ | |
1170 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1171 | if (err) { | |
1172 | qmem_free(pfvf->dev, pool->stack); | |
1173 | return err; | |
1174 | } | |
1175 | aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); | |
1176 | if (!aq) { | |
1177 | qmem_free(pfvf->dev, pool->stack); | |
1178 | return -ENOMEM; | |
1179 | } | |
1180 | } | |
1181 | ||
1182 | aq->aura_id = pool_id; | |
1183 | aq->pool.stack_base = pool->stack->iova; | |
1184 | aq->pool.stack_caching = 1; | |
1185 | aq->pool.ena = 1; | |
1186 | aq->pool.buf_size = buf_size / 128; | |
1187 | aq->pool.stack_max_pages = stack_pages; | |
1188 | aq->pool.shift = ilog2(numptrs) - 8; | |
1189 | aq->pool.ptr_start = 0; | |
1190 | aq->pool.ptr_end = ~0ULL; | |
1191 | ||
1192 | /* Fill AQ info */ | |
1193 | aq->ctype = NPA_AQ_CTYPE_POOL; | |
1194 | aq->op = NPA_AQ_INSTOP_INIT; | |
1195 | ||
1196 | return 0; | |
1197 | } | |
1198 | ||
1199 | int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) | |
1200 | { | |
1201 | int qidx, pool_id, stack_pages, num_sqbs; | |
1202 | struct otx2_qset *qset = &pfvf->qset; | |
1203 | struct otx2_hw *hw = &pfvf->hw; | |
1204 | struct otx2_snd_queue *sq; | |
1205 | struct otx2_pool *pool; | |
1206 | int err, ptr; | |
1207 | s64 bufptr; | |
1208 | ||
1209 | /* Calculate number of SQBs needed. | |
1210 | * | |
1211 | * For a 128-byte SQE and a 4KB SQB, 31 SQEs fit in one SQB; | |
1212 | * the last SQE slot is used to point to the next SQB. | |
1213 | */ | |
1214 | num_sqbs = (hw->sqb_size / 128) - 1; | |
1215 | num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs; | |
1216 | ||
1217 | /* Get the number of stack pages needed */ | |
1218 | stack_pages = | |
1219 | (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; | |
1220 | ||
1221 | for (qidx = 0; qidx < hw->tx_queues; qidx++) { | |
1222 | pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); | |
1223 | /* Initialize aura context */ | |
1224 | err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs); | |
1225 | if (err) | |
1226 | goto fail; | |
1227 | ||
1228 | /* Initialize pool context */ | |
1229 | err = otx2_pool_init(pfvf, pool_id, stack_pages, | |
1230 | num_sqbs, hw->sqb_size); | |
1231 | if (err) | |
1232 | goto fail; | |
1233 | } | |
1234 | ||
1235 | /* Flush accumulated messages */ | |
1236 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1237 | if (err) | |
1238 | goto fail; | |
1239 | ||
1240 | /* Allocate pointers and free them to aura/pool */ | |
1241 | for (qidx = 0; qidx < hw->tx_queues; qidx++) { | |
1242 | pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx); | |
1243 | pool = &pfvf->qset.pool[pool_id]; | |
1244 | ||
1245 | sq = &qset->sq[qidx]; | |
1246 | sq->sqb_count = 0; | |
873b807c | 1247 | sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL); |
caa2da34 SG |
1248 | if (!sq->sqb_ptrs) |
1249 | return -ENOMEM; | |
1250 | ||
1251 | for (ptr = 0; ptr < num_sqbs; ptr++) { | |
7a36e491 | 1252 | bufptr = otx2_alloc_rbuf(pfvf, pool); |
caa2da34 SG |
1253 | if (bufptr <= 0) |
1254 | return bufptr; | |
1255 | otx2_aura_freeptr(pfvf, pool_id, bufptr); | |
1256 | sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; | |
1257 | } | |
caa2da34 SG |
1258 | } |
1259 | ||
1260 | return 0; | |
1261 | fail: | |
1262 | otx2_mbox_reset(&pfvf->mbox.mbox, 0); | |
1263 | otx2_aura_pool_free(pfvf); | |
1264 | return err; | |
1265 | } | |
1266 | ||
1267 | int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) | |
1268 | { | |
1269 | struct otx2_hw *hw = &pfvf->hw; | |
1270 | int stack_pages, pool_id, rq; | |
1271 | struct otx2_pool *pool; | |
1272 | int err, ptr, num_ptrs; | |
1273 | s64 bufptr; | |
1274 | ||
1275 | num_ptrs = pfvf->qset.rqe_cnt; | |
1276 | ||
1277 | stack_pages = | |
1278 | (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; | |
1279 | ||
1280 | for (rq = 0; rq < hw->rx_queues; rq++) { | |
1281 | pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); | |
1282 | /* Initialize aura context */ | |
1283 | err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); | |
1284 | if (err) | |
1285 | goto fail; | |
1286 | } | |
1287 | for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { | |
1288 | err = otx2_pool_init(pfvf, pool_id, stack_pages, | |
1289 | num_ptrs, pfvf->rbsize); | |
1290 | if (err) | |
1291 | goto fail; | |
1292 | } | |
1293 | ||
1294 | /* Flush accumulated messages */ | |
1295 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1296 | if (err) | |
1297 | goto fail; | |
1298 | ||
1299 | /* Allocate pointers and free them to aura/pool */ | |
1300 | for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { | |
1301 | pool = &pfvf->qset.pool[pool_id]; | |
1302 | for (ptr = 0; ptr < num_ptrs; ptr++) { | |
7a36e491 | 1303 | bufptr = otx2_alloc_rbuf(pfvf, pool); |
caa2da34 SG |
1304 | if (bufptr <= 0) |
1305 | return bufptr; | |
1306 | otx2_aura_freeptr(pfvf, pool_id, | |
1307 | bufptr + OTX2_HEAD_ROOM); | |
1308 | } | |
caa2da34 SG |
1309 | } |
1310 | ||
1311 | return 0; | |
1312 | fail: | |
1313 | otx2_mbox_reset(&pfvf->mbox.mbox, 0); | |
1314 | otx2_aura_pool_free(pfvf); | |
1315 | return err; | |
1316 | } | |
1317 | ||
05fcc9e0 SG |
1318 | int otx2_config_npa(struct otx2_nic *pfvf) |
1319 | { | |
1320 | struct otx2_qset *qset = &pfvf->qset; | |
1321 | struct npa_lf_alloc_req *npalf; | |
1322 | struct otx2_hw *hw = &pfvf->hw; | |
1323 | int aura_cnt; | |
1324 | ||
1325 | /* Pool - Stack of free buffer pointers | |
1326 | * Aura - Allocates/frees pointers from/to the pool for NIX DMA. | |
1327 | */ | |
1328 | ||
1329 | if (!hw->pool_cnt) | |
1330 | return -EINVAL; | |
1331 | ||
bf2bcd6f XW |
1332 | qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, |
1333 | sizeof(struct otx2_pool), GFP_KERNEL); | |
05fcc9e0 SG |
1334 | if (!qset->pool) |
1335 | return -ENOMEM; | |
1336 | ||
1337 | /* Get memory to put this msg */ | |
1338 | npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); | |
1339 | if (!npalf) | |
1340 | return -ENOMEM; | |
1341 | ||
1342 | /* Set aura and pool counts */ | |
1343 | npalf->nr_pools = hw->pool_cnt; | |
1344 | aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); | |
1345 | npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1; | |
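/* E.g. (pool count assumed): for hw->pool_cnt = 256,
 * aura_cnt = ilog2(256) = 8, which is >= ilog2(128) = 7, so
 * aura_sz = 8 - 6 = 2; smaller configurations fall back to an
 * aura_sz of 1.
 */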
1346 | ||
1347 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
1348 | } | |
1349 | ||
1350 | int otx2_detach_resources(struct mbox *mbox) | |
1351 | { | |
1352 | struct rsrc_detach *detach; | |
1353 | ||
4c3212f5 | 1354 | mutex_lock(&mbox->lock); |
05fcc9e0 SG |
1355 | detach = otx2_mbox_alloc_msg_detach_resources(mbox); |
1356 | if (!detach) { | |
4c3212f5 | 1357 | mutex_unlock(&mbox->lock); |
05fcc9e0 SG |
1358 | return -ENOMEM; |
1359 | } | |
1360 | ||
1361 | /* detach all */ | |
1362 | detach->partial = false; | |
1363 | ||
1364 | /* Send detach request to AF */ | |
1365 | otx2_mbox_msg_send(&mbox->mbox, 0); | |
4c3212f5 | 1366 | mutex_unlock(&mbox->lock); |
05fcc9e0 SG |
1367 | return 0; |
1368 | } | |
3184fb5b | 1369 | EXPORT_SYMBOL(otx2_detach_resources); |
05fcc9e0 SG |
1370 | |
1371 | int otx2_attach_npa_nix(struct otx2_nic *pfvf) | |
1372 | { | |
1373 | struct rsrc_attach *attach; | |
1374 | struct msg_req *msix; | |
1375 | int err; | |
1376 | ||
4c3212f5 | 1377 | mutex_lock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1378 | /* Get memory to put this msg */ |
1379 | attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); | |
1380 | if (!attach) { | |
4c3212f5 | 1381 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1382 | return -ENOMEM; |
1383 | } | |
1384 | ||
1385 | attach->npalf = true; | |
1386 | attach->nixlf = true; | |
1387 | ||
1388 | /* Send attach request to AF */ | |
1389 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1390 | if (err) { | |
4c3212f5 | 1391 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1392 | return err; |
1393 | } | |
1394 | ||
caa2da34 SG |
1395 | pfvf->nix_blkaddr = BLKADDR_NIX0; |
1396 | ||
1397 | /* If the platform has two NIX blocks then LF may be | |
1398 | * allocated from NIX1. | |
1399 | */ | |
1400 | if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) | |
1401 | pfvf->nix_blkaddr = BLKADDR_NIX1; | |
1402 | ||
05fcc9e0 SG |
1403 | /* Get NPA and NIX MSIX vector offsets */ |
1404 | msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); | |
1405 | if (!msix) { | |
4c3212f5 | 1406 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1407 | return -ENOMEM; |
1408 | } | |
1409 | ||
1410 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1411 | if (err) { | |
4c3212f5 | 1412 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1413 | return err; |
1414 | } | |
4c3212f5 | 1415 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1416 | |
1417 | if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || | |
1418 | pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { | |
1419 | dev_err(pfvf->dev, | |
1420 | "RVUPF: Invalid MSIX vector offset for NPA/NIX\n"); | |
1421 | return -EINVAL; | |
1422 | } | |
1423 | ||
1424 | return 0; | |
1425 | } | |
3184fb5b | 1426 | EXPORT_SYMBOL(otx2_attach_npa_nix); |
05fcc9e0 | 1427 | |
caa2da34 SG |
1428 | void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) |
1429 | { | |
1430 | struct hwctx_disable_req *req; | |
1431 | ||
4c3212f5 | 1432 | mutex_lock(&mbox->lock); |
caa2da34 SG |
1433 | /* Request AQ to disable this context */ |
1434 | if (npa) | |
1435 | req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); | |
1436 | else | |
1437 | req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); | |
1438 | ||
1439 | if (!req) { | |
4c3212f5 | 1440 | mutex_unlock(&mbox->lock); |
caa2da34 SG |
1441 | return; |
1442 | } | |
1443 | ||
1444 | req->ctype = type; | |
1445 | ||
1446 | if (otx2_sync_mbox_msg(mbox)) | |
1447 | dev_err(mbox->pfvf->dev, "%s failed to disable context\n", | |
1448 | __func__); | |
1449 | ||
4c3212f5 | 1450 | mutex_unlock(&mbox->lock); |
caa2da34 SG |
1451 | } |
1452 | ||
75f36270 G |
1453 | int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) |
1454 | { | |
1455 | struct nix_bp_cfg_req *req; | |
1456 | ||
1457 | if (enable) | |
1458 | req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); | |
1459 | else | |
1460 | req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); | |
1461 | ||
1462 | if (!req) | |
1463 | return -ENOMEM; | |
1464 | ||
1465 | req->chan_base = 0; | |
1466 | req->chan_cnt = 1; | |
1467 | req->bpid_per_chan = 0; | |
1468 | ||
1469 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
1470 | } | |
1471 | ||
d45d8979 CJ |
1472 | /* Mbox message handlers */ |
1473 | void mbox_handler_cgx_stats(struct otx2_nic *pfvf, | |
1474 | struct cgx_stats_rsp *rsp) | |
1475 | { | |
1476 | int id; | |
1477 | ||
1478 | for (id = 0; id < CGX_RX_STATS_COUNT; id++) | |
1479 | pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; | |
1480 | for (id = 0; id < CGX_TX_STATS_COUNT; id++) | |
1481 | pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; | |
1482 | } | |
1483 | ||
caa2da34 SG |
1484 | void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf, |
1485 | struct nix_txsch_alloc_rsp *rsp) | |
1486 | { | |
1487 | int lvl, schq; | |
1488 | ||
1489 | /* Setup transmit scheduler list */ | |
1490 | for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) | |
1491 | for (schq = 0; schq < rsp->schq[lvl]; schq++) | |
1492 | pf->hw.txschq_list[lvl][schq] = | |
1493 | rsp->schq_list[lvl][schq]; | |
1494 | } | |
3184fb5b | 1495 | EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc); |
caa2da34 | 1496 | |
05fcc9e0 SG |
1497 | void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, |
1498 | struct npa_lf_alloc_rsp *rsp) | |
1499 | { | |
1500 | pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; | |
1501 | pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; | |
1502 | } | |
3184fb5b | 1503 | EXPORT_SYMBOL(mbox_handler_npa_lf_alloc); |
05fcc9e0 SG |
1504 | |
1505 | void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, | |
1506 | struct nix_lf_alloc_rsp *rsp) | |
1507 | { | |
1508 | pfvf->hw.sqb_size = rsp->sqb_size; | |
1509 | pfvf->hw.rx_chan_base = rsp->rx_chan_base; | |
1510 | pfvf->hw.tx_chan_base = rsp->tx_chan_base; | |
86d74760 SG |
1511 | pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; |
1512 | pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; | |
8bcf5ced SS |
1513 | pfvf->hw.cgx_links = rsp->cgx_links; |
1514 | pfvf->hw.lbk_links = rsp->lbk_links; | |
05fcc9e0 | 1515 | } |
3184fb5b | 1516 | EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); |
05fcc9e0 SG |
1517 | |
1518 | void mbox_handler_msix_offset(struct otx2_nic *pfvf, | |
1519 | struct msix_offset_rsp *rsp) | |
1520 | { | |
1521 | pfvf->hw.npa_msixoff = rsp->npa_msixoff; | |
1522 | pfvf->hw.nix_msixoff = rsp->nix_msixoff; | |
1523 | } | |
3184fb5b | 1524 | EXPORT_SYMBOL(mbox_handler_msix_offset); |
5a6d7c9d | 1525 | |
75f36270 G |
1526 | void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, |
1527 | struct nix_bp_cfg_rsp *rsp) | |
1528 | { | |
1529 | int chan, chan_id; | |
1530 | ||
1531 | for (chan = 0; chan < rsp->chan_cnt; chan++) { | |
1532 | chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); | |
1533 | pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; | |
1534 | } | |
1535 | } | |
3184fb5b | 1536 | EXPORT_SYMBOL(mbox_handler_nix_bp_enable); |
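/* Illustrative decode (response value assumed): chan_bpid = 0x0C05
 * unpacks as chan_id = (0x0C05 >> 10) & 0x7F = 3 and
 * bpid = 0x0C05 & 0x3FF = 5.
 */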
75f36270 | 1537 | |
04a21ef3 SG |
1538 | void otx2_free_cints(struct otx2_nic *pfvf, int n) |
1539 | { | |
1540 | struct otx2_qset *qset = &pfvf->qset; | |
1541 | struct otx2_hw *hw = &pfvf->hw; | |
1542 | int irq, qidx; | |
1543 | ||
1544 | for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; | |
1545 | qidx < n; | |
1546 | qidx++, irq++) { | |
1547 | int vector = pci_irq_vector(pfvf->pdev, irq); | |
1548 | ||
1549 | irq_set_affinity_hint(vector, NULL); | |
1550 | free_cpumask_var(hw->affinity_mask[irq]); | |
1551 | free_irq(vector, &qset->napi[qidx]); | |
1552 | } | |
1553 | } | |
1554 | ||
1555 | void otx2_set_cints_affinity(struct otx2_nic *pfvf) | |
1556 | { | |
1557 | struct otx2_hw *hw = &pfvf->hw; | |
1558 | int vec, cpu, irq, cint; | |
1559 | ||
1560 | vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; | |
1561 | cpu = cpumask_first(cpu_online_mask); | |
1562 | ||
1563 | /* CQ interrupts */ | |
1564 | for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { | |
1565 | if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) | |
1566 | return; | |
1567 | ||
1568 | cpumask_set_cpu(cpu, hw->affinity_mask[vec]); | |
1569 | ||
1570 | irq = pci_irq_vector(pfvf->pdev, vec); | |
1571 | irq_set_affinity_hint(irq, hw->affinity_mask[vec]); | |
1572 | ||
1573 | cpu = cpumask_next(cpu, cpu_online_mask); | |
1574 | if (unlikely(cpu >= nr_cpu_ids)) | |
1575 | cpu = 0; | |
1576 | } | |
1577 | } | |
1578 | ||
5a6d7c9d SG |
1579 | #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ |
1580 | int __weak \ | |
1581 | otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ | |
1582 | struct _req_type *req, \ | |
1583 | struct _rsp_type *rsp) \ | |
1584 | { \ | |
1585 | /* Nothing to do here */ \ | |
1586 | return 0; \ | |
1587 | } \ | |
1588 | EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); | |
1589 | MBOX_UP_CGX_MESSAGES | |
1590 | #undef M |