// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/page_pool/helpers.h>
#include <net/tso.h>
#include <linux/bitfield.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"

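/* The NIX_LF_{RQ,SQ}_OP_* registers below return per-queue statistics
 * through an atomic add: the queue index goes in bits <63:32> of the
 * operand and the value read back is the selected queue's counter.
 */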
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

static void otx2_nix_sq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
	if (req)
		otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];

	if (!pfvf->qset.rq)
		return 0;

	otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
	return 1;
}

int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];

	if (!pfvf->qset.sq)
		return 0;

	if (qidx >= pfvf->hw.non_qos_queues) {
		if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap))
			return 0;
	}

	otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
	return 1;
}

void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
	struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;

	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
	dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
	dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
	dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
	dev_stats->rx_frames = dev_stats->rx_bcast_frames +
			       dev_stats->rx_mcast_frames +
			       dev_stats->rx_ucast_frames;

	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
	dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
	dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
	dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
	dev_stats->tx_frames = dev_stats->tx_bcast_frames +
			       dev_stats->tx_mcast_frames +
			       dev_stats->tx_ucast_frames;
}

void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_dev_stats *dev_stats;

	otx2_get_dev_stats(pfvf);

	dev_stats = &pfvf->hw.dev_stats;
	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_frames;
	stats->rx_dropped = dev_stats->rx_drops;
	stats->multicast = dev_stats->rx_mcast_frames;

	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}
EXPORT_SYMBOL(otx2_get_stats64);

/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
	struct nix_set_mac_addr *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
				struct net_device *netdev)
{
	struct nix_get_mac_addr_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	eth_hw_addr_set(netdev, rsp->mac_addr);
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_set_mac_address(struct net_device *netdev, void *p)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
		eth_hw_addr_set(netdev, addr->sa_data);
		/* update dmac field in vlan offload rule */
		if (netif_running(netdev) &&
		    pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
			otx2_install_rxvlan_offload_flow(pfvf);
		/* update dmac address in ntuple and DMAC filter list */
		if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
			otx2_dmacflt_update_pfmac_flow(pfvf);
	} else {
		return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_set_mac_address);

int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	u16 maxlen;
	int err;

	maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	/* Use max receive length supported by hardware for loopback devices */
	if (is_otx2_lbkvf(pfvf->pdev))
		req->maxlen = maxlen;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
	struct cgx_pause_frm_cfg *req;
	int err;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
	req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
	req->set = 1;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
EXPORT_SYMBOL(otx2_config_pause_frm);

int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct nix_rss_flowkey_cfg_rsp *rsp;
	struct nix_rss_flowkey_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
	req->flowkey_cfg = rss->flowkey_cfg;
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_rss_flowkey_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto fail;
	}

	pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	const int index = rss->rss_size * ctx_id;
	struct mbox *mbox = &pfvf->mbox;
	struct otx2_rss_ctx *rss_ctx;
	struct nix_aq_enq_req *aq;
	int idx, err;

	mutex_lock(&mbox->lock);
	rss_ctx = rss->rss_ctx[ctx_id];
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				mutex_unlock(&mbox->lock);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				mutex_unlock(&mbox->lock);
				return -ENOMEM;
			}
		}

		aq->rss.rq = rss_ctx->ind_tbl[idx];

		/* Fill AQ info */
		aq->qidx = index + idx;
		aq->ctype = NIX_AQ_CTYPE_RSS;
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	mutex_unlock(&mbox->lock);
	return err;
}

void otx2_set_rss_key(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u64 *key = (u64 *)&rss->key[4];
	int idx;

	/* 352bit or 44byte key needs to be configured as below
	 * NIX_LF_RX_SECRETX0 = key<351:288>
	 * NIX_LF_RX_SECRETX1 = key<287:224>
	 * NIX_LF_RX_SECRETX2 = key<223:160>
	 * NIX_LF_RX_SECRETX3 = key<159:96>
	 * NIX_LF_RX_SECRETX4 = key<95:32>
	 * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
	 */
	otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
		     (u64)(*((u32 *)&rss->key)) << 32);
	idx = sizeof(rss->key) / sizeof(u64);
	while (idx > 0) {
		idx--;
		otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
	}
}

int otx2_rss_init(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct otx2_rss_ctx *rss_ctx;
	int idx, ret = 0;

	rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Init RSS key if it is not setup already */
	if (!rss->enable)
		netdev_rss_key_fill(rss->key, sizeof(rss->key));
	otx2_set_rss_key(pfvf);

	if (!netif_is_rxfh_configured(pfvf->netdev)) {
		/* Set RSS group 0 as default indirection table */
		rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
								  GFP_KERNEL);
		if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
			return -ENOMEM;

		rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] =
				ethtool_rxfh_indir_default(idx,
							   pfvf->hw.rx_queues);
	}
	ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
	if (ret)
		return ret;

	/* Flowkey or hash config to be used for generating flow tag */
	rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
			   NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
			   NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
			   NIX_FLOW_KEY_TYPE_IPV4_PROTO;

	ret = otx2_set_flowkey_cfg(pfvf);
	if (ret)
		return ret;

	rss->enable = true;
	return 0;
}

/* Setup UDP segmentation algorithm in HW */
static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
{
	struct nix_lso_format *field;

	field = (struct nix_lso_format *)&lso->fields[0];
	lso->field_mask = GENMASK(18, 0);

	/* IP's Length field */
	field->layer = NIX_TXLAYER_OL3;
	/* In IPv4 the length field is at offset 2 bytes, in IPv6 at 4 */
	field->offset = v4 ? 2 : 4;
	field->sizem1 = 1; /* i.e. 2 bytes */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;

	/* No ID field in IPv6 header */
	if (v4) {
		/* Increment IPID */
		field->layer = NIX_TXLAYER_OL3;
		field->offset = 4;
		field->sizem1 = 1; /* i.e. 2 bytes */
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* Update length in UDP header */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 4;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_ADD_PAYLEN;
}

/* Setup segmentation algorithms in HW and retrieve algorithm index */
void otx2_setup_segmentation(struct otx2_nic *pfvf)
{
	struct nix_lso_format_cfg_rsp *rsp;
	struct nix_lso_format_cfg *lso;
	struct otx2_hw *hw = &pfvf->hw;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	/* UDPv4 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, true);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv4_idx = rsp->lso_format_idx;

	/* UDPv6 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, false);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv6_idx = rsp->lso_format_idx;
	mutex_unlock(&pfvf->mbox.lock);
	return;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	netdev_info(pfvf->netdev,
		    "Failed to get LSO index for UDP GSO offload, disabling\n");
	pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
}

void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
	/* Configure CQE interrupt coalescing parameters
	 *
	 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
	 * set 1 less than cq_ecount_wait. And cq_time_wait is in
	 * usecs, convert that to 100ns count.
	 */
	otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
		     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
		     ((u64)pfvf->hw.cq_qcount_wait << 32) |
		     (pfvf->hw.cq_ecount_wait - 1));
}

static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			       dma_addr_t *dma)
{
	unsigned int offset = 0;
	struct page *page;
	size_t sz;

	sz = SKB_DATA_ALIGN(pool->rbsize);
	sz = ALIGN(sz, OTX2_ALIGN);

	page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);
	if (unlikely(!page))
		return -ENOMEM;

	*dma = page_pool_get_dma_addr(page) + offset;
	return 0;
}

static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			     dma_addr_t *dma)
{
	u8 *buf;

	if (pool->page_pool)
		return otx2_alloc_pool_buf(pfvf, pool, dma);

	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
	if (unlikely(!buf))
		return -ENOMEM;

	*dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
		page_frag_free(buf);
		return -ENOMEM;
	}

	return 0;
}

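/* napi_alloc_frag_align() must run with BHs disabled; this wrapper
 * lets process-context callers share the same allocation path.
 */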
int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma)
{
	int ret;

	local_bh_disable();
	ret = __otx2_alloc_rbuf(pfvf, pool, dma);
	local_bh_enable();
	return ret;
}

int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma)
{
	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
		return -ENOMEM;
	return 0;
}

void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	schedule_work(&pfvf->reset_task);
}
EXPORT_SYMBOL(otx2_tx_timeout);

void otx2_get_mac_from_af(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int err;

	err = otx2_hw_get_mac_addr(pfvf, netdev);
	if (err)
		dev_warn(pfvf->dev, "Failed to read mac from hardware\n");

	/* If AF doesn't provide a valid MAC, generate a random one */
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}
EXPORT_SYMBOL(otx2_get_mac_from_af);

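/* Configure one level of the NIX transmit scheduler hierarchy
 * (SMQ/MDQ -> TL4 -> TL3 -> TL2 -> TL1), linking the queue to its
 * parent and programming its DWRR quantum.
 */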
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
	u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;
	u64 dwrr_val;

	dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq_list = hw->txschq_list;
#ifdef CONFIG_DCB
	if (txschq_for_pfc)
		schq_list = pfvf->pfc_schq_list;
#endif

	schq = schq_list[lvl][prio];
	/* Set topology etc. configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		/* Set link type for DWRR MTU selection on CN10K silicons */
		if (!is_dev_otx2(pfvf->pdev))
			req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57),
						     (u64)hw->smq_link_type);
		req->num_regs++;
		/* MDQ config */
		parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
			req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
			/* Enable this queue and backpressure
			 * and set relative channel
			 */
			req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
		}
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;

		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
			req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
			/* Enable this queue and backpressure
			 * and set relative channel
			 */
			req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
		}
	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For VF this is always ignored.
		 */

		/* On CN10K, if RR_WEIGHT is greater than 16384, HW will
		 * clip it to 16384, so configuring a 24bit max value
		 * will work on both OTx2 and CN10K.
		 */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_txschq_config);

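/* Request AF to flush the given SMQ: the flush bit (bit 49) of
 * NIX_AF_SMQX_CFG makes HW drain all packets pending on the SMQ.
 */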
int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
{
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] |= BIT_ULL(49);
	req->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
EXPORT_SYMBOL(otx2_smq_flush);

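/* Allocate one transmit scheduler queue per hierarchy level from AF
 * and record the returned hardware queue indices.
 */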
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int lvl, schq, rc;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;
	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		return rc;

	rsp = (struct nix_txsch_alloc_rsp *)
	       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pfvf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];

	pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
	pfvf->hw.txschq_aggr_lvl_rr_prio = rsp->aggr_lvl_rr_prio;

	return 0;
}

void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
{
	struct nix_txsch_free_req *free_req;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		mutex_unlock(&pfvf->mbox.lock);
		netdev_err(pfvf->netdev,
			   "Failed alloc txschq free req\n");
		return;
	}

	free_req->schq_lvl = lvl;
	free_req->schq = schq;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed stop txschq %d at level %d\n", schq, lvl);
	}

	mutex_unlock(&pfvf->mbox.lock);
}
EXPORT_SYMBOL(otx2_txschq_free_one);

void otx2_txschq_stop(struct otx2_nic *pfvf)
{
	int lvl, schq;

	/* free non QOS TLx nodes */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		otx2_txschq_free_one(pfvf, lvl,
				     pfvf->hw.txschq_list[lvl][0]);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
}

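/* Poll NIX_LF_SQ_OP_STATUS until each SQ's head pointer catches up
 * with its tail, i.e. all in-flight SQEs have been processed by HW.
 */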
void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	struct otx2_snd_queue *sq;
	u64 incr, *ptr, val;
	int timeout = 1000;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
		sq = &pfvf->qset.sq[qidx];
		if (!sq->sqb_ptrs)
			continue;

		incr = (u64)qidx << 32;
		while (timeout) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
			timeout--;
		}
	}
}

/* RED and drop levels of CQ on packet reception.
 * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * Eg: For RQ length 1K, for pass/drop level 204/230.
 * RED accepts pkts if free pointers > 102 & <= 205.
 * Drops pkts if free pointers < 102.
 */
#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

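/* SQ context initialization for OTx2 silicon; CN10K has its own
 * hw_ops variant. The LMT line mapped here is used to submit SQEs
 * to HW via LMTST operations.
 */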
int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
	struct otx2_nic *pfvf = dev;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;

	sq = &pfvf->qset.sq[qidx];
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to pipelining impact, a minimum of 2000 unused SQ CQEs
	 * needs to be maintained to avoid CQ overflow.
	 */
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	if (qidx < pfvf->hw.tx_queues) {
		err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
				 TSO_HEADER_SIZE);
		if (err)
			return err;
	}

	sq->sqe_base = sq->sqe->base;
	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
	if (!sq->sg)
		return -ENOMEM;

	if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
		err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
				 sizeof(*sq->timestamps));
		if (err)
			return err;
	}

	sq->head = 0;
	sq->cons_head = 0;
	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	/* Set SQE threshold to 10% of total SQEs */
	sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	sq->stats.bytes = 0;
	sq->stats.pkts = 0;

	return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
}

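/* CQs are laid out by qidx: [0, rx_queues) are RX CQs, then TX CQs up
 * to non_xdp_queues, followed by XDP and then QOS completion queues.
 */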
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	int err, pool_id, non_xdp_queues;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cint_idx = qidx;
		cq->cqe_cnt = qset->rqe_cnt;
		if (pfvf->xdp_prog)
			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
	} else if (qidx < non_xdp_queues) {
		cq->cq_type = CQ_TX;
		cq->cint_idx = qidx - pfvf->hw.rx_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	} else {
		if (pfvf->hw.xdp_queues &&
		    qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
			cq->cq_type = CQ_XDP;
			cq->cint_idx = qidx - non_xdp_queues;
			cq->cqe_cnt = qset->sqe_cnt;
		} else {
			cq->cq_type = CQ_QOS;
			cq->cint_idx = qidx - non_xdp_queues -
				       pfvf->hw.xdp_queues;
			cq->cqe_cnt = qset->sqe_cnt;
		}
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* When all RQ auras point to a single pool, all CQ receive buffer
	 * pools also point to that same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];
	cq->refill_task_sched = false;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.cint_idx = cq->cint_idx;
	aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
	aq->cq.qint_idx = 0;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;

		if (!is_otx2_lbkvf(pfvf->pdev)) {
			/* Enable receive CQ backpressure */
			aq->cq.bp_ena = 1;
#ifdef CONFIG_DCB
			aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
#else
			aq->cq.bpid = pfvf->bpid[0];
#endif

			/* Set backpressure level to the same as the CQ pass level */
			aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
		}
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

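/* Deferred work, scheduled when buffer allocation fails in NAPI
 * context; it reschedules NAPI so that refilling the RQ buffer pool
 * is retried.
 */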
static void otx2_pool_refill_task(struct work_struct *work)
{
	struct otx2_cq_queue *cq;
	struct refill_work *wrk;
	struct otx2_nic *pfvf;
	int qidx;

	wrk = container_of(work, struct refill_work, pool_refill_work.work);
	pfvf = wrk->pf;
	qidx = wrk - pfvf->refill_wrk;
	cq = &pfvf->qset.cq[qidx];

	cq->refill_task_sched = false;

	local_bh_disable();
	napi_schedule(wrk->napi);
	local_bh_enable();
}

int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
							   NIX_LF_CQ_OP_STATUS);

	/* Initialize work queue for receive buffer refill */
	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
					sizeof(struct refill_work), GFP_KERNEL);
	if (!pfvf->refill_wrk)
		return -ENOMEM;

	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		pfvf->refill_wrk[qidx].pf = pfvf;
		INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
				  otx2_pool_refill_task);
	}
	return 0;
}

int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req *nixlf;
	struct nix_lf_alloc_rsp *rsp;
	int err;

	pfvf->qset.xqe_size = pfvf->hw.xqe_size;

	/* Get memory to put this msg */
	nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!nixlf)
		return -ENOMEM;

	/* Set RQ/SQ/CQ counts */
	nixlf->rq_cnt = pfvf->hw.rx_queues;
	nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
	nixlf->cq_cnt = pfvf->qset.cq_cnt;
	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
	nixlf->rss_grps = MAX_RSS_GROUPS;
	nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
	/* We don't know absolute NPA LF idx attached.
	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with
	 * NPA LF attached to this RVU PF/VF.
	 */
	nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
	/* Disable alignment pad, enable L2 length check,
	 * enable L4 TCP/UDP checksum verification.
	 */
	nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;

	rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
							   &nixlf->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->qints < 1)
		return -ENXIO;

	return rsp->hdr.rc;
}

void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	int sqb, qidx;
	u64 iova, pa;

	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}

void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size)
{
	struct page *page;
	u64 pa;

	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
	page = virt_to_head_page(phys_to_virt(pa));

	if (pool->page_pool) {
		page_pool_put_full_page(pool->page_pool, page, true);
	} else {
		dma_unmap_page_attrs(pfvf->dev, iova, size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);

		put_page(page);
	}
}

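/* Drain all buffer pointers left in an aura and free the underlying
 * pages or frags.
 */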
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	struct otx2_pool *pool;
	u64 iova;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		pool = &pfvf->qset.pool[pool_id];
		while (iova) {
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			otx2_free_bufs(pfvf, pool, iova, size);

			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}

void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
		page_pool_destroy(pool->page_pool);
		pool->page_pool = NULL;
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
	pfvf->qset.pool = NULL;
}

int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
		aq->aura.bp_ena = 0;
		/* If NIX1 LF is attached then specify NIX1_RX.
		 *
		 * Below NPA_AURA_S[BP_ENA] is set according to the
		 * NPA_BPINTF_E enumeration given as:
		 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
		 * NIX0_RX is 0x0 + 0*0x1 = 0
		 * NIX1_RX is 0x0 + 1*0x1 = 1
		 * But in HRM it is given that
		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
		 * enumerated by NPA_BPINTF_E."
		 */
		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
			aq->aura.bp_ena = 1;
#ifdef CONFIG_DCB
		aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
#else
		aq->aura.nix0_bpid = pfvf->bpid[0];
#endif

		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type)
{
	struct page_pool_params pp_params = { 0 };
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	if (type != AURA_NIX_RQ) {
		pool->page_pool = NULL;
		return 0;
	}

	pp_params.order = get_order(buf_size);
	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dev = pfvf->dev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pool->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(pool->page_pool)) {
		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
		return PTR_ERR(pool->page_pool);
	}

	return 0;
}

int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;

	/* Calculate number of SQBs needed.
	 *
	 * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
	 * Last SQE is used for pointing to next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get no of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
		if (!sq->sqb_ptrs) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
			if (err)
				goto err_mem;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
	}

err_mem:
	return err ? -ENOMEM : 0;

fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

1493 | int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) | |
1494 | { | |
1495 | struct otx2_hw *hw = &pfvf->hw; | |
1496 | int stack_pages, pool_id, rq; | |
1497 | struct otx2_pool *pool; | |
1498 | int err, ptr, num_ptrs; | |
1fb3ca76 | 1499 | dma_addr_t bufptr; |
caa2da34 SG |
1500 | |
1501 | num_ptrs = pfvf->qset.rqe_cnt; | |
1502 | ||
1503 | stack_pages = | |
1504 | (num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs; | |
1505 | ||
1506 | for (rq = 0; rq < hw->rx_queues; rq++) { | |
1507 | pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq); | |
1508 | /* Initialize aura context */ | |
1509 | err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs); | |
1510 | if (err) | |
1511 | goto fail; | |
1512 | } | |
1513 | for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { | |
1514 | err = otx2_pool_init(pfvf, pool_id, stack_pages, | |
b2e3406a | 1515 | num_ptrs, pfvf->rbsize, AURA_NIX_RQ); |
caa2da34 SG |
1516 | if (err) |
1517 | goto fail; | |
1518 | } | |
1519 | ||
1520 | /* Flush accumulated messages */ | |
1521 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1522 | if (err) | |
1523 | goto fail; | |
1524 | ||
1525 | /* Allocate buffers and release their pointers into the aura/pool */ | |
1526 | for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { | |
1527 | pool = &pfvf->qset.pool[pool_id]; | |
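/* Pointers are handed to the aura offset by OTX2_HEAD_ROOM so every
 * received frame starts with reserved headroom in front of the data.
 */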
1528 | for (ptr = 0; ptr < num_ptrs; ptr++) { | |
4af1b64f G |
1529 | err = otx2_alloc_rbuf(pfvf, pool, &bufptr); |
1530 | if (err) | |
55ba18dc | 1531 | return -ENOMEM; |
4c236d5d G |
1532 | pfvf->hw_ops->aura_freeptr(pfvf, pool_id, |
1533 | bufptr + OTX2_HEAD_ROOM); | |
caa2da34 | 1534 | } |
caa2da34 | 1535 | } |
55ba18dc | 1536 | return 0; |
caa2da34 SG |
1537 | fail: |
1538 | otx2_mbox_reset(&pfvf->mbox.mbox, 0); | |
1539 | otx2_aura_pool_free(pfvf); | |
1540 | return err; | |
1541 | } | |
1542 | ||
05fcc9e0 SG |
1543 | int otx2_config_npa(struct otx2_nic *pfvf) |
1544 | { | |
1545 | struct otx2_qset *qset = &pfvf->qset; | |
1546 | struct npa_lf_alloc_req *npalf; | |
1547 | struct otx2_hw *hw = &pfvf->hw; | |
1548 | int aura_cnt; | |
1549 | ||
1550 | /* Pool - Stack of free buffer pointers | |
1551 | * Aura - Allocates/frees pointers from/to the pool for NIX DMA. | |
1552 | */ | |
1553 | ||
1554 | if (!hw->pool_cnt) | |
1555 | return -EINVAL; | |
1556 | ||
bf2bcd6f XW |
1557 | qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt, |
1558 | sizeof(struct otx2_pool), GFP_KERNEL); | |
05fcc9e0 SG |
1559 | if (!qset->pool) |
1560 | return -ENOMEM; | |
1561 | ||
1562 | /* Allocate mailbox memory for this msg */ | |
1563 | npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox); | |
1564 | if (!npalf) | |
1565 | return -ENOMEM; | |
1566 | ||
1567 | /* Set aura and pool counts */ | |
1568 | npalf->nr_pools = hw->pool_cnt; | |
1569 | aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt)); | |
1570 | npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1; | |
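/* Editor's note: aura_sz is an encoded count -- 1 selects 128 auras,
 * 2 selects 256, and so on (128 * 2^(aura_sz - 1)). E.g. pool_cnt = 200
 * rounds up to 256, ilog2(256) = 8, aura_sz = 8 - 6 = 2; fewer than 128
 * pools still get the minimum 128-aura size.
 */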
1571 | ||
1572 | return otx2_sync_mbox_msg(&pfvf->mbox); | |
1573 | } | |
1574 | ||
1575 | int otx2_detach_resources(struct mbox *mbox) | |
1576 | { | |
1577 | struct rsrc_detach *detach; | |
1578 | ||
4c3212f5 | 1579 | mutex_lock(&mbox->lock); |
05fcc9e0 SG |
1580 | detach = otx2_mbox_alloc_msg_detach_resources(mbox); |
1581 | if (!detach) { | |
4c3212f5 | 1582 | mutex_unlock(&mbox->lock); |
05fcc9e0 SG |
1583 | return -ENOMEM; |
1584 | } | |
1585 | ||
1586 | /* detach all */ | |
1587 | detach->partial = false; | |
1588 | ||
1589 | /* Send detach request to AF */ | |
1590 | otx2_mbox_msg_send(&mbox->mbox, 0); | |
4c3212f5 | 1591 | mutex_unlock(&mbox->lock); |
05fcc9e0 SG |
1592 | return 0; |
1593 | } | |
3184fb5b | 1594 | EXPORT_SYMBOL(otx2_detach_resources); |
05fcc9e0 SG |
1595 | |
1596 | int otx2_attach_npa_nix(struct otx2_nic *pfvf) | |
1597 | { | |
1598 | struct rsrc_attach *attach; | |
1599 | struct msg_req *msix; | |
1600 | int err; | |
1601 | ||
4c3212f5 | 1602 | mutex_lock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1603 | /* Allocate mailbox memory for this msg */ |
1604 | attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox); | |
1605 | if (!attach) { | |
4c3212f5 | 1606 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1607 | return -ENOMEM; |
1608 | } | |
1609 | ||
1610 | attach->npalf = true; | |
1611 | attach->nixlf = true; | |
1612 | ||
1613 | /* Send attach request to AF */ | |
1614 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1615 | if (err) { | |
4c3212f5 | 1616 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1617 | return err; |
1618 | } | |
1619 | ||
caa2da34 SG |
1620 | pfvf->nix_blkaddr = BLKADDR_NIX0; |
1621 | ||
1622 | /* If the platform has two NIX blocks, the LF may have been | |
1623 | * allocated from NIX1. | |
1624 | */ | |
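/* Editor's note (assumption from the RVU register layout): the low bits
 * of BLOCK_ADDRX_DISC count the LFs provisioned to this PF from that
 * block address, so a nonzero value means the LF lives on NIX1.
 */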
1625 | if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL) | |
1626 | pfvf->nix_blkaddr = BLKADDR_NIX1; | |
1627 | ||
05fcc9e0 SG |
1628 | /* Get NPA and NIX MSIX vector offsets */ |
1629 | msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox); | |
1630 | if (!msix) { | |
4c3212f5 | 1631 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1632 | return -ENOMEM; |
1633 | } | |
1634 | ||
1635 | err = otx2_sync_mbox_msg(&pfvf->mbox); | |
1636 | if (err) { | |
4c3212f5 | 1637 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1638 | return err; |
1639 | } | |
4c3212f5 | 1640 | mutex_unlock(&pfvf->mbox.lock); |
05fcc9e0 SG |
1641 | |
1642 | if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID || | |
1643 | pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) { | |
1644 | dev_err(pfvf->dev, | |
1645 | "RVUPF: Invalid MSIX vector offset for NPA/NIX\n"); | |
1646 | return -EINVAL; | |
1647 | } | |
1648 | ||
1649 | return 0; | |
1650 | } | |
3184fb5b | 1651 | EXPORT_SYMBOL(otx2_attach_npa_nix); |
05fcc9e0 | 1652 | |
caa2da34 SG |
1653 | void otx2_ctx_disable(struct mbox *mbox, int type, bool npa) |
1654 | { | |
1655 | struct hwctx_disable_req *req; | |
1656 | ||
4c3212f5 | 1657 | mutex_lock(&mbox->lock); |
caa2da34 SG |
1658 | /* Request AQ to disable this context */ |
1659 | if (npa) | |
1660 | req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox); | |
1661 | else | |
1662 | req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox); | |
1663 | ||
1664 | if (!req) { | |
4c3212f5 | 1665 | mutex_unlock(&mbox->lock); |
caa2da34 SG |
1666 | return; |
1667 | } | |
1668 | ||
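/* ctype selects which admin-queue context to disable: an NPA aura/pool
 * or a NIX RQ/SQ/CQ.
 */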
1669 | req->ctype = type; | |
1670 | ||
1671 | if (otx2_sync_mbox_msg(mbox)) | |
1672 | dev_err(mbox->pfvf->dev, "%s failed to disable context\n", | |
1673 | __func__); | |
1674 | ||
4c3212f5 | 1675 | mutex_unlock(&mbox->lock); |
caa2da34 SG |
1676 | } |
1677 | ||
75f36270 G |
1678 | int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) |
1679 | { | |
1680 | struct nix_bp_cfg_req *req; | |
1681 | ||
1682 | if (enable) | |
1683 | req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox); | |
1684 | else | |
1685 | req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox); | |
1686 | ||
1687 | if (!req) | |
1688 | return -ENOMEM; | |
1689 | ||
1690 | req->chan_base = 0; | |
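/* With PFC enabled, back-pressure is requested on one channel per
 * traffic class, each with its own BPID; otherwise a single
 * channel/BPID pair covers the interface.
 */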
8e675581 HK |
1691 | #ifdef CONFIG_DCB |
1692 | req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1; | |
1693 | req->bpid_per_chan = pfvf->pfc_en ? 1 : 0; | |
1694 | #else | |
1695 | req->chan_cnt = 1; | |
75f36270 | 1696 | req->bpid_per_chan = 0; |
8e675581 HK |
1697 | #endif |
1698 | ||
75f36270 G |
1699 | return otx2_sync_mbox_msg(&pfvf->mbox); |
1700 | } | |
8e675581 | 1701 | EXPORT_SYMBOL(otx2_nix_config_bp); |
75f36270 | 1702 | |
d45d8979 CJ |
1703 | /* Mbox message handlers */ |
1704 | void mbox_handler_cgx_stats(struct otx2_nic *pfvf, | |
1705 | struct cgx_stats_rsp *rsp) | |
1706 | { | |
1707 | int id; | |
1708 | ||
1709 | for (id = 0; id < CGX_RX_STATS_COUNT; id++) | |
1710 | pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id]; | |
1711 | for (id = 0; id < CGX_TX_STATS_COUNT; id++) | |
1712 | pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id]; | |
1713 | } | |
1714 | ||
d0cf9503 CJ |
1715 | void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf, |
1716 | struct cgx_fec_stats_rsp *rsp) | |
1717 | { | |
1718 | pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks; | |
1719 | pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks; | |
1720 | } | |
1721 | ||
05fcc9e0 SG |
1722 | void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf, |
1723 | struct npa_lf_alloc_rsp *rsp) | |
1724 | { | |
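/* Cache how many pointers fit in one NPA stack page, and the page size;
 * the aura/pool init paths above size their stacks from these values.
 */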
1725 | pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs; | |
1726 | pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes; | |
1727 | } | |
3184fb5b | 1728 | EXPORT_SYMBOL(mbox_handler_npa_lf_alloc); |
05fcc9e0 SG |
1729 | |
1730 | void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, | |
1731 | struct nix_lf_alloc_rsp *rsp) | |
1732 | { | |
1733 | pfvf->hw.sqb_size = rsp->sqb_size; | |
1734 | pfvf->hw.rx_chan_base = rsp->rx_chan_base; | |
1735 | pfvf->hw.tx_chan_base = rsp->tx_chan_base; | |
86d74760 SG |
1736 | pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx; |
1737 | pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; | |
8bcf5ced SS |
1738 | pfvf->hw.cgx_links = rsp->cgx_links; |
1739 | pfvf->hw.lbk_links = rsp->lbk_links; | |
039190bb | 1740 | pfvf->hw.tx_link = rsp->tx_link; |
05fcc9e0 | 1741 | } |
3184fb5b | 1742 | EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); |
05fcc9e0 SG |
1743 | |
1744 | void mbox_handler_msix_offset(struct otx2_nic *pfvf, | |
1745 | struct msix_offset_rsp *rsp) | |
1746 | { | |
1747 | pfvf->hw.npa_msixoff = rsp->npa_msixoff; | |
1748 | pfvf->hw.nix_msixoff = rsp->nix_msixoff; | |
1749 | } | |
3184fb5b | 1750 | EXPORT_SYMBOL(mbox_handler_msix_offset); |
5a6d7c9d | 1751 | |
75f36270 G |
1752 | void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf, |
1753 | struct nix_bp_cfg_rsp *rsp) | |
1754 | { | |
1755 | int chan, chan_id; | |
1756 | ||
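/* Each chan_bpid entry packs the channel id in bits [16:10] and the
 * backpressure id in bits [9:0]; the shift/masks below unpack them.
 */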
1757 | for (chan = 0; chan < rsp->chan_cnt; chan++) { | |
1758 | chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F); | |
1759 | pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF; | |
1760 | } | |
1761 | } | |
3184fb5b | 1762 | EXPORT_SYMBOL(mbox_handler_nix_bp_enable); |
75f36270 | 1763 | |
04a21ef3 SG |
1764 | void otx2_free_cints(struct otx2_nic *pfvf, int n) |
1765 | { | |
1766 | struct otx2_qset *qset = &pfvf->qset; | |
1767 | struct otx2_hw *hw = &pfvf->hw; | |
1768 | int irq, qidx; | |
1769 | ||
1770 | for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START; | |
1771 | qidx < n; | |
1772 | qidx++, irq++) { | |
1773 | int vector = pci_irq_vector(pfvf->pdev, irq); | |
1774 | ||
1775 | irq_set_affinity_hint(vector, NULL); | |
1776 | free_cpumask_var(hw->affinity_mask[irq]); | |
1777 | free_irq(vector, &qset->napi[qidx]); | |
1778 | } | |
1779 | } | |
1780 | ||
1781 | void otx2_set_cints_affinity(struct otx2_nic *pfvf) | |
1782 | { | |
1783 | struct otx2_hw *hw = &pfvf->hw; | |
1784 | int vec, cpu, irq, cint; | |
1785 | ||
1786 | vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START; | |
1787 | cpu = cpumask_first(cpu_online_mask); | |
1788 | ||
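/* Pin each CQ interrupt vector to one online CPU, walking the online
 * mask round-robin so completion work is spread across cores.
 */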
1789 | /* CQ interrupts */ | |
1790 | for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) { | |
1791 | if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL)) | |
1792 | return; | |
1793 | ||
1794 | cpumask_set_cpu(cpu, hw->affinity_mask[vec]); | |
1795 | ||
1796 | irq = pci_irq_vector(pfvf->pdev, vec); | |
1797 | irq_set_affinity_hint(irq, hw->affinity_mask[vec]); | |
1798 | ||
1799 | cpu = cpumask_next(cpu, cpu_online_mask); | |
1800 | if (unlikely(cpu >= nr_cpu_ids)) | |
1801 | cpu = 0; | |
1802 | } | |
1803 | } | |
1804 | ||
bbba125e SG |
1805 | static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw) |
1806 | { | |
1807 | if (is_otx2_lbkvf(pfvf->pdev)) { | |
1808 | pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK; | |
1809 | return hw->lbk_dwrr_mtu; | |
1810 | } | |
1811 | ||
1812 | pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM; | |
1813 | return hw->rpm_dwrr_mtu; | |
1814 | } | |
1815 | ||
ab58a416 HK |
1816 | u16 otx2_get_max_mtu(struct otx2_nic *pfvf) |
1817 | { | |
1818 | struct nix_hw_info *rsp; | |
1819 | struct msg_req *req; | |
1820 | u16 max_mtu; | |
1821 | int rc; | |
1822 | ||
1823 | mutex_lock(&pfvf->mbox.lock); | |
1824 | ||
1825 | req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox); | |
1826 | if (!req) { | |
1827 | rc = -ENOMEM; | |
1828 | goto out; | |
1829 | } | |
1830 | ||
1831 | rc = otx2_sync_mbox_msg(&pfvf->mbox); | |
1832 | if (!rc) { | |
1833 | rsp = (struct nix_hw_info *) | |
1834 | otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); | |
1835 | ||
1836 | /* HW counts VLAN insertion bytes (8 for double tag) | |
1837 | * irrespective of whether the SQE requests VLAN insertion | |
1838 | * in the packet or not. Hence these 8 bytes have to be | |
1839 | * discounted from the max packet size; otherwise HW will | |
1840 | * throw SMQ errors. | |
1841 | */ | |
1842 | max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; | |
c39830a4 SG |
1843 | |
1844 | /* Also save DWRR MTU, needed for DWRR weight calculation */ | |
bbba125e | 1845 | pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp); |
c39830a4 SG |
1846 | if (!pfvf->hw.dwrr_mtu) |
1847 | pfvf->hw.dwrr_mtu = 1; | |
ab58a416 HK |
1848 | } |
1849 | ||
1850 | out: | |
1851 | mutex_unlock(&pfvf->mbox.lock); | |
1852 | if (rc) { | |
1853 | dev_warn(pfvf->dev, | |
1854 | "Failed to get MTU from hardware setting default value(1500)\n"); | |
1855 | max_mtu = 1500; | |
1856 | } | |
1857 | return max_mtu; | |
1858 | } | |
1859 | EXPORT_SYMBOL(otx2_get_max_mtu); | |
1860 | ||
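/* Editor's sketch (not part of the driver): the dwrr_mtu saved above is
 * the scheduler quantum, and a queue's DWRR weight for a given MTU is a
 * rounded-up ratio of the two, roughly:
 *
 *	static inline u32 mtu_to_weight(struct otx2_nic *pfvf, u32 mtu)
 *	{
 *		// dwrr_mtu was forced to 1 above on silicon that does
 *		// not report it, so this degrades to weight == mtu there.
 *		return DIV_ROUND_UP(mtu, pfvf->hw.dwrr_mtu);
 *	}
 *
 * mtu_to_weight() is a hypothetical name; the driver's own helper,
 * mtu_to_dwrr_weight() in otx2_common.h, follows the same pattern.
 */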
4b0385bc SS |
1861 | int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features) |
1862 | { | |
1863 | netdev_features_t changed = features ^ netdev->features; | |
1864 | struct otx2_nic *pfvf = netdev_priv(netdev); | |
1865 | bool ntuple = !!(features & NETIF_F_NTUPLE); | |
1866 | bool tc = !!(features & NETIF_F_HW_TC); | |
1867 | ||
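/* 'changed' has a bit set for every feature whose requested state
 * differs from what is currently enabled on the netdev.
 */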
1868 | if ((changed & NETIF_F_NTUPLE) && !ntuple) | |
1869 | otx2_destroy_ntuple_flows(pfvf); | |
1870 | ||
1871 | if ((changed & NETIF_F_NTUPLE) && ntuple) { | |
1872 | if (!pfvf->flow_cfg->max_flows) { | |
1873 | netdev_err(netdev, | |
1874 | "Can't enable NTUPLE, MCAM entries not allocated\n"); | |
1875 | return -EINVAL; | |
1876 | } | |
1877 | } | |
1878 | ||
4b0385bc | 1879 | if ((changed & NETIF_F_HW_TC) && !tc && |
61f98da4 | 1880 | otx2_tc_flower_rule_cnt(pfvf)) { |
4b0385bc SS |
1881 | netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); |
1882 | return -EBUSY; | |
1883 | } | |
1884 | ||
1885 | if ((changed & NETIF_F_NTUPLE) && ntuple && | |
61f98da4 | 1886 | otx2_tc_flower_rule_cnt(pfvf) && !(changed & NETIF_F_HW_TC)) { |
4b0385bc | 1887 | netdev_err(netdev, |
61f98da4 | 1888 | "Can't enable NTUPLE when TC flower offload is active, disable TC rules and retry\n"); |
4b0385bc SS |
1889 | return -EINVAL; |
1890 | } | |
1891 | ||
1892 | return 0; | |
1893 | } | |
1894 | EXPORT_SYMBOL(otx2_handle_ntuple_tc_features); | |
1895 | ||
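/* Expand a default no-op handler for every mbox "up" notification in the
 * CGX and MCS message lists. The handlers are weak symbols, so PF/VF code
 * that cares about a particular notification defines a strong override.
 */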
5a6d7c9d SG |
1896 | #define M(_name, _id, _fn_name, _req_type, _rsp_type) \ |
1897 | int __weak \ | |
1898 | otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \ | |
1899 | struct _req_type *req, \ | |
1900 | struct _rsp_type *rsp) \ | |
1901 | { \ | |
1902 | /* Nothing to do here */ \ | |
1903 | return 0; \ | |
1904 | } \ | |
1905 | EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name); | |
1906 | MBOX_UP_CGX_MESSAGES | |
c54ffc73 | 1907 | MBOX_UP_MCS_MESSAGES |
5a6d7c9d | 1908 | #undef M |