/* QLogic qede NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/vxlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>

#include "qede.h"

static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede "
			      DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
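/* See qede_config_debug() below for how 'debug' is decoded: b31 enables
 * NOTICE prints, b30 enables INFO prints, and b29-b0 form a per-module
 * VERBOSE bitmap.
 */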

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1635
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#endif

static const struct pci_device_id qede_pci_tbl[] = {
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
	{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

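/* Tx watchdog timeout: 5 seconds, expressed in jiffies */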
#define TX_TIMEOUT		(5 * HZ)

static void qede_remove(struct pci_dev *pdev);
static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq);

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	/* Currently only support name change */
	if (event != NETDEV_CHANGENAME)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	/* Notify qed of the name change */
	if (!edev->ops || !edev->ops->common)
		goto done;
	edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static int __init qede_init(void)
{
	int ret;
	u32 qed_ver;

	pr_notice("qede_init: %s\n", version);

	qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
	if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
		pr_notice("Version mismatch [%08x != %08x]\n",
			  qed_ver, QEDE_ETH_INTERFACE_VERSION);
		return -EINVAL;
	}

	qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * an interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	pr_notice("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

/* -------------------------------------------------------------------------
 * START OF FAST-PATH
 * -------------------------------------------------------------------------
 */

/* Unmap the data and free skb */
static int qede_free_tx_pkt(struct qede_dev *edev,
			    struct qede_tx_queue *txq,
			    int *len)
{
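	/* NUM_TX_BDS_MAX is the power-of-two ring size minus one (see
	 * qede.h), so ANDing with it wraps the software-ring index.
	 */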
	u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

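	/* Any BDs still unaccounted for (e.g. the 2nd/3rd BDs of an LSO
	 * packet) carry no DMA mapping of their own; just consume them.
	 */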
	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_dev *edev,
				    struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd,
				    bool data_split)
{
	u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
		       BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(&edev->pdev->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return prod once more to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod),
			   first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring[idx].skb = NULL;
	txq->sw_tx_ring[idx].flags = 0;
}

static u32 qede_xmit_type(struct qede_dev *edev,
			  struct sk_buff *skb,
			  int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits = 0, bd2_bits2 = 0;

	bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

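	/* The _W suffix: FW expects the L4 header start offset in units of
	 * 16-bit words, hence the byte offset is shifted right by one.
	 */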
	bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd) {
		third_bd->data.bitfields |=
			((tcp_hdrlen(skb) / 4) &
			 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
			ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
	}

	second_bd->data.bitfields = cpu_to_le16(bd2_bits);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_dev *edev,
			  skb_frag_t *frag,
			  struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(&edev->pdev->dev, frag, 0,
				   skb_frag_size(frag),
				   DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "Unable to map frag - dropping packet\n");
		return -ENOMEM;
	}

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

/* Main transmit function */
static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct netdev_queue *netdev_txq;
	struct qede_tx_queue *txq;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_2nd_bd *second_bd = NULL;
	struct eth_tx_3rd_bd *third_bd = NULL;
	struct eth_tx_bd *tx_data_bd = NULL;
	u16 txq_index;
	u8 nbd = 0;
	dma_addr_t mapping;
	int rc, frag_idx = 0, ipv6_ext = 0;
	u8 xmit_type;
	u16 idx;
	u16 hlen;
	bool data_split = false;

	/* Get tx-queue context and netdev index */
	txq_index = skb_get_queue_mapping(skb);
	WARN_ON(txq_index >= QEDE_TSS_CNT(edev));
	txq = QEDE_TX_QUEUE(edev, txq_index);
	netdev_txq = netdev_get_tx_queue(ndev, txq_index);

	/* Current code doesn't support SKB linearization, since the maximum
	 * number of skb frags already fits within what the FW HSI allows.
	 */
	BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET);

	WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) <
		(MAX_SKB_FRAGS + 1));

	xmit_type = qede_xmit_type(edev, skb, &ipv6_ext);

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
	txq->sw_tx_ring[idx].skb = skb;
	first_bd = (struct eth_tx_1st_bd *)
		qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	first_bd->data.bd_flags.bitfields =
		1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
		return NETDEV_TX_OK;
	}
	nbd++;
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* In case there is IPv6 with extension headers or LSO we need 2nd and
	 * 3rd BDs.
	 */
	if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
		second_bd = (struct eth_tx_2nd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(second_bd, 0, sizeof(*second_bd));

		nbd++;
		third_bd = (struct eth_tx_3rd_bd *)
			qed_chain_produce(&txq->tx_pbl);
		memset(third_bd, 0, sizeof(*third_bd));

		nbd++;
		/* We need to fill in additional data in second_bd... */
		tx_data_bd = (struct eth_tx_bd *)second_bd;
	}

	if (skb_vlan_tag_present(skb)) {
		first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
	}

	/* Fill the parsing flags & params according to the requested offload */
	if (xmit_type & XMIT_L4_CSUM) {
		/* We don't re-calculate IP checksum as it is already done by
		 * the upper stack
		 */
		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;

		/* If the packet is IPv6 with extension header, indicate that
		 * to FW and pass few params, since the device cracker doesn't
		 * support parsing IPv6 with extension header/s.
		 */
		if (unlikely(ipv6_ext))
			qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
	}

	if (xmit_type & XMIT_LSO) {
		first_bd->data.bd_flags.bitfields |=
			(1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
		third_bd->data.lso_mss =
			cpu_to_le16(skb_shinfo(skb)->gso_size);

		first_bd->data.bd_flags.bitfields |=
			1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
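		/* hlen covers the MAC, IP and TCP headers that precede the
		 * TSO payload.
		 */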
		hlen = skb_transport_header(skb) +
		       tcp_hdrlen(skb) - skb->data;

		/* @@@TBD - if will not be removed need to check */
		third_bd->data.bitfields |=
			(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);

		/* Make life easier for FW guys who can't deal with header and
		 * data on same BD. If we need to split, use the second bd...
		 */
		if (unlikely(skb_headlen(skb) > hlen)) {
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "TSO split header size is %d (%x:%x)\n",
				   first_bd->nbytes, first_bd->addr.hi,
				   first_bd->addr.lo);

			mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
					   le32_to_cpu(first_bd->addr.lo)) +
				  hlen;

			BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
					      le16_to_cpu(first_bd->nbytes) -
					      hlen);

			/* this marks the BD as one that has no
			 * individual mapping
			 */
			txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;

			first_bd->nbytes = cpu_to_le16(hlen);

			tx_data_bd = (struct eth_tx_bd *)third_bd;
			data_split = true;
		}
	}

	/* Handle fragmented skb */
	/* Special handling for frags placed inside the 2nd and 3rd BDs */
	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}

		if (tx_data_bd == (struct eth_tx_bd *)second_bd)
			tx_data_bd = (struct eth_tx_bd *)third_bd;
		else
			tx_data_bd = NULL;

		frag_idx++;
	}

	/* Map last frags into 4th, 5th, ... */
	for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);

		memset(tx_data_bd, 0, sizeof(*tx_data_bd));

		rc = map_frag_to_bd(edev,
				    &skb_shinfo(skb)->frags[frag_idx],
				    tx_data_bd);
		if (rc) {
			qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
						data_split);
			return NETDEV_TX_OK;
		}
	}

	/* Update the first BD with the actual num BDs */
	first_bd->data.nbds = nbd;

	netdev_tx_sent_queue(netdev_txq, skb->len);

	skb_tx_timestamp(skb);

	/* Advance the packet producer only now, just before ringing the
	 * doorbell, since mapping of pages may have failed above.
	 */
	txq->sw_tx_prod++;

	/* 'next page' entries are counted in the producer value */
	txq->tx_db.data.bd_prod =
		cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the queue lock is released and another start_xmit is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
		     < (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_txq);
		DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
			   "Stop queue was called\n");
		/* paired memory barrier is in qede_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons
		 */
		smp_mb();

		if (qed_chain_get_elem_left(&txq->tx_pbl)
		    >= (MAX_SKB_FRAGS + 1) &&
		    (edev->state == QEDE_STATE_OPEN)) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
				   "Wake queue was called\n");
		}
	}

	return NETDEV_TX_OK;
}

static int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

static int qede_tx_int(struct qede_dev *edev,
		       struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		     >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

static bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static bool qede_has_tx_work(struct qede_fastpath *fp)
{
	u8 tc;

	for (tc = 0; tc < fp->edev->num_tc; tc++)
		if (qede_txq_has_work(&fp->txqs[tc]))
			return true;
	return false;
}

/* This function copies the Rx buffer from the CONS position to the PROD
 * position, since we failed to allocate a new Rx buffer.
 */
static void qede_reuse_rx_data(struct qede_rx_queue *rxq)
{
	struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *sw_rx_data_cons =
		&rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	struct sw_rx_data *sw_rx_data_prod =
		&rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	dma_unmap_addr_set(sw_rx_data_prod, mapping,
			   dma_unmap_addr(sw_rx_data_cons, mapping));

	sw_rx_data_prod->data = sw_rx_data_cons->data;
	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));

	rxq->sw_rx_cons++;
	rxq->sw_rx_prod++;
}

static inline void qede_update_rx_prod(struct qede_dev *edev,
				       struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qede_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
	 */
	mmiowb();
}

static u32 qede_get_rxhash(struct qede_dev *edev,
			   u8 bitfields,
			   __le32 rss_hash,
			   enum pkt_hash_types *rxhash_type)
{
	enum rss_hash_type htype;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);

	if ((edev->ndev->features & NETIF_F_RXHASH) && htype) {
		*rxhash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
				(htype == RSS_HASH_TYPE_IPV6)) ?
			       PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		return le32_to_cpu(rss_hash);
	}
	*rxhash_type = PKT_HASH_TYPE_NONE;
	return 0;
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct sk_buff *skb,
				    u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static u8 qede_check_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 csum = 0;

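	/* If HW reports that it calculated the L4 checksum, an L4 checksum
	 * error (or an IP header error) makes the packet bad; otherwise only
	 * IP header errors are considered.
	 */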
	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
	struct qede_dev *edev = fp->edev;
	struct qede_rx_queue *rxq = fp->rxq;

	u16 hw_comp_cons, sw_comp_cons, sw_rx_index, parse_flag;
	int rx_pkt = 0;
	u8 csum_flag;

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	/* Memory barrier to prevent the CPU from doing speculative reads of
	 * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE
	 * were read before FW writes it, and FW then writes the CQE and SB
	 * before the CPU reads hw_comp_cons, the CPU would operate on an old
	 * CQE.
	 */
	rmb();

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		struct eth_fast_path_rx_reg_cqe *fp_cqe;
		enum pkt_hash_types rxhash_type;
		enum eth_rx_cqe_type cqe_type;
		struct sw_rx_data *sw_rx_data;
		union eth_rx_cqe *cqe;
		struct sk_buff *skb;
		u16 len, pad;
		u32 rx_hash;
		u8 *data;

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			qed_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			edev->ops->eth_cqe_completion(
					edev->cdev, fp->rss_id,
					(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		data = sw_rx_data->data;

		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->pkt_len);
		pad = fp_cqe->placement_offset;

		/* For every Rx BD consumed, we allocate a new BD so the BD ring
		 * always has a fixed size. If allocation fails, we take the
		 * consumed BD and return it to the ring in the PROD position.
		 * The packet that was received on that BD will be dropped (and
		 * not passed to the upper stack).
		 */
		if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) {
			dma_unmap_single(&edev->pdev->dev,
					 dma_unmap_addr(sw_rx_data, mapping),
					 rxq->rx_buf_size, DMA_FROM_DEVICE);

			/* If this is an error packet then drop it */
			parse_flag =
			    le16_to_cpu(cqe->fast_path_regular.pars_flags.flags);
			csum_flag = qede_check_csum(parse_flag);
			if (csum_flag == QEDE_CSUM_ERROR) {
				DP_NOTICE(edev,
					  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
					  sw_comp_cons, parse_flag);
				rxq->rx_hw_errors++;
				kfree(data);
				goto next_rx;
			}

			skb = build_skb(data, 0);

			if (unlikely(!skb)) {
				DP_NOTICE(edev,
					  "build_skb failed, dropping incoming packet\n");
				kfree(data);
				rxq->rx_alloc_errors++;
				goto next_rx;
			}

			skb_reserve(skb, pad);

		} else {
			DP_NOTICE(edev,
				  "New buffer allocation failed, dropping incoming packet and reusing its buffer\n");
			qede_reuse_rx_data(rxq);
			rxq->rx_alloc_errors++;
			goto next_cqe;
		}

		sw_rx_data->data = NULL;

		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, edev->ndev);

		rx_hash = qede_get_rxhash(edev, fp_cqe->bitfields,
					  fp_cqe->rss_hash,
					  &rxhash_type);

		skb_set_hash(skb, rx_hash, rxhash_type);

		qede_set_skb_csum(skb, csum_flag);

		skb_record_rx_queue(skb, fp->rss_id);

		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));

		qed_chain_consume(&rxq->rx_bd_ring);

next_rx:
		rxq->sw_rx_cons++;
		rx_pkt++;

next_cqe: /* don't consume bd rx buffer */
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
		/* CR TPA - revisit how to handle budget in TPA perhaps
		 * increase on "end"
		 */
		if (rx_pkt == budget)
			break;
	} /* repeat while sw_comp_cons != hw_comp_cons... */

	/* Update producers */
	qede_update_rx_prod(edev, rxq);

	return rx_pkt;
}

static int qede_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
						napi);
	struct qede_dev *edev = fp->edev;

	while (1) {
		u8 tc;

		for (tc = 0; tc < edev->num_tc; tc++)
			if (qede_txq_has_work(&fp->txqs[tc]))
				qede_tx_int(edev, &fp->txqs[tc]);

		if (qede_has_rx_work(fp->rxq)) {
			work_done += qede_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
			qed_sb_update_sb_idx(fp->sb_info);
			/* *_has_*_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (qed_sb_update_sb_idx)
			 * prior to this check (*_has_*_work) so that
			 * we won't write the "newer" value of the status block
			 * to HW (if there was a DMA right after
			 * qede_has_rx_work and if there is no rmb, the memory
			 * reading (qed_sb_update_sb_idx) may be postponed
			 * to right before *_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(qede_has_rx_work(fp->rxq) ||
			      qede_has_tx_work(fp))) {
				napi_complete(napi);
				/* Update and reenable interrupts */
				qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
					   1 /*update*/);
				break;
			}
		}
	}

	return work_done;
}

static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);

	napi_schedule_irqoff(&fp->napi);
	return IRQ_HANDLED;
}

/* -------------------------------------------------------------------------
 * END OF FAST-PATH
 * -------------------------------------------------------------------------
 */

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
static int qede_set_mac_addr(struct net_device *ndev, void *p);
static void qede_set_rx_mode(struct net_device *ndev);
static void qede_config_rx_mode(struct net_device *ndev);

static int qede_set_ucast_rx_mac(struct qede_dev *edev,
				 enum qed_filter_xcast_params_type opcode,
				 unsigned char mac[ETH_ALEN])
{
	struct qed_filter_params filter_cmd;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	filter_cmd.type = QED_FILTER_TYPE_UCAST;
	filter_cmd.filter.ucast.type = opcode;
	filter_cmd.filter.ucast.mac_valid = 1;
	ether_addr_copy(filter_cmd.filter.ucast.mac, mac);

	return edev->ops->filter_config(edev->cdev, &filter_cmd);
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module,
					    u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memcpy(&edev->dev_info, info, sizeof(*info));

	edev->num_tc = edev->dev_info.num_tc;

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	u32 hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	ndev->netdev_ops = &qede_netdev_ops;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 * 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking the specific flow in low level.
 *
 * Notice that the level should be that of the lowest required logs.
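 * For example (assuming the QED_LOG_*_MASK macros follow this layout):
 * debug=0x3 selects VERBOSE prints for module bits 0-1, while
 * debug=0x40000000 selects the INFO level.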
 */
static void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_rss(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			kfree(fp->rxq);
			kfree(fp->txqs);
		}
		kfree(edev->fp_array);
	}
	edev->num_rss = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_RSS_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	for_each_rss(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		fp->rxq = kcalloc(1, sizeof(*fp->rxq), GFP_KERNEL);
		if (!fp->rxq) {
			DP_NOTICE(edev, "RXQ struct allocation failed\n");
			goto err;
		}

		fp->txqs = kcalloc(edev->num_tc, sizeof(*fp->txqs), GFP_KERNEL);
		if (!fp->txqs) {
			DP_NOTICE(edev, "TXQ array allocation failed\n");
			goto err;
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);
	mutex_lock(&edev->qede_lock);

	if (edev->state == QEDE_STATE_OPEN) {
		if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
			qede_config_rx_mode(edev->ndev);
	}

	mutex_unlock(&edev->qede_lock);
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	/* 16 rx + 16 tx */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = 32;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			enum qede_probe_mode mode)
{
	struct qed_slowpath_params params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
				      dp_module, dp_level);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&params, 0, sizeof(struct qed_slowpath_params));
	params.int_mode = QED_INT_MODE_MSIX;
	params.drv_major = QEDE_MAJOR_VERSION;
	params.drv_minor = QEDE_MINOR_VERSION;
	params.drv_rev = QEDE_REVISION_VERSION;
	params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	qede_init_ndev(edev);

	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err3;
	}

	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);

	DP_INFO(edev, "qede probe completed successfully\n");

	return 0;

err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	cancel_delayed_work_sync(&edev->sp_task);
	unregister_netdev(ndev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	free_netdev(ndev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	qed_ops->common->remove(cdev);

	pr_notice("qede_remove completed successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	rss_num = netif_get_num_default_rss_queues() *
		  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_rss = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_RSS_CNT(edev), rss_num);
		rc = 0;
	}
	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt),
				     &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		u8 *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_single(&edev->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 rxq->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->data = NULL;
		kfree(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static int qede_alloc_rx_buffer(struct qede_dev *edev,
				struct qede_rx_queue *rxq)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	u16 rx_buf_size;
	u8 *data;

	rx_buf_size = rxq->rx_buf_size;

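	/* GFP_ATOMIC: this can be called from NAPI (softirq) context via
	 * qede_rx_int(), where sleeping is not allowed.
	 */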
	data = kmalloc(rx_buf_size, GFP_ATOMIC);
	if (unlikely(!data)) {
		DP_NOTICE(edev, "Failed to allocate Rx data\n");
		return -ENOMEM;
	}

	mapping = dma_map_single(&edev->pdev->dev, data,
				 rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		kfree(data);
		DP_NOTICE(edev, "Failed to map Rx buffer\n");
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->data = data;

	dma_unmap_addr_set(sw_rx_data, mapping, mapping);

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));

	rxq->sw_rx_prod++;

	return 0;
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev,
			      struct qede_rx_queue *rxq)
{
	int i, rc, size, num_allocated;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN +
			   ETH_OVERHEAD +
			   edev->ndev->mtu +
			   QEDE_FW_RX_ALIGN_END;

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    NUM_RX_BDS_MAX,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);
	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    NUM_RX_BDS_MAX,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(edev, rxq);
		if (rc)
			break;
	}
	num_allocated = i;
	if (!num_allocated) {
		DP_ERR(edev, "Rx buffers allocation failed\n");
		goto err;
	} else if (num_allocated < rxq->num_rx_buffers) {
		DP_NOTICE(edev,
			  "Allocated fewer buffers than desired (%d allocated)\n",
			  num_allocated);
	}

	return 0;

err:
	qede_free_mem_rxq(edev, rxq);
	return -ENOMEM;
}

static void qede_free_mem_txq(struct qede_dev *edev,
			      struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	kfree(txq->sw_tx_ring);

	/* Free the real Tx ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev,
			      struct qede_tx_queue *txq)
{
	int size, rc;
	union eth_tx_bd_types *p_virt;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	size = sizeof(*txq->sw_tx_ring) * NUM_TX_BDS_MAX;
	txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
	if (!txq->sw_tx_ring) {
		DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
		goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    NUM_TX_BDS_MAX,
					    sizeof(*p_virt),
					    &txq->tx_pbl);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev,
			     struct qede_fastpath *fp)
{
	int tc;

	qede_free_mem_sb(edev, fp->sb_info);

	qede_free_mem_rxq(edev, fp->rxq);

	for (tc = 0; tc < edev->num_tc; tc++)
		qede_free_mem_txq(edev, &fp->txqs[tc]);
}

1631 | /* This function allocates all memory needed for a single fp (i.e. an entity | |
1632 | * which contains status block, one rx queue and multiple per-TC tx queues). | |
1633 | */ | |
1634 | static int qede_alloc_mem_fp(struct qede_dev *edev, | |
1635 | struct qede_fastpath *fp) | |
1636 | { | |
1637 | int rc, tc; | |
1638 | ||
1639 | rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->rss_id); | |
1640 | if (rc) | |
1641 | goto err; | |
1642 | ||
1643 | rc = qede_alloc_mem_rxq(edev, fp->rxq); | |
1644 | if (rc) | |
1645 | goto err; | |
1646 | ||
1647 | for (tc = 0; tc < edev->num_tc; tc++) { | |
1648 | rc = qede_alloc_mem_txq(edev, &fp->txqs[tc]); | |
1649 | if (rc) | |
1650 | goto err; | |
1651 | } | |
1652 | ||
1653 | return 0; | |
1654 | ||
1655 | err: | |
1656 | qede_free_mem_fp(edev, fp); | |
1657 | return -ENOMEM; | |
1658 | } | |
1659 | ||
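| /* This function frees all per-fastpath memory allocated at NIC load */ | |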
1660 | static void qede_free_mem_load(struct qede_dev *edev) | |
1661 | { | |
1662 | int i; | |
1663 | ||
1664 | for_each_rss(i) { | |
1665 | struct qede_fastpath *fp = &edev->fp_array[i]; | |
1666 | ||
1667 | qede_free_mem_fp(edev, fp); | |
1668 | } | |
1669 | } | |
1670 | ||
1671 | /* This function allocates all qede memory at NIC load. */ | |
1672 | static int qede_alloc_mem_load(struct qede_dev *edev) | |
1673 | { | |
1674 | int rc = 0, rss_id; | |
1675 | ||
1676 | for (rss_id = 0; rss_id < QEDE_RSS_CNT(edev); rss_id++) { | |
1677 | struct qede_fastpath *fp = &edev->fp_array[rss_id]; | |
1678 | ||
1679 | rc = qede_alloc_mem_fp(edev, fp); | |
1680 | if (rc) | |
1681 | break; | |
1682 | } | |
1683 | ||
1684 | if (rss_id != QEDE_RSS_CNT(edev)) { | |
1685 | /* Failed allocating memory for all the queues */ | |
1686 | if (!rss_id) { | |
1687 | DP_ERR(edev, | |
1688 | "Failed to allocate memory for the leading queue\n"); | |
1689 | rc = -ENOMEM; | |
1690 | } else { | |
1691 | DP_NOTICE(edev, | |
1692 | "Failed to allocate memory for all of the RSS queues\n Desired: %d queues, allocated: %d queues\n", | |
1693 | QEDE_RSS_CNT(edev), rss_id); | |
| /* Partial allocation is acceptable; continue with fewer queues */ | |
| rc = 0; | |
1694 | } | |
1695 | edev->num_rss = rss_id; | |
1696 | } | |
1697 | ||
1698 | return rc; | |
1699 | } | |
1700 | ||
1701 | /* This function inits fp content and resets the SB, RXQ and TXQ structures */ | |
1702 | static void qede_init_fp(struct qede_dev *edev) | |
1703 | { | |
1704 | int rss_id, txq_index, tc; | |
1705 | struct qede_fastpath *fp; | |
1706 | ||
1707 | for_each_rss(rss_id) { | |
1708 | fp = &edev->fp_array[rss_id]; | |
1709 | ||
1710 | fp->edev = edev; | |
1711 | fp->rss_id = rss_id; | |
1712 | ||
1713 | memset(&fp->napi, 0, sizeof(fp->napi)); | |
1714 | ||
1715 | memset(fp->sb_info, 0, sizeof(*fp->sb_info)); | |
1716 | ||
1717 | memset(fp->rxq, 0, sizeof(*fp->rxq)); | |
1718 | fp->rxq->rxq_id = rss_id; | |
1719 | ||
1720 | memset(fp->txqs, 0, (edev->num_tc * sizeof(*fp->txqs))); | |
1721 | for (tc = 0; tc < edev->num_tc; tc++) { | |
1722 | txq_index = tc * QEDE_RSS_CNT(edev) + rss_id; | |
1723 | fp->txqs[tc].index = txq_index; | |
1724 | } | |
1725 | ||
1726 | snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", | |
1727 | edev->ndev->name, rss_id); | |
1728 | } | |
1729 | } | |
1730 | ||
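| /* Publish the actual Tx/Rx queue counts to the networking stack */ | |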
1731 | static int qede_set_real_num_queues(struct qede_dev *edev) | |
1732 | { | |
1733 | int rc = 0; | |
1734 | ||
1735 | rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_CNT(edev)); | |
1736 | if (rc) { | |
1737 | DP_NOTICE(edev, "Failed to set real number of Tx queues\n"); | |
1738 | return rc; | |
1739 | } | |
1740 | rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_CNT(edev)); | |
1741 | if (rc) { | |
1742 | DP_NOTICE(edev, "Failed to set real number of Rx queues\n"); | |
1743 | return rc; | |
1744 | } | |
1745 | ||
1746 | return 0; | |
1747 | } | |
1748 | ||
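| /* Disable and then delete the per-fastpath NAPI contexts */ | |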
1749 | static void qede_napi_disable_remove(struct qede_dev *edev) | |
1750 | { | |
1751 | int i; | |
1752 | ||
1753 | for_each_rss(i) { | |
1754 | napi_disable(&edev->fp_array[i].napi); | |
1755 | ||
1756 | netif_napi_del(&edev->fp_array[i].napi); | |
1757 | } | |
1758 | } | |
1759 | ||
1760 | static void qede_napi_add_enable(struct qede_dev *edev) | |
1761 | { | |
1762 | int i; | |
1763 | ||
1764 | /* Add NAPI objects */ | |
1765 | for_each_rss(i) { | |
1766 | netif_napi_add(edev->ndev, &edev->fp_array[i].napi, | |
1767 | qede_poll, NAPI_POLL_WEIGHT); | |
1768 | napi_enable(&edev->fp_array[i].napi); | |
1769 | } | |
1770 | } | |
1771 | ||
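| /* Wait for any in-flight handler to finish, then release the fastpath | |
|  * IRQs (MSI-X) or clean the SIMD handlers (non-MSI-X). | |
|  */ | |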
1772 | static void qede_sync_free_irqs(struct qede_dev *edev) | |
1773 | { | |
1774 | int i; | |
1775 | ||
1776 | for (i = 0; i < edev->int_info.used_cnt; i++) { | |
1777 | if (edev->int_info.msix_cnt) { | |
1778 | synchronize_irq(edev->int_info.msix[i].vector); | |
1779 | free_irq(edev->int_info.msix[i].vector, | |
1780 | &edev->fp_array[i]); | |
1781 | } else { | |
1782 | edev->ops->common->simd_handler_clean(edev->cdev, i); | |
1783 | } | |
1784 | } | |
1785 | ||
1786 | edev->int_info.used_cnt = 0; | |
1787 | } | |
1788 | ||
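| /* Request one MSI-X vector per prepared RSS queue; the fastpath entry | |
|  * itself is the cookie handed back to the handler. | |
|  */ | |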
1789 | static int qede_req_msix_irqs(struct qede_dev *edev) | |
1790 | { | |
1791 | int i, rc; | |
1792 | ||
1793 | /* Sanity check: every prepared RSS queue needs its own MSI-X vector */ | |
1794 | if (QEDE_RSS_CNT(edev) > edev->int_info.msix_cnt) { | |
1795 | DP_ERR(edev, | |
1796 | "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n", | |
1797 | QEDE_RSS_CNT(edev), edev->int_info.msix_cnt); | |
1798 | return -EINVAL; | |
1799 | } | |
1800 | ||
1801 | for (i = 0; i < QEDE_RSS_CNT(edev); i++) { | |
1802 | rc = request_irq(edev->int_info.msix[i].vector, | |
1803 | qede_msix_fp_int, 0, edev->fp_array[i].name, | |
1804 | &edev->fp_array[i]); | |
1805 | if (rc) { | |
1806 | DP_ERR(edev, "Request fp %d irq failed\n", i); | |
1807 | qede_sync_free_irqs(edev); | |
1808 | return rc; | |
1809 | } | |
1810 | DP_VERBOSE(edev, NETIF_MSG_INTR, | |
1811 | "Requested fp irq for %s [entry %d]. Cookie is at %p\n", | |
1812 | edev->fp_array[i].name, i, | |
1813 | &edev->fp_array[i]); | |
1814 | edev->int_info.used_cnt++; | |
1815 | } | |
1816 | ||
1817 | return 0; | |
1818 | } | |
1819 | ||
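| /* Non-MSI-X fastpath handler - just schedules NAPI for the fastpath */ | |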
1820 | static void qede_simd_fp_handler(void *cookie) | |
1821 | { | |
1822 | struct qede_fastpath *fp = (struct qede_fastpath *)cookie; | |
1823 | ||
1824 | napi_schedule_irqoff(&fp->napi); | |
1825 | } | |
1826 | ||
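| /* Learn the interrupt layout from qed and attach either MSI-X vectors | |
|  * or SIMD callbacks to the fastpaths. | |
|  */ | |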
1827 | static int qede_setup_irqs(struct qede_dev *edev) | |
1828 | { | |
1829 | int i, rc = 0; | |
1830 | ||
1831 | /* Learn Interrupt configuration */ | |
1832 | rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info); | |
1833 | if (rc) | |
1834 | return rc; | |
1835 | ||
1836 | if (edev->int_info.msix_cnt) { | |
1837 | rc = qede_req_msix_irqs(edev); | |
1838 | if (rc) | |
1839 | return rc; | |
1840 | edev->ndev->irq = edev->int_info.msix[0].vector; | |
1841 | } else { | |
1842 | const struct qed_common_ops *ops; | |
1843 | ||
1844 | /* qed should receive the RSS ids and callbacks */ | |
1845 | ops = edev->ops->common; | |
1846 | for (i = 0; i < QEDE_RSS_CNT(edev); i++) | |
1847 | ops->simd_handler_config(edev->cdev, | |
1848 | &edev->fp_array[i], i, | |
1849 | qede_simd_fp_handler); | |
1850 | edev->int_info.used_cnt = QEDE_RSS_CNT(edev); | |
1851 | } | |
1852 | return 0; | |
1853 | } | |
1854 | ||
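| /* Poll until the Tx queue empties (SW consumer catches up with the | |
|  * producer), giving up after ~1000 polls of 1-2ms each; optionally ask | |
|  * the MCP to drain a stuck queue once and retry. Typical call, as used | |
|  * in qede_stop_queues() below: | |
|  * | |
|  *	rc = qede_drain_txq(edev, txq, true); | |
|  */ | |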
1855 | static int qede_drain_txq(struct qede_dev *edev, | |
1856 | struct qede_tx_queue *txq, | |
1857 | bool allow_drain) | |
1858 | { | |
1859 | int rc, cnt = 1000; | |
1860 | ||
1861 | while (txq->sw_tx_cons != txq->sw_tx_prod) { | |
1862 | if (!cnt) { | |
1863 | if (allow_drain) { | |
1864 | DP_NOTICE(edev, | |
1865 | "Tx queue[%d] is stuck, requesting MCP to drain\n", | |
1866 | txq->index); | |
1867 | rc = edev->ops->common->drain(edev->cdev); | |
1868 | if (rc) | |
1869 | return rc; | |
1870 | return qede_drain_txq(edev, txq, false); | |
1871 | } | |
1872 | DP_NOTICE(edev, | |
1873 | "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n", | |
1874 | txq->index, txq->sw_tx_prod, | |
1875 | txq->sw_tx_cons); | |
1876 | return -ENODEV; | |
1877 | } | |
1878 | cnt--; | |
1879 | usleep_range(1000, 2000); | |
1880 | barrier(); | |
1881 | } | |
1882 | ||
1883 | /* FW finished processing, wait for HW to transmit all tx packets */ | |
1884 | usleep_range(1000, 2000); | |
1885 | ||
1886 | return 0; | |
1887 | } | |
1888 | ||
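| /* Teardown counterpart of qede_start_queues() - deactivate the vport, | |
|  * drain and stop every Tx/Rx queue, then stop the vport itself. | |
|  */ | |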
1889 | static int qede_stop_queues(struct qede_dev *edev) | |
1890 | { | |
1891 | struct qed_update_vport_params vport_update_params; | |
1892 | struct qed_dev *cdev = edev->cdev; | |
1893 | int rc, tc, i; | |
1894 | ||
1895 | /* Disable the vport */ | |
1896 | memset(&vport_update_params, 0, sizeof(vport_update_params)); | |
1897 | vport_update_params.vport_id = 0; | |
1898 | vport_update_params.update_vport_active_flg = 1; | |
1899 | vport_update_params.vport_active_flg = 0; | |
1900 | vport_update_params.update_rss_flg = 0; | |
1901 | ||
1902 | rc = edev->ops->vport_update(cdev, &vport_update_params); | |
1903 | if (rc) { | |
1904 | DP_ERR(edev, "Failed to update vport\n"); | |
1905 | return rc; | |
1906 | } | |
1907 | ||
1908 | /* Flush Tx queues. If needed, request drain from MCP */ | |
1909 | for_each_rss(i) { | |
1910 | struct qede_fastpath *fp = &edev->fp_array[i]; | |
1911 | ||
1912 | for (tc = 0; tc < edev->num_tc; tc++) { | |
1913 | struct qede_tx_queue *txq = &fp->txqs[tc]; | |
1914 | ||
1915 | rc = qede_drain_txq(edev, txq, true); | |
1916 | if (rc) | |
1917 | return rc; | |
1918 | } | |
1919 | } | |
1920 | ||
1921 | /* Stop all queues in reverse order */ | |
1922 | for (i = QEDE_RSS_CNT(edev) - 1; i >= 0; i--) { | |
1923 | struct qed_stop_rxq_params rx_params; | |
1924 | ||
1925 | /* Stop the Tx queue(s) */ | |
1926 | for (tc = 0; tc < edev->num_tc; tc++) { | |
1927 | struct qed_stop_txq_params tx_params; | |
1928 | ||
1929 | tx_params.rss_id = i; | |
1930 | tx_params.tx_queue_id = tc * QEDE_RSS_CNT(edev) + i; | |
1931 | rc = edev->ops->q_tx_stop(cdev, &tx_params); | |
1932 | if (rc) { | |
1933 | DP_ERR(edev, "Failed to stop TXQ #%d\n", | |
1934 | tx_params.tx_queue_id); | |
1935 | return rc; | |
1936 | } | |
1937 | } | |
1938 | ||
1939 | /* Stop the Rx queue */ | |
1940 | memset(&rx_params, 0, sizeof(rx_params)); | |
1941 | rx_params.rss_id = i; | |
1942 | rx_params.rx_queue_id = i; | |
1943 | ||
1944 | rc = edev->ops->q_rx_stop(cdev, &rx_params); | |
1945 | if (rc) { | |
1946 | DP_ERR(edev, "Failed to stop RXQ #%d\n", i); | |
1947 | return rc; | |
1948 | } | |
1949 | } | |
1950 | ||
1951 | /* Stop the vport */ | |
1952 | rc = edev->ops->vport_stop(cdev, 0); | |
1953 | if (rc) | |
1954 | DP_ERR(edev, "Failed to stop VPORT\n"); | |
1955 | ||
1956 | return rc; | |
1957 | } | |
1958 | ||
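| /* Bring-up counterpart of qede_stop_queues() - start the vport, start | |
|  * each Rx queue and per-TC Tx queue, program the Tx doorbell fields, | |
|  * then activate the vport (with RSS when more than one queue exists). | |
|  */ | |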
1959 | static int qede_start_queues(struct qede_dev *edev) | |
1960 | { | |
1961 | int rc, tc, i; | |
1962 | int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1; | |
1963 | struct qed_dev *cdev = edev->cdev; | |
1964 | struct qed_update_vport_rss_params *rss_params = &edev->rss_params; | |
1965 | struct qed_update_vport_params vport_update_params; | |
1966 | struct qed_queue_start_common_params q_params; | |
1967 | ||
1968 | if (!edev->num_rss) { | |
1969 | DP_ERR(edev, | |
1970 | "Cannot update V-VPORT as active as there are no Rx queues\n"); | |
1971 | return -EINVAL; | |
1972 | } | |
1973 | ||
1974 | rc = edev->ops->vport_start(cdev, vport_id, | |
1975 | edev->ndev->mtu, | |
1976 | drop_ttl0_flg, | |
1977 | vlan_removal_en); | |
1978 | ||
1979 | if (rc) { | |
1980 | DP_ERR(edev, "Start V-PORT failed %d\n", rc); | |
1981 | return rc; | |
1982 | } | |
1983 | ||
1984 | DP_VERBOSE(edev, NETIF_MSG_IFUP, | |
1985 | "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", | |
1986 | vport_id, edev->ndev->mtu + ETH_HLEN, vlan_removal_en); | |
1987 | ||
1988 | for_each_rss(i) { | |
1989 | struct qede_fastpath *fp = &edev->fp_array[i]; | |
1990 | dma_addr_t phys_table = fp->rxq->rx_comp_ring.pbl.p_phys_table; | |
1991 | ||
1992 | memset(&q_params, 0, sizeof(q_params)); | |
1993 | q_params.rss_id = i; | |
1994 | q_params.queue_id = i; | |
1995 | q_params.vport_id = 0; | |
1996 | q_params.sb = fp->sb_info->igu_sb_id; | |
1997 | q_params.sb_idx = RX_PI; | |
1998 | ||
1999 | rc = edev->ops->q_rx_start(cdev, &q_params, | |
2000 | fp->rxq->rx_buf_size, | |
2001 | fp->rxq->rx_bd_ring.p_phys_addr, | |
2002 | phys_table, | |
2003 | fp->rxq->rx_comp_ring.page_cnt, | |
2004 | &fp->rxq->hw_rxq_prod_addr); | |
2005 | if (rc) { | |
2006 | DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); | |
2007 | return rc; | |
2008 | } | |
2009 | ||
2010 | fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; | |
2011 | ||
2012 | qede_update_rx_prod(edev, fp->rxq); | |
2013 | ||
2014 | for (tc = 0; tc < edev->num_tc; tc++) { | |
2015 | struct qede_tx_queue *txq = &fp->txqs[tc]; | |
2016 | int txq_index = tc * QEDE_RSS_CNT(edev) + i; | |
2017 | ||
2018 | memset(&q_params, 0, sizeof(q_params)); | |
2019 | q_params.rss_id = i; | |
2020 | q_params.queue_id = txq_index; | |
2021 | q_params.vport_id = 0; | |
2022 | q_params.sb = fp->sb_info->igu_sb_id; | |
2023 | q_params.sb_idx = TX_PI(tc); | |
2024 | ||
2025 | rc = edev->ops->q_tx_start(cdev, &q_params, | |
2026 | txq->tx_pbl.pbl.p_phys_table, | |
2027 | txq->tx_pbl.page_cnt, | |
2028 | &txq->doorbell_addr); | |
2029 | if (rc) { | |
2030 | DP_ERR(edev, "Start TXQ #%d failed %d\n", | |
2031 | txq_index, rc); | |
2032 | return rc; | |
2033 | } | |
2034 | ||
2035 | txq->hw_cons_ptr = | |
2036 | &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; | |
2037 | SET_FIELD(txq->tx_db.data.params, | |
2038 | ETH_DB_DATA_DEST, DB_DEST_XCM); | |
2039 | SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, | |
2040 | DB_AGG_CMD_SET); | |
2041 | SET_FIELD(txq->tx_db.data.params, | |
2042 | ETH_DB_DATA_AGG_VAL_SEL, | |
2043 | DQ_XCM_ETH_TX_BD_PROD_CMD); | |
2044 | ||
2045 | txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; | |
2046 | } | |
2047 | } | |
2048 | ||
2049 | /* Prepare and send the vport enable */ | |
2050 | memset(&vport_update_params, 0, sizeof(vport_update_params)); | |
2051 | vport_update_params.vport_id = vport_id; | |
2052 | vport_update_params.update_vport_active_flg = 1; | |
2053 | vport_update_params.vport_active_flg = 1; | |
2054 | ||
2055 | /* Fill struct with RSS params */ | |
2056 | if (QEDE_RSS_CNT(edev) > 1) { | |
2057 | vport_update_params.update_rss_flg = 1; | |
| /* 128 is the size of the RSS indirection table */ | |
2058 | for (i = 0; i < 128; i++) | |
2059 | rss_params->rss_ind_table[i] = | |
2060 | ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); | |
2061 | netdev_rss_key_fill(rss_params->rss_key, | |
2062 | sizeof(rss_params->rss_key)); | |
2063 | } else { | |
2064 | memset(rss_params, 0, sizeof(*rss_params)); | |
2065 | } | |
2066 | memcpy(&vport_update_params.rss_params, rss_params, | |
2067 | sizeof(*rss_params)); | |
2068 | ||
2069 | rc = edev->ops->vport_update(cdev, &vport_update_params); | |
2070 | if (rc) { | |
2071 | DP_ERR(edev, "Update V-PORT failed %d\n", rc); | |
2072 | return rc; | |
2073 | } | |
2074 | ||
2075 | return 0; | |
2076 | } | |
2077 | ||
0d8e0aa0 SK |
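| /* Build and submit one multicast filter command covering num_macs | |
|  * addresses taken from the flat 'mac' array. Illustrative use (both | |
|  * calls appear in qede_configure_mcast_filtering() below): | |
|  * | |
|  *	qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, mc_macs, 1); | |
|  *	qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, mc_macs, mc_count); | |
|  */ | |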
2078 | static int qede_set_mcast_rx_mac(struct qede_dev *edev, |
2079 | enum qed_filter_xcast_params_type opcode, | |
2080 | unsigned char *mac, int num_macs) | |
2081 | { | |
2082 | struct qed_filter_params filter_cmd; | |
2083 | int i; | |
2084 | ||
2085 | memset(&filter_cmd, 0, sizeof(filter_cmd)); | |
2086 | filter_cmd.type = QED_FILTER_TYPE_MCAST; | |
2087 | filter_cmd.filter.mcast.type = opcode; | |
2088 | filter_cmd.filter.mcast.num = num_macs; | |
2089 | ||
2090 | for (i = 0; i < num_macs; i++, mac += ETH_ALEN) | |
2091 | ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac); | |
2092 | ||
2093 | return edev->ops->filter_config(edev->cdev, &filter_cmd); | |
2094 | } | |
2095 | ||
2950219d YM |
2096 | enum qede_unload_mode { |
2097 | QEDE_UNLOAD_NORMAL, | |
2098 | }; | |
2099 | ||
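| /* Stop the data-path and release all load-time resources; qede_lock is | |
|  * held so the transition to QEDE_STATE_CLOSED is atomic with respect | |
|  * to the slow-path task. | |
|  */ | |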
2100 | static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) | |
2101 | { | |
2102 | int rc; | |
2103 | ||
2104 | DP_INFO(edev, "Starting qede unload\n"); | |
2105 | ||
0d8e0aa0 SK |
2106 | mutex_lock(&edev->qede_lock); |
2107 | edev->state = QEDE_STATE_CLOSED; | |
2108 | ||
2950219d YM |
2109 | /* Close OS Tx */ |
2110 | netif_tx_disable(edev->ndev); | |
2111 | netif_carrier_off(edev->ndev); | |
2112 | ||
2113 | rc = qede_stop_queues(edev); | |
2114 | if (rc) { | |
2115 | qede_sync_free_irqs(edev); | |
2116 | goto out; | |
2117 | } | |
2118 | ||
2119 | DP_INFO(edev, "Stopped Queues\n"); | |
2120 | ||
2121 | edev->ops->fastpath_stop(edev->cdev); | |
2122 | ||
2123 | /* Release the interrupts */ | |
2124 | qede_sync_free_irqs(edev); | |
2125 | edev->ops->common->set_fp_int(edev->cdev, 0); | |
2126 | ||
2127 | qede_napi_disable_remove(edev); | |
2128 | ||
2129 | qede_free_mem_load(edev); | |
2130 | qede_free_fp_array(edev); | |
2131 | ||
2132 | out: | |
2133 | mutex_unlock(&edev->qede_lock); | |
2134 | DP_INFO(edev, "Ending qede unload\n"); | |
2135 | } | |
2136 | ||
2137 | enum qede_load_mode { | |
2138 | QEDE_LOAD_NORMAL, | |
2139 | }; | |
2140 | ||
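| /* Mirror image of qede_unload() - allocate the fastpath resources, | |
|  * attach NAPI and IRQs, start the queues and mark the device OPEN. | |
|  * Each error label unwinds exactly the steps that already succeeded. | |
|  */ | |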
2141 | static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) | |
2142 | { | |
2143 | int rc; | |
2144 | ||
2145 | DP_INFO(edev, "Starting qede load\n"); | |
2146 | ||
2147 | rc = qede_set_num_queues(edev); | |
2148 | if (rc) | |
2149 | goto err0; | |
2150 | ||
2151 | rc = qede_alloc_fp_array(edev); | |
2152 | if (rc) | |
2153 | goto err0; | |
2154 | ||
2155 | qede_init_fp(edev); | |
2156 | ||
2157 | rc = qede_alloc_mem_load(edev); | |
2158 | if (rc) | |
2159 | goto err1; | |
2160 | DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", | |
2161 | QEDE_RSS_CNT(edev), edev->num_tc); | |
2162 | ||
2163 | rc = qede_set_real_num_queues(edev); | |
2164 | if (rc) | |
2165 | goto err2; | |
2166 | ||
2167 | qede_napi_add_enable(edev); | |
2168 | DP_INFO(edev, "Napi added and enabled\n"); | |
2169 | ||
2170 | rc = qede_setup_irqs(edev); | |
2171 | if (rc) | |
2172 | goto err3; | |
2173 | DP_INFO(edev, "Setup IRQs succeeded\n"); | |
2174 | ||
2175 | rc = qede_start_queues(edev); | |
2176 | if (rc) | |
2177 | goto err4; | |
2178 | DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); | |
2179 | ||
2180 | /* Add primary mac and set Rx filters */ | |
2181 | ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr); | |
2182 | ||
0d8e0aa0 SK |
2183 | mutex_lock(&edev->qede_lock); |
2184 | edev->state = QEDE_STATE_OPEN; | |
2185 | mutex_unlock(&edev->qede_lock); | |
2950219d YM |
2186 | DP_INFO(edev, "Ending qede load successfully\n"); | |
2187 | ||
2188 | return 0; | |
2189 | ||
2190 | err4: | |
2191 | qede_sync_free_irqs(edev); | |
2192 | memset(&edev->int_info, 0, sizeof(struct qed_int_info)); | |
2193 | err3: | |
2194 | qede_napi_disable_remove(edev); | |
2195 | err2: | |
2196 | qede_free_mem_load(edev); | |
2197 | err1: | |
2198 | edev->ops->common->set_fp_int(edev->cdev, 0); | |
2199 | qede_free_fp_array(edev); | |
2200 | edev->num_rss = 0; | |
2201 | err0: | |
2202 | return rc; | |
2203 | } | |
2204 | ||
2205 | /* called with rtnl_lock */ | |
2206 | static int qede_open(struct net_device *ndev) | |
2207 | { | |
2208 | struct qede_dev *edev = netdev_priv(ndev); | |
2209 | ||
2210 | netif_carrier_off(ndev); | |
2211 | ||
2212 | edev->ops->common->set_power_state(edev->cdev, PCI_D0); | |
2213 | ||
2214 | return qede_load(edev, QEDE_LOAD_NORMAL); | |
2215 | } | |
2216 | ||
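| /* called with rtnl_lock */ | |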
2217 | static int qede_close(struct net_device *ndev) | |
2218 | { | |
2219 | struct qede_dev *edev = netdev_priv(ndev); | |
2220 | ||
2221 | qede_unload(edev, QEDE_UNLOAD_NORMAL); | |
2222 | ||
2223 | return 0; | |
2224 | } | |
0d8e0aa0 SK |
2225 | |
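| /* ndo_set_mac_address - swap the primary MAC filter when the device is | |
|  * running; otherwise just record the new address for the next load. | |
|  */ | |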
2226 | static int qede_set_mac_addr(struct net_device *ndev, void *p) | |
2227 | { | |
2228 | struct qede_dev *edev = netdev_priv(ndev); | |
2229 | struct sockaddr *addr = p; | |
2230 | int rc; | |
2231 | ||
2232 | ASSERT_RTNL(); /* @@@TBD To be removed */ | |
2233 | ||
2234 | DP_INFO(edev, "Set_mac_addr called\n"); | |
2235 | ||
2236 | if (!is_valid_ether_addr(addr->sa_data)) { | |
2237 | DP_NOTICE(edev, "The MAC address is not valid\n"); | |
2238 | return -EADDRNOTAVAIL; | |
2239 | } | |
2240 | ||
2241 | ether_addr_copy(ndev->dev_addr, addr->sa_data); | |
2242 | ||
2243 | if (!netif_running(ndev)) { | |
2244 | DP_NOTICE(edev, "The device is currently down\n"); | |
2245 | return 0; | |
2246 | } | |
2247 | ||
2248 | /* Remove the previous primary mac */ | |
2249 | rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, | |
2250 | edev->primary_mac); | |
2251 | if (rc) | |
2252 | return rc; | |
2253 | ||
2254 | /* Add MAC filter according to the new unicast HW MAC address */ | |
2255 | ether_addr_copy(edev->primary_mac, ndev->dev_addr); | |
2256 | return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, | |
2257 | edev->primary_mac); | |
2258 | } | |
2259 | ||
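| /* Snapshot the multicast list and program it, falling back to | |
|  * multicast-promiscuous mode when IFF_ALLMULTI is set or more than 64 | |
|  * addresses (the current filtering resource limit) are configured. | |
|  */ | |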
2260 | static int | |
2261 | qede_configure_mcast_filtering(struct net_device *ndev, | |
2262 | enum qed_filter_rx_mode_type *accept_flags) | |
2263 | { | |
2264 | struct qede_dev *edev = netdev_priv(ndev); | |
2265 | unsigned char *mc_macs, *temp; | |
2266 | struct netdev_hw_addr *ha; | |
2267 | int rc = 0, mc_count; | |
2268 | size_t size; | |
2269 | ||
2270 | size = 64 * ETH_ALEN; | |
2271 | ||
2272 | mc_macs = kzalloc(size, GFP_KERNEL); | |
2273 | if (!mc_macs) { | |
2274 | DP_NOTICE(edev, | |
2275 | "Failed to allocate memory for multicast MACs\n"); | |
2276 | rc = -ENOMEM; | |
2277 | goto exit; | |
2278 | } | |
2279 | ||
2280 | temp = mc_macs; | |
2281 | ||
2282 | /* Remove all previously configured MAC filters */ | |
2283 | rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL, | |
2284 | mc_macs, 1); | |
2285 | if (rc) | |
2286 | goto exit; | |
2287 | ||
2288 | netif_addr_lock_bh(ndev); | |
2289 | ||
2290 | mc_count = netdev_mc_count(ndev); | |
| /* The promiscuous fallback below triggers only above 64 entries, so | |
|  * a list of exactly 64 must still be copied and programmed here. | |
|  */ | |
2291 | if (mc_count <= 64) { | |
2292 | netdev_for_each_mc_addr(ha, ndev) { | |
2293 | ether_addr_copy(temp, ha->addr); | |
2294 | temp += ETH_ALEN; | |
2295 | } | |
2296 | } | |
2297 | ||
2298 | netif_addr_unlock_bh(ndev); | |
2299 | ||
2300 | /* Check for all multicast @@@TBD resource allocation */ | |
2301 | if ((ndev->flags & IFF_ALLMULTI) || | |
2302 | (mc_count > 64)) { | |
2303 | if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR) | |
2304 | *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; | |
2305 | } else { | |
2306 | /* Add all multicast MAC filters */ | |
2307 | rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD, | |
2308 | mc_macs, mc_count); | |
2309 | } | |
2310 | ||
2311 | exit: | |
2312 | kfree(mc_macs); | |
2313 | return rc; | |
2314 | } | |
2315 | ||
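| /* ndo_set_rx_mode - may run in atomic context, so defer the actual | |
|  * filter configuration to the slow-path task. | |
|  */ | |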
2316 | static void qede_set_rx_mode(struct net_device *ndev) | |
2317 | { | |
2318 | struct qede_dev *edev = netdev_priv(ndev); | |
2319 | ||
2320 | DP_INFO(edev, "qede_set_rx_mode called\n"); | |
2321 | ||
2322 | if (edev->state != QEDE_STATE_OPEN) { | |
2323 | DP_INFO(edev, | |
2324 | "qede_set_rx_mode called while interface is down\n"); | |
2325 | } else { | |
2326 | set_bit(QEDE_SP_RX_MODE, &edev->sp_flags); | |
2327 | schedule_delayed_work(&edev->sp_task, 0); | |
2328 | } | |
2329 | } | |
2330 | ||
2331 | /* Must be called with qede_lock held */ | |
2332 | static void qede_config_rx_mode(struct net_device *ndev) | |
2333 | { | |
2334 | enum qed_filter_rx_mode_type accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR; | |
2335 | struct qede_dev *edev = netdev_priv(ndev); | |
2336 | struct qed_filter_params rx_mode; | |
2337 | unsigned char *uc_macs, *temp; | |
2338 | struct netdev_hw_addr *ha; | |
2339 | int rc, uc_count; | |
2340 | size_t size; | |
2341 | ||
2342 | netif_addr_lock_bh(ndev); | |
2343 | ||
2344 | uc_count = netdev_uc_count(ndev); | |
2345 | size = uc_count * ETH_ALEN; | |
2346 | ||
2347 | uc_macs = kzalloc(size, GFP_ATOMIC); | |
2348 | if (!uc_macs) { | |
2349 | DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n"); | |
2350 | netif_addr_unlock_bh(ndev); | |
2351 | return; | |
2352 | } | |
2353 | ||
2354 | temp = uc_macs; | |
2355 | netdev_for_each_uc_addr(ha, ndev) { | |
2356 | ether_addr_copy(temp, ha->addr); | |
2357 | temp += ETH_ALEN; | |
2358 | } | |
2359 | ||
2360 | netif_addr_unlock_bh(ndev); | |
2361 | ||
2362 | /* Configure the struct for the Rx mode */ | |
2363 | memset(&rx_mode, 0, sizeof(struct qed_filter_params)); | |
2364 | rx_mode.type = QED_FILTER_TYPE_RX_MODE; | |
2365 | ||
2366 | /* Remove all previous unicast secondary macs and multicast macs | |
2367 | * (configure / leave the primary mac) | |
2368 | */ | |
2369 | rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE, | |
2370 | edev->primary_mac); | |
2371 | if (rc) | |
2372 | goto out; | |
2373 | ||
2374 | /* Check for promiscuous */ | |
2375 | if ((ndev->flags & IFF_PROMISC) || | |
2376 | (uc_count > 15)) { /* @@@TBD resource allocation - 1 */ | |
2377 | accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; | |
2378 | } else { | |
2379 | /* Add MAC filters according to the unicast secondary macs */ | |
2380 | int i; | |
2381 | ||
2382 | temp = uc_macs; | |
2383 | for (i = 0; i < uc_count; i++) { | |
2384 | rc = qede_set_ucast_rx_mac(edev, | |
2385 | QED_FILTER_XCAST_TYPE_ADD, | |
2386 | temp); | |
2387 | if (rc) | |
2388 | goto out; | |
2389 | ||
2390 | temp += ETH_ALEN; | |
2391 | } | |
2392 | ||
2393 | rc = qede_configure_mcast_filtering(ndev, &accept_flags); | |
2394 | if (rc) | |
2395 | goto out; | |
2396 | } | |
2397 | ||
2398 | rx_mode.filter.accept_flags = accept_flags; | |
2399 | edev->ops->filter_config(edev->cdev, &rx_mode); | |
2400 | out: | |
2401 | kfree(uc_macs); | |
2402 | } |