Commit | Line | Data |
---|---|---|
c4e84bde RM |
1 | /* |
2 | * QLogic qlge NIC HBA Driver | |
3 | * Copyright (c) 2003-2008 QLogic Corporation | |
4 | * See LICENSE.qlge for copyright and licensing details. | |
5 | * Author: Linux qlge network device driver by | |
6 | * Ron Mercer <ron.mercer@qlogic.com> | |
7 | */ | |
8 | #include <linux/kernel.h> | |
9 | #include <linux/init.h> | |
18c49b91 | 10 | #include <linux/bitops.h> |
c4e84bde RM |
11 | #include <linux/types.h> |
12 | #include <linux/module.h> | |
13 | #include <linux/list.h> | |
14 | #include <linux/pci.h> | |
15 | #include <linux/dma-mapping.h> | |
16 | #include <linux/pagemap.h> | |
17 | #include <linux/sched.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/dmapool.h> | |
20 | #include <linux/mempool.h> | |
21 | #include <linux/spinlock.h> | |
22 | #include <linux/kthread.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/errno.h> | |
25 | #include <linux/ioport.h> | |
26 | #include <linux/in.h> | |
27 | #include <linux/ip.h> | |
28 | #include <linux/ipv6.h> | |
29 | #include <net/ipv6.h> | |
30 | #include <linux/tcp.h> | |
31 | #include <linux/udp.h> | |
32 | #include <linux/if_arp.h> | |
33 | #include <linux/if_ether.h> | |
34 | #include <linux/netdevice.h> | |
35 | #include <linux/etherdevice.h> | |
36 | #include <linux/ethtool.h> | |
18c49b91 | 37 | #include <linux/if_vlan.h> |
c4e84bde | 38 | #include <linux/skbuff.h> |
c4e84bde RM |
39 | #include <linux/delay.h> |
40 | #include <linux/mm.h> | |
41 | #include <linux/vmalloc.h> | |
70c71606 | 42 | #include <linux/prefetch.h> |
b7c6bfb7 | 43 | #include <net/ip6_checksum.h> |
c4e84bde RM |
44 | |
45 | #include "qlge.h" | |
46 | ||
47 | char qlge_driver_name[] = DRV_NAME; | |
48 | const char qlge_driver_version[] = DRV_VERSION; | |
49 | ||
50 | MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>"); | |
51 | MODULE_DESCRIPTION(DRV_STRING " "); | |
52 | MODULE_LICENSE("GPL"); | |
53 | MODULE_VERSION(DRV_VERSION); | |
54 | ||
55 | static const u32 default_msg = | |
56 | NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | | |
57 | /* NETIF_MSG_TIMER | */ | |
58 | NETIF_MSG_IFDOWN | | |
59 | NETIF_MSG_IFUP | | |
60 | NETIF_MSG_RX_ERR | | |
61 | NETIF_MSG_TX_ERR | | |
4974097a RM |
62 | /* NETIF_MSG_TX_QUEUED | */ |
63 | /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */ | |
c4e84bde RM |
64 | /* NETIF_MSG_PKTDATA | */ |
65 | NETIF_MSG_HW | NETIF_MSG_WOL | 0; | |
66 | ||
84cf7029 SR |
67 | static int debug = -1; /* defaults above */ |
68 | module_param(debug, int, 0664); | |
c4e84bde RM |
69 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); |
70 | ||
71 | #define MSIX_IRQ 0 | |
72 | #define MSI_IRQ 1 | |
73 | #define LEG_IRQ 2 | |
a5a62a1c | 74 | static int qlge_irq_type = MSIX_IRQ; |
84cf7029 | 75 | module_param(qlge_irq_type, int, 0664); |
a5a62a1c | 76 | MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); |
c4e84bde | 77 | |
8aae2600 RM |
78 | static int qlge_mpi_coredump; |
79 | module_param(qlge_mpi_coredump, int, 0); | |
80 | MODULE_PARM_DESC(qlge_mpi_coredump, | |
81 | "Option to enable MPI firmware dump. " | |
d5c1da56 RM |
82 | "Default is OFF - Do Not allocate memory. "); |
83 | ||
84 | static int qlge_force_coredump; | |
85 | module_param(qlge_force_coredump, int, 0); | |
86 | MODULE_PARM_DESC(qlge_force_coredump, | |
87 | "Option to allow force of firmware core dump. " | |
88 | "Default is OFF - Do not allow."); | |
8aae2600 | 89 | |
a3aa1884 | 90 | static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = { |
b0c2aadf | 91 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, |
cdca8d02 | 92 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)}, |
c4e84bde RM |
93 | /* required last entry */ |
94 | {0,} | |
95 | }; | |
96 | ||
97 | MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); | |
98 | ||
ac409215 | 99 | static int ql_wol(struct ql_adapter *qdev); |
100 | static void qlge_set_multicast_list(struct net_device *ndev); | |
101 | ||
c4e84bde RM |
102 | /* This hardware semaphore causes exclusive access to |
103 | * resources shared between the NIC driver, MPI firmware, | |
104 | * FCOE firmware and the FC driver. | |
105 | */ | |
106 | static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) | |
107 | { | |
108 | u32 sem_bits = 0; | |
109 | ||
110 | switch (sem_mask) { | |
111 | case SEM_XGMAC0_MASK: | |
112 | sem_bits = SEM_SET << SEM_XGMAC0_SHIFT; | |
113 | break; | |
114 | case SEM_XGMAC1_MASK: | |
115 | sem_bits = SEM_SET << SEM_XGMAC1_SHIFT; | |
116 | break; | |
117 | case SEM_ICB_MASK: | |
118 | sem_bits = SEM_SET << SEM_ICB_SHIFT; | |
119 | break; | |
120 | case SEM_MAC_ADDR_MASK: | |
121 | sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT; | |
122 | break; | |
123 | case SEM_FLASH_MASK: | |
124 | sem_bits = SEM_SET << SEM_FLASH_SHIFT; | |
125 | break; | |
126 | case SEM_PROBE_MASK: | |
127 | sem_bits = SEM_SET << SEM_PROBE_SHIFT; | |
128 | break; | |
129 | case SEM_RT_IDX_MASK: | |
130 | sem_bits = SEM_SET << SEM_RT_IDX_SHIFT; | |
131 | break; | |
132 | case SEM_PROC_REG_MASK: | |
133 | sem_bits = SEM_SET << SEM_PROC_REG_SHIFT; | |
134 | break; | |
135 | default: | |
ae9540f7 | 136 | netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n"); |
c4e84bde RM |
137 | return -EINVAL; |
138 | } | |
139 | ||
140 | ql_write32(qdev, SEM, sem_bits | sem_mask); | |
141 | return !(ql_read32(qdev, SEM) & sem_bits); | |
142 | } | |
143 | ||
144 | int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) | |
145 | { | |
0857e9d7 | 146 | unsigned int wait_count = 30; |
c4e84bde RM |
147 | do { |
148 | if (!ql_sem_trylock(qdev, sem_mask)) | |
149 | return 0; | |
0857e9d7 RM |
150 | udelay(100); |
151 | } while (--wait_count); | |
c4e84bde RM |
152 | return -ETIMEDOUT; |
153 | } | |
154 | ||
155 | void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) | |
156 | { | |
157 | ql_write32(qdev, SEM, sem_mask); | |
158 | ql_read32(qdev, SEM); /* flush */ | |
159 | } | |
160 | ||
161 | /* This function waits for a specific bit to come ready | |
162 | * in a given register. It is used mostly by the initialize | |
163 | * process, but is also used in kernel thread API such as | |
164 | * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid. | |
165 | */ | |
166 | int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) | |
167 | { | |
168 | u32 temp; | |
169 | int count = UDELAY_COUNT; | |
170 | ||
171 | while (count) { | |
172 | temp = ql_read32(qdev, reg); | |
173 | ||
174 | /* check for errors */ | |
175 | if (temp & err_bit) { | |
ae9540f7 JP |
176 | netif_alert(qdev, probe, qdev->ndev, |
177 | "register 0x%.08x access error, value = 0x%.08x!.\n", | |
178 | reg, temp); | |
c4e84bde RM |
179 | return -EIO; |
180 | } else if (temp & bit) | |
181 | return 0; | |
182 | udelay(UDELAY_DELAY); | |
183 | count--; | |
184 | } | |
ae9540f7 JP |
185 | netif_alert(qdev, probe, qdev->ndev, |
186 | "Timed out waiting for reg %x to come ready.\n", reg); | |
c4e84bde RM |
187 | return -ETIMEDOUT; |
188 | } | |
189 | ||
190 | /* The CFG register is used to download TX and RX control blocks | |
191 | * to the chip. This function waits for an operation to complete. | |
192 | */ | |
193 | static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) | |
194 | { | |
195 | int count = UDELAY_COUNT; | |
196 | u32 temp; | |
197 | ||
198 | while (count) { | |
199 | temp = ql_read32(qdev, CFG); | |
200 | if (temp & CFG_LE) | |
201 | return -EIO; | |
202 | if (!(temp & bit)) | |
203 | return 0; | |
204 | udelay(UDELAY_DELAY); | |
205 | count--; | |
206 | } | |
207 | return -ETIMEDOUT; | |
208 | } | |
209 | ||
210 | ||
211 | /* Used to issue init control blocks to hw. Maps control block, | |
212 | * sets address, triggers download, waits for completion. | |
213 | */ | |
214 | int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, | |
215 | u16 q_id) | |
216 | { | |
217 | u64 map; | |
218 | int status = 0; | |
219 | int direction; | |
220 | u32 mask; | |
221 | u32 value; | |
222 | ||
223 | direction = | |
224 | (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE : | |
225 | PCI_DMA_FROMDEVICE; | |
226 | ||
227 | map = pci_map_single(qdev->pdev, ptr, size, direction); | |
228 | if (pci_dma_mapping_error(qdev->pdev, map)) { | |
ae9540f7 | 229 | netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n"); |
c4e84bde RM |
230 | return -ENOMEM; |
231 | } | |
232 | ||
4322c5be RM |
233 | status = ql_sem_spinlock(qdev, SEM_ICB_MASK); |
234 | if (status) | |
235 | return status; | |
236 | ||
c4e84bde RM |
237 | status = ql_wait_cfg(qdev, bit); |
238 | if (status) { | |
ae9540f7 JP |
239 | netif_err(qdev, ifup, qdev->ndev, |
240 | "Timed out waiting for CFG to come ready.\n"); | |
c4e84bde RM |
241 | goto exit; |
242 | } | |
243 | ||
c4e84bde RM |
244 | ql_write32(qdev, ICB_L, (u32) map); |
245 | ql_write32(qdev, ICB_H, (u32) (map >> 32)); | |
c4e84bde RM |
246 | |
247 | mask = CFG_Q_MASK | (bit << 16); | |
248 | value = bit | (q_id << CFG_Q_SHIFT); | |
249 | ql_write32(qdev, CFG, (mask | value)); | |
250 | ||
251 | /* | |
252 | * Wait for the bit to clear after signaling hw. | |
253 | */ | |
254 | status = ql_wait_cfg(qdev, bit); | |
255 | exit: | |
4322c5be | 256 | ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ |
c4e84bde RM |
257 | pci_unmap_single(qdev->pdev, map, size, direction); |
258 | return status; | |
259 | } | |
260 | ||
261 | /* Get a specific MAC address from the CAM. Used for debug and reg dump. */ | |
262 | int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, | |
263 | u32 *value) | |
264 | { | |
265 | u32 offset = 0; | |
266 | int status; | |
267 | ||
c4e84bde RM |
268 | switch (type) { |
269 | case MAC_ADDR_TYPE_MULTI_MAC: | |
270 | case MAC_ADDR_TYPE_CAM_MAC: | |
271 | { | |
272 | status = | |
273 | ql_wait_reg_rdy(qdev, | |
939678f8 | 274 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
275 | if (status) |
276 | goto exit; | |
277 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | |
278 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
279 | MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ | |
280 | status = | |
281 | ql_wait_reg_rdy(qdev, | |
939678f8 | 282 | MAC_ADDR_IDX, MAC_ADDR_MR, 0); |
c4e84bde RM |
283 | if (status) |
284 | goto exit; | |
285 | *value++ = ql_read32(qdev, MAC_ADDR_DATA); | |
286 | status = | |
287 | ql_wait_reg_rdy(qdev, | |
939678f8 | 288 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
289 | if (status) |
290 | goto exit; | |
291 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | |
292 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
293 | MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ | |
294 | status = | |
295 | ql_wait_reg_rdy(qdev, | |
939678f8 | 296 | MAC_ADDR_IDX, MAC_ADDR_MR, 0); |
c4e84bde RM |
297 | if (status) |
298 | goto exit; | |
299 | *value++ = ql_read32(qdev, MAC_ADDR_DATA); | |
300 | if (type == MAC_ADDR_TYPE_CAM_MAC) { | |
301 | status = | |
302 | ql_wait_reg_rdy(qdev, | |
939678f8 | 303 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
304 | if (status) |
305 | goto exit; | |
306 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | |
307 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
308 | MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */ | |
309 | status = | |
310 | ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, | |
939678f8 | 311 | MAC_ADDR_MR, 0); |
c4e84bde RM |
312 | if (status) |
313 | goto exit; | |
314 | *value++ = ql_read32(qdev, MAC_ADDR_DATA); | |
315 | } | |
316 | break; | |
317 | } | |
318 | case MAC_ADDR_TYPE_VLAN: | |
319 | case MAC_ADDR_TYPE_MULTI_FLTR: | |
320 | default: | |
ae9540f7 JP |
321 | netif_crit(qdev, ifup, qdev->ndev, |
322 | "Address type %d not yet supported.\n", type); | |
c4e84bde RM |
323 | status = -EPERM; |
324 | } | |
325 | exit: | |
c4e84bde RM |
326 | return status; |
327 | } | |
328 | ||
329 | /* Set up a MAC, multicast or VLAN address for the | |
330 | * inbound frame matching. | |
331 | */ | |
332 | static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, | |
333 | u16 index) | |
334 | { | |
335 | u32 offset = 0; | |
336 | int status = 0; | |
337 | ||
c4e84bde RM |
338 | switch (type) { |
339 | case MAC_ADDR_TYPE_MULTI_MAC: | |
76b26694 RM |
340 | { |
341 | u32 upper = (addr[0] << 8) | addr[1]; | |
342 | u32 lower = (addr[2] << 24) | (addr[3] << 16) | | |
343 | (addr[4] << 8) | (addr[5]); | |
344 | ||
345 | status = | |
346 | ql_wait_reg_rdy(qdev, | |
347 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); | |
348 | if (status) | |
349 | goto exit; | |
350 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | | |
351 | (index << MAC_ADDR_IDX_SHIFT) | | |
352 | type | MAC_ADDR_E); | |
353 | ql_write32(qdev, MAC_ADDR_DATA, lower); | |
354 | status = | |
355 | ql_wait_reg_rdy(qdev, | |
356 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); | |
357 | if (status) | |
358 | goto exit; | |
359 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | | |
360 | (index << MAC_ADDR_IDX_SHIFT) | | |
361 | type | MAC_ADDR_E); | |
362 | ||
363 | ql_write32(qdev, MAC_ADDR_DATA, upper); | |
364 | status = | |
365 | ql_wait_reg_rdy(qdev, | |
366 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); | |
367 | if (status) | |
368 | goto exit; | |
369 | break; | |
370 | } | |
c4e84bde RM |
371 | case MAC_ADDR_TYPE_CAM_MAC: |
372 | { | |
373 | u32 cam_output; | |
374 | u32 upper = (addr[0] << 8) | addr[1]; | |
375 | u32 lower = | |
376 | (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | | |
377 | (addr[5]); | |
c4e84bde RM |
378 | status = |
379 | ql_wait_reg_rdy(qdev, | |
939678f8 | 380 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
381 | if (status) |
382 | goto exit; | |
383 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | |
384 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
385 | type); /* type */ | |
386 | ql_write32(qdev, MAC_ADDR_DATA, lower); | |
387 | status = | |
388 | ql_wait_reg_rdy(qdev, | |
939678f8 | 389 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
390 | if (status) |
391 | goto exit; | |
392 | ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */ | |
393 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
394 | type); /* type */ | |
395 | ql_write32(qdev, MAC_ADDR_DATA, upper); | |
396 | status = | |
397 | ql_wait_reg_rdy(qdev, | |
939678f8 | 398 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
399 | if (status) |
400 | goto exit; | |
401 | ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */ | |
402 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
403 | type); /* type */ | |
404 | /* This field should also include the queue id | |
405 | and possibly the function id. Right now we hardcode | |
406 | the route field to NIC core. | |
407 | */ | |
76b26694 RM |
408 | cam_output = (CAM_OUT_ROUTE_NIC | |
409 | (qdev-> | |
410 | func << CAM_OUT_FUNC_SHIFT) | | |
411 | (0 << CAM_OUT_CQ_ID_SHIFT)); | |
18c49b91 | 412 | if (qdev->ndev->features & NETIF_F_HW_VLAN_RX) |
76b26694 RM |
413 | cam_output |= CAM_OUT_RV; |
414 | /* route to NIC core */ | |
415 | ql_write32(qdev, MAC_ADDR_DATA, cam_output); | |
c4e84bde RM |
416 | break; |
417 | } | |
418 | case MAC_ADDR_TYPE_VLAN: | |
419 | { | |
420 | u32 enable_bit = *((u32 *) &addr[0]); | |
421 | /* For VLAN, the addr actually holds a bit that | |
422 | * either enables or disables the vlan id we are | |
423 | * addressing. It's either MAC_ADDR_E on or off. | |
424 | * That's bit-27 we're talking about. | |
425 | */ | |
c4e84bde RM |
426 | status = |
427 | ql_wait_reg_rdy(qdev, | |
939678f8 | 428 | MAC_ADDR_IDX, MAC_ADDR_MW, 0); |
c4e84bde RM |
429 | if (status) |
430 | goto exit; | |
431 | ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */ | |
432 | (index << MAC_ADDR_IDX_SHIFT) | /* index */ | |
433 | type | /* type */ | |
434 | enable_bit); /* enable/disable */ | |
435 | break; | |
436 | } | |
437 | case MAC_ADDR_TYPE_MULTI_FLTR: | |
438 | default: | |
ae9540f7 JP |
439 | netif_crit(qdev, ifup, qdev->ndev, |
440 | "Address type %d not yet supported.\n", type); | |
c4e84bde RM |
441 | status = -EPERM; |
442 | } | |
443 | exit: | |
c4e84bde RM |
444 | return status; |
445 | } | |
446 | ||
7fab3bfe RM |
447 | /* Set or clear MAC address in hardware. We sometimes |
448 | * have to clear it to prevent wrong frame routing | |
449 | * especially in a bonding environment. | |
450 | */ | |
451 | static int ql_set_mac_addr(struct ql_adapter *qdev, int set) | |
452 | { | |
453 | int status; | |
454 | char zero_mac_addr[ETH_ALEN]; | |
455 | char *addr; | |
456 | ||
457 | if (set) { | |
801e9096 | 458 | addr = &qdev->current_mac_addr[0]; |
ae9540f7 JP |
459 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
460 | "Set Mac addr %pM\n", addr); | |
7fab3bfe RM |
461 | } else { |
462 | memset(zero_mac_addr, 0, ETH_ALEN); | |
463 | addr = &zero_mac_addr[0]; | |
ae9540f7 JP |
464 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
465 | "Clearing MAC address\n"); | |
7fab3bfe RM |
466 | } |
467 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | |
468 | if (status) | |
469 | return status; | |
470 | status = ql_set_mac_addr_reg(qdev, (u8 *) addr, | |
471 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); | |
472 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | |
473 | if (status) | |
ae9540f7 JP |
474 | netif_err(qdev, ifup, qdev->ndev, |
475 | "Failed to init mac address.\n"); | |
7fab3bfe RM |
476 | return status; |
477 | } | |
478 | ||
6a473308 RM |
479 | void ql_link_on(struct ql_adapter *qdev) |
480 | { | |
ae9540f7 | 481 | netif_err(qdev, link, qdev->ndev, "Link is up.\n"); |
6a473308 RM |
482 | netif_carrier_on(qdev->ndev); |
483 | ql_set_mac_addr(qdev, 1); | |
484 | } | |
485 | ||
486 | void ql_link_off(struct ql_adapter *qdev) | |
487 | { | |
ae9540f7 | 488 | netif_err(qdev, link, qdev->ndev, "Link is down.\n"); |
6a473308 RM |
489 | netif_carrier_off(qdev->ndev); |
490 | ql_set_mac_addr(qdev, 0); | |
491 | } | |
492 | ||
c4e84bde RM |
493 | /* Get a specific frame routing value from the CAM. |
494 | * Used for debug and reg dump. | |
495 | */ | |
496 | int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) | |
497 | { | |
498 | int status = 0; | |
499 | ||
939678f8 | 500 | status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); |
c4e84bde RM |
501 | if (status) |
502 | goto exit; | |
503 | ||
504 | ql_write32(qdev, RT_IDX, | |
505 | RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); | |
939678f8 | 506 | status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); |
c4e84bde RM |
507 | if (status) |
508 | goto exit; | |
509 | *value = ql_read32(qdev, RT_DATA); | |
510 | exit: | |
c4e84bde RM |
511 | return status; |
512 | } | |
513 | ||
514 | /* The NIC function for this chip has 16 routing indexes. Each one can be used | |
515 | * to route different frame types to various inbound queues. We send broadcast/ | |
516 | * multicast/error frames to the default queue for slow handling, | |
517 | * and CAM hit/RSS frames to the fast handling queues. | |
518 | */ | |
519 | static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, | |
520 | int enable) | |
521 | { | |
8587ea35 | 522 | int status = -EINVAL; /* Return error if no mask match. */ |
c4e84bde RM |
523 | u32 value = 0; |
524 | ||
c4e84bde RM |
525 | switch (mask) { |
526 | case RT_IDX_CAM_HIT: | |
527 | { | |
528 | value = RT_IDX_DST_CAM_Q | /* dest */ | |
529 | RT_IDX_TYPE_NICQ | /* type */ | |
530 | (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
531 | break; | |
532 | } | |
533 | case RT_IDX_VALID: /* Promiscuous Mode frames. */ | |
534 | { | |
535 | value = RT_IDX_DST_DFLT_Q | /* dest */ | |
536 | RT_IDX_TYPE_NICQ | /* type */ | |
537 | (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
538 | break; | |
539 | } | |
540 | case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */ | |
541 | { | |
542 | value = RT_IDX_DST_DFLT_Q | /* dest */ | |
543 | RT_IDX_TYPE_NICQ | /* type */ | |
544 | (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
545 | break; | |
546 | } | |
fbc2ac33 RM |
547 | case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */ |
548 | { | |
549 | value = RT_IDX_DST_DFLT_Q | /* dest */ | |
550 | RT_IDX_TYPE_NICQ | /* type */ | |
551 | (RT_IDX_IP_CSUM_ERR_SLOT << | |
552 | RT_IDX_IDX_SHIFT); /* index */ | |
553 | break; | |
554 | } | |
555 | case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */ | |
556 | { | |
557 | value = RT_IDX_DST_DFLT_Q | /* dest */ | |
558 | RT_IDX_TYPE_NICQ | /* type */ | |
559 | (RT_IDX_TCP_UDP_CSUM_ERR_SLOT << | |
560 | RT_IDX_IDX_SHIFT); /* index */ | |
561 | break; | |
562 | } | |
c4e84bde RM |
563 | case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */ |
564 | { | |
565 | value = RT_IDX_DST_DFLT_Q | /* dest */ | |
566 | RT_IDX_TYPE_NICQ | /* type */ | |
567 | (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
568 | break; | |
569 | } | |
570 | case RT_IDX_MCAST: /* Pass up All Multicast frames. */ | |
571 | { | |
e163d7f2 | 572 | value = RT_IDX_DST_DFLT_Q | /* dest */ |
c4e84bde RM |
573 | RT_IDX_TYPE_NICQ | /* type */ |
574 | (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
575 | break; | |
576 | } | |
577 | case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */ | |
578 | { | |
e163d7f2 | 579 | value = RT_IDX_DST_DFLT_Q | /* dest */ |
c4e84bde RM |
580 | RT_IDX_TYPE_NICQ | /* type */ |
581 | (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
582 | break; | |
583 | } | |
584 | case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */ | |
585 | { | |
586 | value = RT_IDX_DST_RSS | /* dest */ | |
587 | RT_IDX_TYPE_NICQ | /* type */ | |
588 | (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ | |
589 | break; | |
590 | } | |
591 | case 0: /* Clear the E-bit on an entry. */ | |
592 | { | |
593 | value = RT_IDX_DST_DFLT_Q | /* dest */ | |
594 | RT_IDX_TYPE_NICQ | /* type */ | |
595 | (index << RT_IDX_IDX_SHIFT);/* index */ | |
596 | break; | |
597 | } | |
598 | default: | |
ae9540f7 JP |
599 | netif_err(qdev, ifup, qdev->ndev, |
600 | "Mask type %d not yet supported.\n", mask); | |
c4e84bde RM |
601 | status = -EPERM; |
602 | goto exit; | |
603 | } | |
604 | ||
605 | if (value) { | |
606 | status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); | |
607 | if (status) | |
608 | goto exit; | |
609 | value |= (enable ? RT_IDX_E : 0); | |
610 | ql_write32(qdev, RT_IDX, value); | |
611 | ql_write32(qdev, RT_DATA, enable ? mask : 0); | |
612 | } | |
613 | exit: | |
c4e84bde RM |
614 | return status; |
615 | } | |
616 | ||
617 | static void ql_enable_interrupts(struct ql_adapter *qdev) | |
618 | { | |
619 | ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); | |
620 | } | |
621 | ||
622 | static void ql_disable_interrupts(struct ql_adapter *qdev) | |
623 | { | |
624 | ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); | |
625 | } | |
626 | ||
627 | /* If we're running with multiple MSI-X vectors then we enable on the fly. | |
628 | * Otherwise, we may have multiple outstanding workers and don't want to | |
629 | * enable until the last one finishes. In this case, the irq_cnt gets | |
25985edc | 630 | * incremented every time we queue a worker and decremented every time |
c4e84bde RM |
631 | * a worker finishes. Once it hits zero we enable the interrupt. |
632 | */ | |
bb0d215c | 633 | u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) |
c4e84bde | 634 | { |
bb0d215c RM |
635 | u32 var = 0; |
636 | unsigned long hw_flags = 0; | |
637 | struct intr_context *ctx = qdev->intr_context + intr; | |
638 | ||
639 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) { | |
640 | /* Always enable if we're MSIX multi interrupts and | |
641 | * it's not the default (zeroeth) interrupt. | |
642 | */ | |
c4e84bde | 643 | ql_write32(qdev, INTR_EN, |
bb0d215c RM |
644 | ctx->intr_en_mask); |
645 | var = ql_read32(qdev, STS); | |
646 | return var; | |
c4e84bde | 647 | } |
bb0d215c RM |
648 | |
649 | spin_lock_irqsave(&qdev->hw_lock, hw_flags); | |
650 | if (atomic_dec_and_test(&ctx->irq_cnt)) { | |
651 | ql_write32(qdev, INTR_EN, | |
652 | ctx->intr_en_mask); | |
653 | var = ql_read32(qdev, STS); | |
654 | } | |
655 | spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); | |
656 | return var; | |
c4e84bde RM |
657 | } |
658 | ||
659 | static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) | |
660 | { | |
661 | u32 var = 0; | |
bb0d215c | 662 | struct intr_context *ctx; |
c4e84bde | 663 | |
bb0d215c RM |
664 | /* HW disables for us if we're MSIX multi interrupts and |
665 | * it's not the default (zeroeth) interrupt. | |
666 | */ | |
667 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) | |
668 | return 0; | |
669 | ||
670 | ctx = qdev->intr_context + intr; | |
08b1bc8f | 671 | spin_lock(&qdev->hw_lock); |
bb0d215c | 672 | if (!atomic_read(&ctx->irq_cnt)) { |
c4e84bde | 673 | ql_write32(qdev, INTR_EN, |
bb0d215c | 674 | ctx->intr_dis_mask); |
c4e84bde RM |
675 | var = ql_read32(qdev, STS); |
676 | } | |
bb0d215c | 677 | atomic_inc(&ctx->irq_cnt); |
08b1bc8f | 678 | spin_unlock(&qdev->hw_lock); |
c4e84bde RM |
679 | return var; |
680 | } | |
681 | ||
682 | static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) | |
683 | { | |
684 | int i; | |
685 | for (i = 0; i < qdev->intr_count; i++) { | |
686 | /* The enable call does a atomic_dec_and_test | |
687 | * and enables only if the result is zero. | |
688 | * So we precharge it here. | |
689 | */ | |
bb0d215c RM |
690 | if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) || |
691 | i == 0)) | |
692 | atomic_set(&qdev->intr_context[i].irq_cnt, 1); | |
c4e84bde RM |
693 | ql_enable_completion_interrupt(qdev, i); |
694 | } | |
695 | ||
696 | } | |
697 | ||
b0c2aadf RM |
698 | static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) |
699 | { | |
700 | int status, i; | |
701 | u16 csum = 0; | |
702 | __le16 *flash = (__le16 *)&qdev->flash; | |
703 | ||
704 | status = strncmp((char *)&qdev->flash, str, 4); | |
705 | if (status) { | |
ae9540f7 | 706 | netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n"); |
b0c2aadf RM |
707 | return status; |
708 | } | |
709 | ||
710 | for (i = 0; i < size; i++) | |
711 | csum += le16_to_cpu(*flash++); | |
712 | ||
713 | if (csum) | |
ae9540f7 JP |
714 | netif_err(qdev, ifup, qdev->ndev, |
715 | "Invalid flash checksum, csum = 0x%.04x.\n", csum); | |
b0c2aadf RM |
716 | |
717 | return csum; | |
718 | } | |
719 | ||
26351479 | 720 | static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) |
c4e84bde RM |
721 | { |
722 | int status = 0; | |
723 | /* wait for reg to come ready */ | |
724 | status = ql_wait_reg_rdy(qdev, | |
725 | FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); | |
726 | if (status) | |
727 | goto exit; | |
728 | /* set up for reg read */ | |
729 | ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); | |
730 | /* wait for reg to come ready */ | |
731 | status = ql_wait_reg_rdy(qdev, | |
732 | FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); | |
733 | if (status) | |
734 | goto exit; | |
26351479 RM |
735 | /* This data is stored on flash as an array of |
736 | * __le32. Since ql_read32() returns cpu endian | |
737 | * we need to swap it back. | |
738 | */ | |
739 | *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); | |
c4e84bde RM |
740 | exit: |
741 | return status; | |
742 | } | |
743 | ||
cdca8d02 RM |
744 | static int ql_get_8000_flash_params(struct ql_adapter *qdev) |
745 | { | |
746 | u32 i, size; | |
747 | int status; | |
748 | __le32 *p = (__le32 *)&qdev->flash; | |
749 | u32 offset; | |
542512e4 | 750 | u8 mac_addr[6]; |
cdca8d02 RM |
751 | |
752 | /* Get flash offset for function and adjust | |
753 | * for dword access. | |
754 | */ | |
e4552f51 | 755 | if (!qdev->port) |
cdca8d02 RM |
756 | offset = FUNC0_FLASH_OFFSET / sizeof(u32); |
757 | else | |
758 | offset = FUNC1_FLASH_OFFSET / sizeof(u32); | |
759 | ||
760 | if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) | |
761 | return -ETIMEDOUT; | |
762 | ||
763 | size = sizeof(struct flash_params_8000) / sizeof(u32); | |
764 | for (i = 0; i < size; i++, p++) { | |
765 | status = ql_read_flash_word(qdev, i+offset, p); | |
766 | if (status) { | |
ae9540f7 JP |
767 | netif_err(qdev, ifup, qdev->ndev, |
768 | "Error reading flash.\n"); | |
cdca8d02 RM |
769 | goto exit; |
770 | } | |
771 | } | |
772 | ||
773 | status = ql_validate_flash(qdev, | |
774 | sizeof(struct flash_params_8000) / sizeof(u16), | |
775 | "8000"); | |
776 | if (status) { | |
ae9540f7 | 777 | netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); |
cdca8d02 RM |
778 | status = -EINVAL; |
779 | goto exit; | |
780 | } | |
781 | ||
542512e4 RM |
782 | /* Extract either manufacturer or BOFM modified |
783 | * MAC address. | |
784 | */ | |
785 | if (qdev->flash.flash_params_8000.data_type1 == 2) | |
786 | memcpy(mac_addr, | |
787 | qdev->flash.flash_params_8000.mac_addr1, | |
788 | qdev->ndev->addr_len); | |
789 | else | |
790 | memcpy(mac_addr, | |
791 | qdev->flash.flash_params_8000.mac_addr, | |
792 | qdev->ndev->addr_len); | |
793 | ||
794 | if (!is_valid_ether_addr(mac_addr)) { | |
ae9540f7 | 795 | netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n"); |
cdca8d02 RM |
796 | status = -EINVAL; |
797 | goto exit; | |
798 | } | |
799 | ||
800 | memcpy(qdev->ndev->dev_addr, | |
542512e4 | 801 | mac_addr, |
cdca8d02 RM |
802 | qdev->ndev->addr_len); |
803 | ||
804 | exit: | |
805 | ql_sem_unlock(qdev, SEM_FLASH_MASK); | |
806 | return status; | |
807 | } | |
808 | ||
b0c2aadf | 809 | static int ql_get_8012_flash_params(struct ql_adapter *qdev) |
c4e84bde RM |
810 | { |
811 | int i; | |
812 | int status; | |
26351479 | 813 | __le32 *p = (__le32 *)&qdev->flash; |
e78f5fa7 | 814 | u32 offset = 0; |
b0c2aadf | 815 | u32 size = sizeof(struct flash_params_8012) / sizeof(u32); |
e78f5fa7 RM |
816 | |
817 | /* Second function's parameters follow the first | |
818 | * function's. | |
819 | */ | |
e4552f51 | 820 | if (qdev->port) |
b0c2aadf | 821 | offset = size; |
c4e84bde RM |
822 | |
823 | if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) | |
824 | return -ETIMEDOUT; | |
825 | ||
b0c2aadf | 826 | for (i = 0; i < size; i++, p++) { |
e78f5fa7 | 827 | status = ql_read_flash_word(qdev, i+offset, p); |
c4e84bde | 828 | if (status) { |
ae9540f7 JP |
829 | netif_err(qdev, ifup, qdev->ndev, |
830 | "Error reading flash.\n"); | |
c4e84bde RM |
831 | goto exit; |
832 | } | |
833 | ||
834 | } | |
b0c2aadf RM |
835 | |
836 | status = ql_validate_flash(qdev, | |
837 | sizeof(struct flash_params_8012) / sizeof(u16), | |
838 | "8012"); | |
839 | if (status) { | |
ae9540f7 | 840 | netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); |
b0c2aadf RM |
841 | status = -EINVAL; |
842 | goto exit; | |
843 | } | |
844 | ||
845 | if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) { | |
846 | status = -EINVAL; | |
847 | goto exit; | |
848 | } | |
849 | ||
850 | memcpy(qdev->ndev->dev_addr, | |
851 | qdev->flash.flash_params_8012.mac_addr, | |
852 | qdev->ndev->addr_len); | |
853 | ||
c4e84bde RM |
854 | exit: |
855 | ql_sem_unlock(qdev, SEM_FLASH_MASK); | |
856 | return status; | |
857 | } | |
858 | ||
859 | /* xgmac register are located behind the xgmac_addr and xgmac_data | |
860 | * register pair. Each read/write requires us to wait for the ready | |
861 | * bit before reading/writing the data. | |
862 | */ | |
863 | static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) | |
864 | { | |
865 | int status; | |
866 | /* wait for reg to come ready */ | |
867 | status = ql_wait_reg_rdy(qdev, | |
868 | XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); | |
869 | if (status) | |
870 | return status; | |
871 | /* write the data to the data reg */ | |
872 | ql_write32(qdev, XGMAC_DATA, data); | |
873 | /* trigger the write */ | |
874 | ql_write32(qdev, XGMAC_ADDR, reg); | |
875 | return status; | |
876 | } | |
877 | ||
878 | /* xgmac register are located behind the xgmac_addr and xgmac_data | |
879 | * register pair. Each read/write requires us to wait for the ready | |
880 | * bit before reading/writing the data. | |
881 | */ | |
882 | int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) | |
883 | { | |
884 | int status = 0; | |
885 | /* wait for reg to come ready */ | |
886 | status = ql_wait_reg_rdy(qdev, | |
887 | XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); | |
888 | if (status) | |
889 | goto exit; | |
890 | /* set up for reg read */ | |
891 | ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); | |
892 | /* wait for reg to come ready */ | |
893 | status = ql_wait_reg_rdy(qdev, | |
894 | XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); | |
895 | if (status) | |
896 | goto exit; | |
897 | /* get the data */ | |
898 | *data = ql_read32(qdev, XGMAC_DATA); | |
899 | exit: | |
900 | return status; | |
901 | } | |
902 | ||
903 | /* This is used for reading the 64-bit statistics regs. */ | |
904 | int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) | |
905 | { | |
906 | int status = 0; | |
907 | u32 hi = 0; | |
908 | u32 lo = 0; | |
909 | ||
910 | status = ql_read_xgmac_reg(qdev, reg, &lo); | |
911 | if (status) | |
912 | goto exit; | |
913 | ||
914 | status = ql_read_xgmac_reg(qdev, reg + 4, &hi); | |
915 | if (status) | |
916 | goto exit; | |
917 | ||
918 | *data = (u64) lo | ((u64) hi << 32); | |
919 | ||
920 | exit: | |
921 | return status; | |
922 | } | |
923 | ||
cdca8d02 RM |
924 | static int ql_8000_port_initialize(struct ql_adapter *qdev) |
925 | { | |
bcc2cb3b | 926 | int status; |
cfec0cbc RM |
927 | /* |
928 | * Get MPI firmware version for driver banner | |
929 | * and ethool info. | |
930 | */ | |
931 | status = ql_mb_about_fw(qdev); | |
932 | if (status) | |
933 | goto exit; | |
bcc2cb3b RM |
934 | status = ql_mb_get_fw_state(qdev); |
935 | if (status) | |
936 | goto exit; | |
937 | /* Wake up a worker to get/set the TX/RX frame sizes. */ | |
938 | queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0); | |
939 | exit: | |
940 | return status; | |
cdca8d02 RM |
941 | } |
942 | ||
c4e84bde RM |
943 | /* Take the MAC Core out of reset. |
944 | * Enable statistics counting. | |
945 | * Take the transmitter/receiver out of reset. | |
946 | * This functionality may be done in the MPI firmware at a | |
947 | * later date. | |
948 | */ | |
b0c2aadf | 949 | static int ql_8012_port_initialize(struct ql_adapter *qdev) |
c4e84bde RM |
950 | { |
951 | int status = 0; | |
952 | u32 data; | |
953 | ||
954 | if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { | |
955 | /* Another function has the semaphore, so | |
956 | * wait for the port init bit to come ready. | |
957 | */ | |
ae9540f7 JP |
958 | netif_info(qdev, link, qdev->ndev, |
959 | "Another function has the semaphore, so wait for the port init bit to come ready.\n"); | |
c4e84bde RM |
960 | status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); |
961 | if (status) { | |
ae9540f7 JP |
962 | netif_crit(qdev, link, qdev->ndev, |
963 | "Port initialize timed out.\n"); | |
c4e84bde RM |
964 | } |
965 | return status; | |
966 | } | |
967 | ||
ae9540f7 | 968 | netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); |
c4e84bde RM |
969 | /* Set the core reset. */ |
970 | status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); | |
971 | if (status) | |
972 | goto end; | |
973 | data |= GLOBAL_CFG_RESET; | |
974 | status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); | |
975 | if (status) | |
976 | goto end; | |
977 | ||
978 | /* Clear the core reset and turn on jumbo for receiver. */ | |
979 | data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */ | |
980 | data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */ | |
981 | data |= GLOBAL_CFG_TX_STAT_EN; | |
982 | data |= GLOBAL_CFG_RX_STAT_EN; | |
983 | status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); | |
984 | if (status) | |
985 | goto end; | |
986 | ||
987 | /* Enable transmitter, and clear it's reset. */ | |
988 | status = ql_read_xgmac_reg(qdev, TX_CFG, &data); | |
989 | if (status) | |
990 | goto end; | |
991 | data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */ | |
992 | data |= TX_CFG_EN; /* Enable the transmitter. */ | |
993 | status = ql_write_xgmac_reg(qdev, TX_CFG, data); | |
994 | if (status) | |
995 | goto end; | |
996 | ||
997 | /* Enable receiver and clear it's reset. */ | |
998 | status = ql_read_xgmac_reg(qdev, RX_CFG, &data); | |
999 | if (status) | |
1000 | goto end; | |
1001 | data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */ | |
1002 | data |= RX_CFG_EN; /* Enable the receiver. */ | |
1003 | status = ql_write_xgmac_reg(qdev, RX_CFG, data); | |
1004 | if (status) | |
1005 | goto end; | |
1006 | ||
1007 | /* Turn on jumbo. */ | |
1008 | status = | |
1009 | ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); | |
1010 | if (status) | |
1011 | goto end; | |
1012 | status = | |
1013 | ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); | |
1014 | if (status) | |
1015 | goto end; | |
1016 | ||
1017 | /* Signal to the world that the port is enabled. */ | |
1018 | ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); | |
1019 | end: | |
1020 | ql_sem_unlock(qdev, qdev->xg_sem_mask); | |
1021 | return status; | |
1022 | } | |
1023 | ||
7c734359 RM |
1024 | static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) |
1025 | { | |
1026 | return PAGE_SIZE << qdev->lbq_buf_order; | |
1027 | } | |
1028 | ||
c4e84bde | 1029 | /* Get the next large buffer. */ |
8668ae92 | 1030 | static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) |
c4e84bde RM |
1031 | { |
1032 | struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; | |
1033 | rx_ring->lbq_curr_idx++; | |
1034 | if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) | |
1035 | rx_ring->lbq_curr_idx = 0; | |
1036 | rx_ring->lbq_free_cnt++; | |
1037 | return lbq_desc; | |
1038 | } | |
1039 | ||
7c734359 RM |
1040 | static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, |
1041 | struct rx_ring *rx_ring) | |
1042 | { | |
1043 | struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); | |
1044 | ||
1045 | pci_dma_sync_single_for_cpu(qdev->pdev, | |
64b9b41d | 1046 | dma_unmap_addr(lbq_desc, mapaddr), |
7c734359 RM |
1047 | rx_ring->lbq_buf_size, |
1048 | PCI_DMA_FROMDEVICE); | |
1049 | ||
1050 | /* If it's the last chunk of our master page then | |
1051 | * we unmap it. | |
1052 | */ | |
1053 | if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) | |
1054 | == ql_lbq_block_size(qdev)) | |
1055 | pci_unmap_page(qdev->pdev, | |
1056 | lbq_desc->p.pg_chunk.map, | |
1057 | ql_lbq_block_size(qdev), | |
1058 | PCI_DMA_FROMDEVICE); | |
1059 | return lbq_desc; | |
1060 | } | |
1061 | ||
c4e84bde | 1062 | /* Get the next small buffer. */ |
8668ae92 | 1063 | static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) |
c4e84bde RM |
1064 | { |
1065 | struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; | |
1066 | rx_ring->sbq_curr_idx++; | |
1067 | if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) | |
1068 | rx_ring->sbq_curr_idx = 0; | |
1069 | rx_ring->sbq_free_cnt++; | |
1070 | return sbq_desc; | |
1071 | } | |
1072 | ||
1073 | /* Update an rx ring index. */ | |
1074 | static void ql_update_cq(struct rx_ring *rx_ring) | |
1075 | { | |
1076 | rx_ring->cnsmr_idx++; | |
1077 | rx_ring->curr_entry++; | |
1078 | if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { | |
1079 | rx_ring->cnsmr_idx = 0; | |
1080 | rx_ring->curr_entry = rx_ring->cq_base; | |
1081 | } | |
1082 | } | |
1083 | ||
1084 | static void ql_write_cq_idx(struct rx_ring *rx_ring) | |
1085 | { | |
1086 | ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); | |
1087 | } | |
1088 | ||
7c734359 RM |
1089 | static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, |
1090 | struct bq_desc *lbq_desc) | |
1091 | { | |
1092 | if (!rx_ring->pg_chunk.page) { | |
1093 | u64 map; | |
1094 | rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | | |
1095 | GFP_ATOMIC, | |
1096 | qdev->lbq_buf_order); | |
1097 | if (unlikely(!rx_ring->pg_chunk.page)) { | |
ae9540f7 JP |
1098 | netif_err(qdev, drv, qdev->ndev, |
1099 | "page allocation failed.\n"); | |
7c734359 RM |
1100 | return -ENOMEM; |
1101 | } | |
1102 | rx_ring->pg_chunk.offset = 0; | |
1103 | map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, | |
1104 | 0, ql_lbq_block_size(qdev), | |
1105 | PCI_DMA_FROMDEVICE); | |
1106 | if (pci_dma_mapping_error(qdev->pdev, map)) { | |
1107 | __free_pages(rx_ring->pg_chunk.page, | |
1108 | qdev->lbq_buf_order); | |
ae9540f7 JP |
1109 | netif_err(qdev, drv, qdev->ndev, |
1110 | "PCI mapping failed.\n"); | |
7c734359 RM |
1111 | return -ENOMEM; |
1112 | } | |
1113 | rx_ring->pg_chunk.map = map; | |
1114 | rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); | |
1115 | } | |
1116 | ||
1117 | /* Copy the current master pg_chunk info | |
1118 | * to the current descriptor. | |
1119 | */ | |
1120 | lbq_desc->p.pg_chunk = rx_ring->pg_chunk; | |
1121 | ||
1122 | /* Adjust the master page chunk for next | |
1123 | * buffer get. | |
1124 | */ | |
1125 | rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; | |
1126 | if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { | |
1127 | rx_ring->pg_chunk.page = NULL; | |
1128 | lbq_desc->p.pg_chunk.last_flag = 1; | |
1129 | } else { | |
1130 | rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; | |
1131 | get_page(rx_ring->pg_chunk.page); | |
1132 | lbq_desc->p.pg_chunk.last_flag = 0; | |
1133 | } | |
1134 | return 0; | |
1135 | } | |
c4e84bde RM |
1136 | /* Process (refill) a large buffer queue. */ |
1137 | static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |
1138 | { | |
49f2186d RM |
1139 | u32 clean_idx = rx_ring->lbq_clean_idx; |
1140 | u32 start_idx = clean_idx; | |
c4e84bde | 1141 | struct bq_desc *lbq_desc; |
c4e84bde RM |
1142 | u64 map; |
1143 | int i; | |
1144 | ||
7c734359 | 1145 | while (rx_ring->lbq_free_cnt > 32) { |
81f25d96 | 1146 | for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { |
ae9540f7 JP |
1147 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1148 | "lbq: try cleaning clean_idx = %d.\n", | |
1149 | clean_idx); | |
c4e84bde | 1150 | lbq_desc = &rx_ring->lbq[clean_idx]; |
7c734359 | 1151 | if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { |
81f25d96 | 1152 | rx_ring->lbq_clean_idx = clean_idx; |
ae9540f7 | 1153 | netif_err(qdev, ifup, qdev->ndev, |
81f25d96 JK |
1154 | "Could not get a page chunk, i=%d, clean_idx =%d .\n", |
1155 | i, clean_idx); | |
ae9540f7 JP |
1156 | return; |
1157 | } | |
7c734359 RM |
1158 | |
1159 | map = lbq_desc->p.pg_chunk.map + | |
1160 | lbq_desc->p.pg_chunk.offset; | |
64b9b41d FT |
1161 | dma_unmap_addr_set(lbq_desc, mapaddr, map); |
1162 | dma_unmap_len_set(lbq_desc, maplen, | |
7c734359 | 1163 | rx_ring->lbq_buf_size); |
2c9a0d41 | 1164 | *lbq_desc->addr = cpu_to_le64(map); |
7c734359 RM |
1165 | |
1166 | pci_dma_sync_single_for_device(qdev->pdev, map, | |
1167 | rx_ring->lbq_buf_size, | |
1168 | PCI_DMA_FROMDEVICE); | |
c4e84bde RM |
1169 | clean_idx++; |
1170 | if (clean_idx == rx_ring->lbq_len) | |
1171 | clean_idx = 0; | |
1172 | } | |
1173 | ||
1174 | rx_ring->lbq_clean_idx = clean_idx; | |
1175 | rx_ring->lbq_prod_idx += 16; | |
1176 | if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) | |
1177 | rx_ring->lbq_prod_idx = 0; | |
49f2186d RM |
1178 | rx_ring->lbq_free_cnt -= 16; |
1179 | } | |
1180 | ||
1181 | if (start_idx != clean_idx) { | |
ae9540f7 JP |
1182 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1183 | "lbq: updating prod idx = %d.\n", | |
1184 | rx_ring->lbq_prod_idx); | |
c4e84bde RM |
1185 | ql_write_db_reg(rx_ring->lbq_prod_idx, |
1186 | rx_ring->lbq_prod_idx_db_reg); | |
c4e84bde RM |
1187 | } |
1188 | } | |
1189 | ||
1190 | /* Process (refill) a small buffer queue. */ | |
1191 | static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |
1192 | { | |
49f2186d RM |
1193 | u32 clean_idx = rx_ring->sbq_clean_idx; |
1194 | u32 start_idx = clean_idx; | |
c4e84bde | 1195 | struct bq_desc *sbq_desc; |
c4e84bde RM |
1196 | u64 map; |
1197 | int i; | |
1198 | ||
1199 | while (rx_ring->sbq_free_cnt > 16) { | |
81f25d96 | 1200 | for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { |
c4e84bde | 1201 | sbq_desc = &rx_ring->sbq[clean_idx]; |
ae9540f7 JP |
1202 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1203 | "sbq: try cleaning clean_idx = %d.\n", | |
1204 | clean_idx); | |
c4e84bde | 1205 | if (sbq_desc->p.skb == NULL) { |
ae9540f7 JP |
1206 | netif_printk(qdev, rx_status, KERN_DEBUG, |
1207 | qdev->ndev, | |
1208 | "sbq: getting new skb for index %d.\n", | |
1209 | sbq_desc->index); | |
c4e84bde RM |
1210 | sbq_desc->p.skb = |
1211 | netdev_alloc_skb(qdev->ndev, | |
52e55f3c | 1212 | SMALL_BUFFER_SIZE); |
c4e84bde | 1213 | if (sbq_desc->p.skb == NULL) { |
ae9540f7 JP |
1214 | netif_err(qdev, probe, qdev->ndev, |
1215 | "Couldn't get an skb.\n"); | |
c4e84bde RM |
1216 | rx_ring->sbq_clean_idx = clean_idx; |
1217 | return; | |
1218 | } | |
1219 | skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); | |
1220 | map = pci_map_single(qdev->pdev, | |
1221 | sbq_desc->p.skb->data, | |
52e55f3c RM |
1222 | rx_ring->sbq_buf_size, |
1223 | PCI_DMA_FROMDEVICE); | |
c907a35a | 1224 | if (pci_dma_mapping_error(qdev->pdev, map)) { |
ae9540f7 JP |
1225 | netif_err(qdev, ifup, qdev->ndev, |
1226 | "PCI mapping failed.\n"); | |
c907a35a | 1227 | rx_ring->sbq_clean_idx = clean_idx; |
06a3d510 RM |
1228 | dev_kfree_skb_any(sbq_desc->p.skb); |
1229 | sbq_desc->p.skb = NULL; | |
c907a35a RM |
1230 | return; |
1231 | } | |
64b9b41d FT |
1232 | dma_unmap_addr_set(sbq_desc, mapaddr, map); |
1233 | dma_unmap_len_set(sbq_desc, maplen, | |
52e55f3c | 1234 | rx_ring->sbq_buf_size); |
2c9a0d41 | 1235 | *sbq_desc->addr = cpu_to_le64(map); |
c4e84bde RM |
1236 | } |
1237 | ||
1238 | clean_idx++; | |
1239 | if (clean_idx == rx_ring->sbq_len) | |
1240 | clean_idx = 0; | |
1241 | } | |
1242 | rx_ring->sbq_clean_idx = clean_idx; | |
1243 | rx_ring->sbq_prod_idx += 16; | |
1244 | if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) | |
1245 | rx_ring->sbq_prod_idx = 0; | |
49f2186d RM |
1246 | rx_ring->sbq_free_cnt -= 16; |
1247 | } | |
1248 | ||
1249 | if (start_idx != clean_idx) { | |
ae9540f7 JP |
1250 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1251 | "sbq: updating prod idx = %d.\n", | |
1252 | rx_ring->sbq_prod_idx); | |
c4e84bde RM |
1253 | ql_write_db_reg(rx_ring->sbq_prod_idx, |
1254 | rx_ring->sbq_prod_idx_db_reg); | |
c4e84bde RM |
1255 | } |
1256 | } | |
1257 | ||
1258 | static void ql_update_buffer_queues(struct ql_adapter *qdev, | |
1259 | struct rx_ring *rx_ring) | |
1260 | { | |
1261 | ql_update_sbq(qdev, rx_ring); | |
1262 | ql_update_lbq(qdev, rx_ring); | |
1263 | } | |
1264 | ||
1265 | /* Unmaps tx buffers. Can be called from send() if a pci mapping | |
1266 | * fails at some stage, or from the interrupt when a tx completes. | |
1267 | */ | |
1268 | static void ql_unmap_send(struct ql_adapter *qdev, | |
1269 | struct tx_ring_desc *tx_ring_desc, int mapped) | |
1270 | { | |
1271 | int i; | |
1272 | for (i = 0; i < mapped; i++) { | |
1273 | if (i == 0 || (i == 7 && mapped > 7)) { | |
1274 | /* | |
1275 | * Unmap the skb->data area, or the | |
1276 | * external sglist (AKA the Outbound | |
1277 | * Address List (OAL)). | |
1278 | * If its the zeroeth element, then it's | |
1279 | * the skb->data area. If it's the 7th | |
1280 | * element and there is more than 6 frags, | |
1281 | * then its an OAL. | |
1282 | */ | |
1283 | if (i == 7) { | |
ae9540f7 JP |
1284 | netif_printk(qdev, tx_done, KERN_DEBUG, |
1285 | qdev->ndev, | |
1286 | "unmapping OAL area.\n"); | |
c4e84bde RM |
1287 | } |
1288 | pci_unmap_single(qdev->pdev, | |
64b9b41d | 1289 | dma_unmap_addr(&tx_ring_desc->map[i], |
c4e84bde | 1290 | mapaddr), |
64b9b41d | 1291 | dma_unmap_len(&tx_ring_desc->map[i], |
c4e84bde RM |
1292 | maplen), |
1293 | PCI_DMA_TODEVICE); | |
1294 | } else { | |
ae9540f7 JP |
1295 | netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, |
1296 | "unmapping frag %d.\n", i); | |
c4e84bde | 1297 | pci_unmap_page(qdev->pdev, |
64b9b41d | 1298 | dma_unmap_addr(&tx_ring_desc->map[i], |
c4e84bde | 1299 | mapaddr), |
64b9b41d | 1300 | dma_unmap_len(&tx_ring_desc->map[i], |
c4e84bde RM |
1301 | maplen), PCI_DMA_TODEVICE); |
1302 | } | |
1303 | } | |
1304 | ||
1305 | } | |
1306 | ||
1307 | /* Map the buffers for this transmit. This will return | |
1308 | * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. | |
1309 | */ | |
1310 | static int ql_map_send(struct ql_adapter *qdev, | |
1311 | struct ob_mac_iocb_req *mac_iocb_ptr, | |
1312 | struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) | |
1313 | { | |
1314 | int len = skb_headlen(skb); | |
1315 | dma_addr_t map; | |
1316 | int frag_idx, err, map_idx = 0; | |
1317 | struct tx_buf_desc *tbd = mac_iocb_ptr->tbd; | |
1318 | int frag_cnt = skb_shinfo(skb)->nr_frags; | |
1319 | ||
1320 | if (frag_cnt) { | |
ae9540f7 JP |
1321 | netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, |
1322 | "frag_cnt = %d.\n", frag_cnt); | |
c4e84bde RM |
1323 | } |
1324 | /* | |
1325 | * Map the skb buffer first. | |
1326 | */ | |
1327 | map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); | |
1328 | ||
1329 | err = pci_dma_mapping_error(qdev->pdev, map); | |
1330 | if (err) { | |
ae9540f7 JP |
1331 | netif_err(qdev, tx_queued, qdev->ndev, |
1332 | "PCI mapping failed with error: %d\n", err); | |
c4e84bde RM |
1333 | |
1334 | return NETDEV_TX_BUSY; | |
1335 | } | |
1336 | ||
1337 | tbd->len = cpu_to_le32(len); | |
1338 | tbd->addr = cpu_to_le64(map); | |
64b9b41d FT |
1339 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); |
1340 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); | |
c4e84bde RM |
1341 | map_idx++; |
1342 | ||
1343 | /* | |
1344 | * This loop fills the remainder of the 8 address descriptors | |
1345 | * in the IOCB. If there are more than 7 fragments, then the | |
1346 | * eighth address desc will point to an external list (OAL). | |
1347 | * When this happens, the remainder of the frags will be stored | |
1348 | * in this list. | |
1349 | */ | |
1350 | for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) { | |
1351 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx]; | |
1352 | tbd++; | |
1353 | if (frag_idx == 6 && frag_cnt > 7) { | |
1354 | /* Let's tack on an sglist. | |
1355 | * Our control block will now | |
1356 | * look like this: | |
1357 | * iocb->seg[0] = skb->data | |
1358 | * iocb->seg[1] = frag[0] | |
1359 | * iocb->seg[2] = frag[1] | |
1360 | * iocb->seg[3] = frag[2] | |
1361 | * iocb->seg[4] = frag[3] | |
1362 | * iocb->seg[5] = frag[4] | |
1363 | * iocb->seg[6] = frag[5] | |
1364 | * iocb->seg[7] = ptr to OAL (external sglist) | |
1365 | * oal->seg[0] = frag[6] | |
1366 | * oal->seg[1] = frag[7] | |
1367 | * oal->seg[2] = frag[8] | |
1368 | * oal->seg[3] = frag[9] | |
1369 | * oal->seg[4] = frag[10] | |
1370 | * etc... | |
1371 | */ | |
1372 | /* Tack on the OAL in the eighth segment of IOCB. */ | |
1373 | map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, | |
1374 | sizeof(struct oal), | |
1375 | PCI_DMA_TODEVICE); | |
1376 | err = pci_dma_mapping_error(qdev->pdev, map); | |
1377 | if (err) { | |
ae9540f7 JP |
1378 | netif_err(qdev, tx_queued, qdev->ndev, |
1379 | "PCI mapping outbound address list with error: %d\n", | |
1380 | err); | |
c4e84bde RM |
1381 | goto map_error; |
1382 | } | |
1383 | ||
1384 | tbd->addr = cpu_to_le64(map); | |
1385 | /* | |
1386 | * The length is the number of fragments | |
1387 | * that remain to be mapped times the length | |
1388 | * of our sglist (OAL). | |
1389 | */ | |
1390 | tbd->len = | |
1391 | cpu_to_le32((sizeof(struct tx_buf_desc) * | |
1392 | (frag_cnt - frag_idx)) | TX_DESC_C); | |
64b9b41d | 1393 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, |
c4e84bde | 1394 | map); |
64b9b41d | 1395 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, |
c4e84bde RM |
1396 | sizeof(struct oal)); |
1397 | tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; | |
1398 | map_idx++; | |
1399 | } | |
1400 | ||
9e903e08 | 1401 | map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), |
5d6bcdfe | 1402 | DMA_TO_DEVICE); |
c4e84bde | 1403 | |
5d6bcdfe | 1404 | err = dma_mapping_error(&qdev->pdev->dev, map); |
c4e84bde | 1405 | if (err) { |
ae9540f7 JP |
1406 | netif_err(qdev, tx_queued, qdev->ndev, |
1407 | "PCI mapping frags failed with error: %d.\n", | |
1408 | err); | |
c4e84bde RM |
1409 | goto map_error; |
1410 | } | |
1411 | ||
1412 | tbd->addr = cpu_to_le64(map); | |
9e903e08 | 1413 | tbd->len = cpu_to_le32(skb_frag_size(frag)); |
64b9b41d FT |
1414 | dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); |
1415 | dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, | |
9e903e08 | 1416 | skb_frag_size(frag)); |
c4e84bde RM |
1417 | |
1418 | } | |
1419 | /* Save the number of segments we've mapped. */ | |
1420 | tx_ring_desc->map_cnt = map_idx; | |
1421 | /* Terminate the last segment. */ | |
1422 | tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E); | |
1423 | return NETDEV_TX_OK; | |
1424 | ||
1425 | map_error: | |
1426 | /* | |
1427 | * If the first frag mapping failed, then i will be zero. | |
1428 | * This causes the unmap of the skb->data area. Otherwise | |
1429 | * we pass in the number of frags that mapped successfully | |
1430 | * so they can be umapped. | |
1431 | */ | |
1432 | ql_unmap_send(qdev, tx_ring_desc, map_idx); | |
1433 | return NETDEV_TX_BUSY; | |
1434 | } | |
1435 | ||
63526713 RM |
1436 | /* Process an inbound completion from an rx ring. */ |
1437 | static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, | |
1438 | struct rx_ring *rx_ring, | |
1439 | struct ib_mac_iocb_rsp *ib_mac_rsp, | |
1440 | u32 length, | |
1441 | u16 vlan_id) | |
1442 | { | |
1443 | struct sk_buff *skb; | |
1444 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | |
63526713 RM |
1445 | struct napi_struct *napi = &rx_ring->napi; |
1446 | ||
1447 | napi->dev = qdev->ndev; | |
1448 | ||
1449 | skb = napi_get_frags(napi); | |
1450 | if (!skb) { | |
ae9540f7 JP |
1451 | netif_err(qdev, drv, qdev->ndev, |
1452 | "Couldn't get an skb, exiting.\n"); | |
63526713 RM |
1453 | rx_ring->rx_dropped++; |
1454 | put_page(lbq_desc->p.pg_chunk.page); | |
1455 | return; | |
1456 | } | |
1457 | prefetch(lbq_desc->p.pg_chunk.va); | |
da7ebfd7 IC |
1458 | __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, |
1459 | lbq_desc->p.pg_chunk.page, | |
1460 | lbq_desc->p.pg_chunk.offset, | |
1461 | length); | |
63526713 RM |
1462 | |
1463 | skb->len += length; | |
1464 | skb->data_len += length; | |
1465 | skb->truesize += length; | |
1466 | skb_shinfo(skb)->nr_frags++; | |
1467 | ||
1468 | rx_ring->rx_packets++; | |
1469 | rx_ring->rx_bytes += length; | |
1470 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1471 | skb_record_rx_queue(skb, rx_ring->cq_id); | |
18c49b91 JP |
1472 | if (vlan_id != 0xffff) |
1473 | __vlan_hwaccel_put_tag(skb, vlan_id); | |
1474 | napi_gro_frags(napi); | |
63526713 RM |
1475 | } |
1476 | ||
4f848c0a RM |
1477 | /* Process an inbound completion from an rx ring. */ |
1478 | static void ql_process_mac_rx_page(struct ql_adapter *qdev, | |
1479 | struct rx_ring *rx_ring, | |
1480 | struct ib_mac_iocb_rsp *ib_mac_rsp, | |
1481 | u32 length, | |
1482 | u16 vlan_id) | |
1483 | { | |
1484 | struct net_device *ndev = qdev->ndev; | |
1485 | struct sk_buff *skb = NULL; | |
1486 | void *addr; | |
1487 | struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); | |
1488 | struct napi_struct *napi = &rx_ring->napi; | |
1489 | ||
1490 | skb = netdev_alloc_skb(ndev, length); | |
1491 | if (!skb) { | |
ae9540f7 JP |
1492 | netif_err(qdev, drv, qdev->ndev, |
1493 | "Couldn't get an skb, need to unwind!.\n"); | |
4f848c0a RM |
1494 | rx_ring->rx_dropped++; |
1495 | put_page(lbq_desc->p.pg_chunk.page); | |
1496 | return; | |
1497 | } | |
1498 | ||
1499 | addr = lbq_desc->p.pg_chunk.va; | |
1500 | prefetch(addr); | |
1501 | ||
1502 | ||
1503 | /* Frame error, so drop the packet. */ | |
1504 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | |
3b11d36e | 1505 | netif_info(qdev, drv, qdev->ndev, |
ae9540f7 | 1506 | "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); |
4f848c0a RM |
1507 | rx_ring->rx_errors++; |
1508 | goto err_out; | |
1509 | } | |
1510 | ||
1511 | /* The max framesize filter on this chip is set higher than | |
1512 | * MTU since FCoE uses 2k frames. | |
1513 | */ | |
1514 | if (skb->len > ndev->mtu + ETH_HLEN) { | |
ae9540f7 JP |
1515 | netif_err(qdev, drv, qdev->ndev, |
1516 | "Segment too small, dropping.\n"); | |
4f848c0a RM |
1517 | rx_ring->rx_dropped++; |
1518 | goto err_out; | |
1519 | } | |
1520 | memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN); | |
ae9540f7 JP |
1521 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1522 | "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", | |
1523 | length); | |
4f848c0a RM |
1524 | skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, |
1525 | lbq_desc->p.pg_chunk.offset+ETH_HLEN, | |
1526 | length-ETH_HLEN); | |
1527 | skb->len += length-ETH_HLEN; | |
1528 | skb->data_len += length-ETH_HLEN; | |
1529 | skb->truesize += length-ETH_HLEN; | |
1530 | ||
1531 | rx_ring->rx_packets++; | |
1532 | rx_ring->rx_bytes += skb->len; | |
1533 | skb->protocol = eth_type_trans(skb, ndev); | |
bc8acf2c | 1534 | skb_checksum_none_assert(skb); |
4f848c0a | 1535 | |
88230fd5 | 1536 | if ((ndev->features & NETIF_F_RXCSUM) && |
4f848c0a RM |
1537 | !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { |
1538 | /* TCP frame. */ | |
1539 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { | |
ae9540f7 JP |
1540 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1541 | "TCP checksum done!\n"); | |
4f848c0a RM |
1542 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1543 | } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && | |
1544 | (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { | |
1545 | /* Unfragmented ipv4 UDP frame. */ | |
e02ef331 JK |
1546 | struct iphdr *iph = |
1547 | (struct iphdr *) ((u8 *)addr + ETH_HLEN); | |
4f848c0a RM |
1548 | if (!(iph->frag_off & |
1549 | cpu_to_be16(IP_MF|IP_OFFSET))) { | |
1550 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
ae9540f7 JP |
1551 | netif_printk(qdev, rx_status, KERN_DEBUG, |
1552 | qdev->ndev, | |
e02ef331 | 1553 | "UDP checksum done!\n"); |
4f848c0a RM |
1554 | } |
1555 | } | |
1556 | } | |
1557 | ||
1558 | skb_record_rx_queue(skb, rx_ring->cq_id); | |
18c49b91 JP |
1559 | if (vlan_id != 0xffff) |
1560 | __vlan_hwaccel_put_tag(skb, vlan_id); | |
1561 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | |
1562 | napi_gro_receive(napi, skb); | |
1563 | else | |
1564 | netif_receive_skb(skb); | |
4f848c0a RM |
1565 | return; |
1566 | err_out: | |
1567 | dev_kfree_skb_any(skb); | |
1568 | put_page(lbq_desc->p.pg_chunk.page); | |
1569 | } | |
1570 | ||
1571 | /* Process an inbound completion from an rx ring. */ | |
1572 | static void ql_process_mac_rx_skb(struct ql_adapter *qdev, | |
1573 | struct rx_ring *rx_ring, | |
1574 | struct ib_mac_iocb_rsp *ib_mac_rsp, | |
1575 | u32 length, | |
1576 | u16 vlan_id) | |
1577 | { | |
1578 | struct net_device *ndev = qdev->ndev; | |
1579 | struct sk_buff *skb = NULL; | |
1580 | struct sk_buff *new_skb = NULL; | |
1581 | struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); | |
1582 | ||
1583 | skb = sbq_desc->p.skb; | |
1584 | /* Allocate new_skb and copy */ | |
1585 | new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN); | |
1586 | if (new_skb == NULL) { | |
ae9540f7 JP |
1587 | netif_err(qdev, probe, qdev->ndev, |
1588 | "No skb available, drop the packet.\n"); | |
4f848c0a RM |
1589 | rx_ring->rx_dropped++; |
1590 | return; | |
1591 | } | |
1592 | skb_reserve(new_skb, NET_IP_ALIGN); | |
1593 | memcpy(skb_put(new_skb, length), skb->data, length); | |
1594 | skb = new_skb; | |
1595 | ||
1596 | /* Frame error, so drop the packet. */ | |
1597 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | |
3b11d36e | 1598 | netif_info(qdev, drv, qdev->ndev, |
ae9540f7 | 1599 | "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); |
4f848c0a RM |
1600 | dev_kfree_skb_any(skb); |
1601 | rx_ring->rx_errors++; | |
1602 | return; | |
1603 | } | |
1604 | ||
1605 | /* loopback self test for ethtool */ | |
1606 | if (test_bit(QL_SELFTEST, &qdev->flags)) { | |
1607 | ql_check_lb_frame(qdev, skb); | |
1608 | dev_kfree_skb_any(skb); | |
1609 | return; | |
1610 | } | |
1611 | ||
1612 | /* The max framesize filter on this chip is set higher than | |
1613 | * MTU since FCoE uses 2k frames. | |
1614 | */ | |
1615 | if (skb->len > ndev->mtu + ETH_HLEN) { | |
1616 | dev_kfree_skb_any(skb); | |
1617 | rx_ring->rx_dropped++; | |
1618 | return; | |
1619 | } | |
1620 | ||
1621 | prefetch(skb->data); | |
4f848c0a | 1622 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { |
ae9540f7 JP |
1623 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1624 | "%s Multicast.\n", | |
1625 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | |
1626 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : | |
1627 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | |
1628 | IB_MAC_IOCB_RSP_M_REG ? "Registered" : | |
1629 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | |
1630 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); | |
4f848c0a RM |
1631 | } |
1632 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) | |
ae9540f7 JP |
1633 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1634 | "Promiscuous Packet.\n"); | |
4f848c0a RM |
1635 | |
1636 | rx_ring->rx_packets++; | |
1637 | rx_ring->rx_bytes += skb->len; | |
1638 | skb->protocol = eth_type_trans(skb, ndev); | |
bc8acf2c | 1639 | skb_checksum_none_assert(skb); |
4f848c0a RM |
1640 | |
1641 | /* If rx checksum is on, and there are no | |
1642 | * csum or frame errors. | |
1643 | */ | |
88230fd5 | 1644 | if ((ndev->features & NETIF_F_RXCSUM) && |
4f848c0a RM |
1645 | !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { |
1646 | /* TCP frame. */ | |
1647 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { | |
ae9540f7 JP |
1648 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1649 | "TCP checksum done!\n"); | |
4f848c0a RM |
1650 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1651 | } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && | |
1652 | (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { | |
1653 | /* Unfragmented ipv4 UDP frame. */ | |
1654 | struct iphdr *iph = (struct iphdr *) skb->data; | |
1655 | if (!(iph->frag_off & | |
6d29b1ef | 1656 | ntohs(IP_MF|IP_OFFSET))) { |
4f848c0a | 1657 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
ae9540f7 JP |
1658 | netif_printk(qdev, rx_status, KERN_DEBUG, |
1659 | qdev->ndev, | |
e02ef331 | 1660 | "UDP checksum done!\n"); |
4f848c0a RM |
1661 | } |
1662 | } | |
1663 | } | |
1664 | ||
1665 | skb_record_rx_queue(skb, rx_ring->cq_id); | |
18c49b91 JP |
1666 | if (vlan_id != 0xffff) |
1667 | __vlan_hwaccel_put_tag(skb, vlan_id); | |
1668 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | |
1669 | napi_gro_receive(&rx_ring->napi, skb); | |
1670 | else | |
1671 | netif_receive_skb(skb); | |
4f848c0a RM |
1672 | } |
1673 | ||
8668ae92 | 1674 | static void ql_realign_skb(struct sk_buff *skb, int len) |
c4e84bde RM |
1675 | { |
1676 | void *temp_addr = skb->data; | |
1677 | ||
1678 | /* Undo the skb_reserve(skb,32) we did before | |
1679 | * giving to hardware, and realign data on | |
1680 | * a 2-byte boundary. | |
1681 | */ | |
1682 | skb->data -= QLGE_SB_PAD - NET_IP_ALIGN; | |
1683 | skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN; | |
1684 | skb_copy_to_linear_data(skb, temp_addr, | |
1685 | (unsigned int)len); | |
1686 | } | |
1687 | ||
1688 | /* | |
1689 | * This function builds an skb for the given inbound | |
1690 | * completion. It will be rewritten for readability in the near | |
1691 | * future, but for not it works well. | |
1692 | */ | |
1693 | static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, | |
1694 | struct rx_ring *rx_ring, | |
1695 | struct ib_mac_iocb_rsp *ib_mac_rsp) | |
1696 | { | |
1697 | struct bq_desc *lbq_desc; | |
1698 | struct bq_desc *sbq_desc; | |
1699 | struct sk_buff *skb = NULL; | |
1700 | u32 length = le32_to_cpu(ib_mac_rsp->data_len); | |
1701 | u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); | |
1702 | ||
1703 | /* | |
1704 | * Handle the header buffer if present. | |
1705 | */ | |
1706 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV && | |
1707 | ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | |
ae9540f7 JP |
1708 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1709 | "Header of %d bytes in small buffer.\n", hdr_len); | |
c4e84bde RM |
1710 | /* |
1711 | * Headers fit nicely into a small buffer. | |
1712 | */ | |
1713 | sbq_desc = ql_get_curr_sbuf(rx_ring); | |
1714 | pci_unmap_single(qdev->pdev, | |
64b9b41d FT |
1715 | dma_unmap_addr(sbq_desc, mapaddr), |
1716 | dma_unmap_len(sbq_desc, maplen), | |
c4e84bde RM |
1717 | PCI_DMA_FROMDEVICE); |
1718 | skb = sbq_desc->p.skb; | |
1719 | ql_realign_skb(skb, hdr_len); | |
1720 | skb_put(skb, hdr_len); | |
1721 | sbq_desc->p.skb = NULL; | |
1722 | } | |
1723 | ||
1724 | /* | |
1725 | * Handle the data buffer(s). | |
1726 | */ | |
1727 | if (unlikely(!length)) { /* Is there data too? */ | |
ae9540f7 JP |
1728 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1729 | "No Data buffer in this packet.\n"); | |
c4e84bde RM |
1730 | return skb; |
1731 | } | |
1732 | ||
1733 | if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { | |
1734 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | |
ae9540f7 JP |
1735 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1736 | "Headers in small, data of %d bytes in small, combine them.\n", | |
1737 | length); | |
c4e84bde RM |
1738 | /* |
1739 | * Data is less than small buffer size so it's | |
1740 | * stuffed in a small buffer. | |
1741 | * For this case we append the data | |
1742 | * from the "data" small buffer to the "header" small | |
1743 | * buffer. | |
1744 | */ | |
1745 | sbq_desc = ql_get_curr_sbuf(rx_ring); | |
1746 | pci_dma_sync_single_for_cpu(qdev->pdev, | |
64b9b41d | 1747 | dma_unmap_addr |
c4e84bde | 1748 | (sbq_desc, mapaddr), |
64b9b41d | 1749 | dma_unmap_len |
c4e84bde RM |
1750 | (sbq_desc, maplen), |
1751 | PCI_DMA_FROMDEVICE); | |
1752 | memcpy(skb_put(skb, length), | |
1753 | sbq_desc->p.skb->data, length); | |
1754 | pci_dma_sync_single_for_device(qdev->pdev, | |
64b9b41d | 1755 | dma_unmap_addr |
c4e84bde RM |
1756 | (sbq_desc, |
1757 | mapaddr), | |
64b9b41d | 1758 | dma_unmap_len |
c4e84bde RM |
1759 | (sbq_desc, |
1760 | maplen), | |
1761 | PCI_DMA_FROMDEVICE); | |
1762 | } else { | |
ae9540f7 JP |
1763 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1764 | "%d bytes in a single small buffer.\n", | |
1765 | length); | |
c4e84bde RM |
1766 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1767 | skb = sbq_desc->p.skb; | |
1768 | ql_realign_skb(skb, length); | |
1769 | skb_put(skb, length); | |
1770 | pci_unmap_single(qdev->pdev, | |
64b9b41d | 1771 | dma_unmap_addr(sbq_desc, |
c4e84bde | 1772 | mapaddr), |
64b9b41d | 1773 | dma_unmap_len(sbq_desc, |
c4e84bde RM |
1774 | maplen), |
1775 | PCI_DMA_FROMDEVICE); | |
1776 | sbq_desc->p.skb = NULL; | |
1777 | } | |
1778 | } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { | |
1779 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) { | |
ae9540f7 JP |
1780 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1781 | "Header in small, %d bytes in large. Chain large to small!\n", | |
1782 | length); | |
c4e84bde RM |
1783 | /* |
1784 | * The data is in a single large buffer. We | |
1785 | * chain it to the header buffer's skb and let | |
1786 | * it rip. | |
1787 | */ | |
7c734359 | 1788 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
ae9540f7 JP |
1789 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1790 | "Chaining page at offset = %d, for %d bytes to skb.\n", | |
1791 | lbq_desc->p.pg_chunk.offset, length); | |
7c734359 RM |
1792 | skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page, |
1793 | lbq_desc->p.pg_chunk.offset, | |
1794 | length); | |
c4e84bde RM |
1795 | skb->len += length; |
1796 | skb->data_len += length; | |
1797 | skb->truesize += length; | |
c4e84bde RM |
1798 | } else { |
1799 | /* | |
1800 | * The headers and data are in a single large buffer. We | |
1801 | * copy it to a new skb and let it go. This can happen with | |
1802 | * jumbo mtu on a non-TCP/UDP frame. | |
1803 | */ | |
7c734359 | 1804 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
c4e84bde RM |
1805 | skb = netdev_alloc_skb(qdev->ndev, length); |
1806 | if (skb == NULL) { | |
ae9540f7 JP |
1807 | netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, |
1808 | "No skb available, drop the packet.\n"); | |
c4e84bde RM |
1809 | return NULL; |
1810 | } | |
4055c7d4 | 1811 | pci_unmap_page(qdev->pdev, |
64b9b41d | 1812 | dma_unmap_addr(lbq_desc, |
4055c7d4 | 1813 | mapaddr), |
64b9b41d | 1814 | dma_unmap_len(lbq_desc, maplen), |
4055c7d4 | 1815 | PCI_DMA_FROMDEVICE); |
c4e84bde | 1816 | skb_reserve(skb, NET_IP_ALIGN); |
ae9540f7 JP |
1817 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1818 | "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", | |
1819 | length); | |
7c734359 RM |
1820 | skb_fill_page_desc(skb, 0, |
1821 | lbq_desc->p.pg_chunk.page, | |
1822 | lbq_desc->p.pg_chunk.offset, | |
1823 | length); | |
c4e84bde RM |
1824 | skb->len += length; |
1825 | skb->data_len += length; | |
1826 | skb->truesize += length; | |
1827 | length -= length; | |
c4e84bde RM |
1828 | __pskb_pull_tail(skb, |
1829 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? | |
1830 | VLAN_ETH_HLEN : ETH_HLEN); | |
1831 | } | |
1832 | } else { | |
1833 | /* | |
1834 | * The data is in a chain of large buffers | |
1835 | * pointed to by a small buffer. We loop | |
1836 | * thru and chain them to the our small header | |
1837 | * buffer's skb. | |
1838 | * frags: There are 18 max frags and our small | |
1839 | * buffer will hold 32 of them. The thing is, | |
1840 | * we'll use 3 max for our 9000 byte jumbo | |
1841 | * frames. If the MTU goes up we could | |
1842 | * eventually be in trouble. | |
1843 | */ | |
7c734359 | 1844 | int size, i = 0; |
c4e84bde RM |
1845 | sbq_desc = ql_get_curr_sbuf(rx_ring); |
1846 | pci_unmap_single(qdev->pdev, | |
64b9b41d FT |
1847 | dma_unmap_addr(sbq_desc, mapaddr), |
1848 | dma_unmap_len(sbq_desc, maplen), | |
c4e84bde RM |
1849 | PCI_DMA_FROMDEVICE); |
1850 | if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { | |
1851 | /* | |
1852 | * This is an non TCP/UDP IP frame, so | |
1853 | * the headers aren't split into a small | |
1854 | * buffer. We have to use the small buffer | |
1855 | * that contains our sg list as our skb to | |
1856 | * send upstairs. Copy the sg list here to | |
1857 | * a local buffer and use it to find the | |
1858 | * pages to chain. | |
1859 | */ | |
ae9540f7 JP |
1860 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1861 | "%d bytes of headers & data in chain of large.\n", | |
1862 | length); | |
c4e84bde | 1863 | skb = sbq_desc->p.skb; |
c4e84bde RM |
1864 | sbq_desc->p.skb = NULL; |
1865 | skb_reserve(skb, NET_IP_ALIGN); | |
c4e84bde RM |
1866 | } |
1867 | while (length > 0) { | |
7c734359 RM |
1868 | lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); |
1869 | size = (length < rx_ring->lbq_buf_size) ? length : | |
1870 | rx_ring->lbq_buf_size; | |
c4e84bde | 1871 | |
ae9540f7 JP |
1872 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1873 | "Adding page %d to skb for %d bytes.\n", | |
1874 | i, size); | |
7c734359 RM |
1875 | skb_fill_page_desc(skb, i, |
1876 | lbq_desc->p.pg_chunk.page, | |
1877 | lbq_desc->p.pg_chunk.offset, | |
1878 | size); | |
c4e84bde RM |
1879 | skb->len += size; |
1880 | skb->data_len += size; | |
1881 | skb->truesize += size; | |
1882 | length -= size; | |
c4e84bde RM |
1883 | i++; |
1884 | } | |
1885 | __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? | |
1886 | VLAN_ETH_HLEN : ETH_HLEN); | |
1887 | } | |
1888 | return skb; | |
1889 | } | |
1890 | ||
1891 | /* Process an inbound completion from an rx ring. */ | |
4f848c0a | 1892 | static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, |
c4e84bde | 1893 | struct rx_ring *rx_ring, |
4f848c0a RM |
1894 | struct ib_mac_iocb_rsp *ib_mac_rsp, |
1895 | u16 vlan_id) | |
c4e84bde RM |
1896 | { |
1897 | struct net_device *ndev = qdev->ndev; | |
1898 | struct sk_buff *skb = NULL; | |
1899 | ||
1900 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); | |
1901 | ||
1902 | skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); | |
1903 | if (unlikely(!skb)) { | |
ae9540f7 JP |
1904 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1905 | "No skb available, drop packet.\n"); | |
885ee398 | 1906 | rx_ring->rx_dropped++; |
c4e84bde RM |
1907 | return; |
1908 | } | |
1909 | ||
a32959cd RM |
1910 | /* Frame error, so drop the packet. */ |
1911 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { | |
3b11d36e | 1912 | netif_info(qdev, drv, qdev->ndev, |
ae9540f7 | 1913 | "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2); |
a32959cd | 1914 | dev_kfree_skb_any(skb); |
885ee398 | 1915 | rx_ring->rx_errors++; |
a32959cd RM |
1916 | return; |
1917 | } | |
ec33a491 RM |
1918 | |
1919 | /* The max framesize filter on this chip is set higher than | |
1920 | * MTU since FCoE uses 2k frames. | |
1921 | */ | |
1922 | if (skb->len > ndev->mtu + ETH_HLEN) { | |
1923 | dev_kfree_skb_any(skb); | |
885ee398 | 1924 | rx_ring->rx_dropped++; |
ec33a491 RM |
1925 | return; |
1926 | } | |
1927 | ||
9dfbbaa6 RM |
1928 | /* loopback self test for ethtool */ |
1929 | if (test_bit(QL_SELFTEST, &qdev->flags)) { | |
1930 | ql_check_lb_frame(qdev, skb); | |
1931 | dev_kfree_skb_any(skb); | |
1932 | return; | |
1933 | } | |
1934 | ||
c4e84bde | 1935 | prefetch(skb->data); |
c4e84bde | 1936 | if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) { |
ae9540f7 JP |
1937 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n", |
1938 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | |
1939 | IB_MAC_IOCB_RSP_M_HASH ? "Hash" : | |
1940 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | |
1941 | IB_MAC_IOCB_RSP_M_REG ? "Registered" : | |
1942 | (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == | |
1943 | IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); | |
885ee398 | 1944 | rx_ring->rx_multicast++; |
c4e84bde RM |
1945 | } |
1946 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { | |
ae9540f7 JP |
1947 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1948 | "Promiscuous Packet.\n"); | |
c4e84bde | 1949 | } |
d555f592 | 1950 | |
d555f592 | 1951 | skb->protocol = eth_type_trans(skb, ndev); |
bc8acf2c | 1952 | skb_checksum_none_assert(skb); |
d555f592 RM |
1953 | |
1954 | /* If rx checksum is on, and there are no | |
1955 | * csum or frame errors. | |
1956 | */ | |
88230fd5 | 1957 | if ((ndev->features & NETIF_F_RXCSUM) && |
d555f592 RM |
1958 | !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) { |
1959 | /* TCP frame. */ | |
1960 | if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) { | |
ae9540f7 JP |
1961 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1962 | "TCP checksum done!\n"); | |
d555f592 RM |
1963 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1964 | } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && | |
1965 | (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { | |
1966 | /* Unfragmented ipv4 UDP frame. */ | |
1967 | struct iphdr *iph = (struct iphdr *) skb->data; | |
1968 | if (!(iph->frag_off & | |
6d29b1ef | 1969 | ntohs(IP_MF|IP_OFFSET))) { |
d555f592 | 1970 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
ae9540f7 JP |
1971 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
1972 | "TCP checksum done!\n"); | |
d555f592 RM |
1973 | } |
1974 | } | |
c4e84bde | 1975 | } |
d555f592 | 1976 | |
885ee398 RM |
1977 | rx_ring->rx_packets++; |
1978 | rx_ring->rx_bytes += skb->len; | |
b2014ff8 | 1979 | skb_record_rx_queue(skb, rx_ring->cq_id); |
18c49b91 JP |
1980 | if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0)) |
1981 | __vlan_hwaccel_put_tag(skb, vlan_id); | |
1982 | if (skb->ip_summed == CHECKSUM_UNNECESSARY) | |
1983 | napi_gro_receive(&rx_ring->napi, skb); | |
1984 | else | |
1985 | netif_receive_skb(skb); | |
c4e84bde RM |
1986 | } |
1987 | ||
4f848c0a RM |
1988 | /* Process an inbound completion from an rx ring. */ |
1989 | static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, | |
1990 | struct rx_ring *rx_ring, | |
1991 | struct ib_mac_iocb_rsp *ib_mac_rsp) | |
1992 | { | |
1993 | u32 length = le32_to_cpu(ib_mac_rsp->data_len); | |
1994 | u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? | |
1995 | ((le16_to_cpu(ib_mac_rsp->vlan_id) & | |
1996 | IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; | |
1997 | ||
1998 | QL_DUMP_IB_MAC_RSP(ib_mac_rsp); | |
1999 | ||
2000 | if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) { | |
2001 | /* The data and headers are split into | |
2002 | * separate buffers. | |
2003 | */ | |
2004 | ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, | |
2005 | vlan_id); | |
2006 | } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { | |
2007 | /* The data fit in a single small buffer. | |
2008 | * Allocate a new skb, copy the data and | |
2009 | * return the buffer to the free pool. | |
2010 | */ | |
2011 | ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, | |
2012 | length, vlan_id); | |
63526713 RM |
2013 | } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) && |
2014 | !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) && | |
2015 | (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) { | |
2016 | /* TCP packet in a page chunk that's been checksummed. | |
2017 | * Tack it on to our GRO skb and let it go. | |
2018 | */ | |
2019 | ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, | |
2020 | length, vlan_id); | |
4f848c0a RM |
2021 | } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { |
2022 | /* Non-TCP packet in a page chunk. Allocate an | |
2023 | * skb, tack it on frags, and send it up. | |
2024 | */ | |
2025 | ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, | |
2026 | length, vlan_id); | |
2027 | } else { | |
c0c56955 RM |
2028 | /* Non-TCP/UDP large frames that span multiple buffers |
2029 | * can be processed corrrectly by the split frame logic. | |
2030 | */ | |
2031 | ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, | |
2032 | vlan_id); | |
4f848c0a RM |
2033 | } |
2034 | ||
2035 | return (unsigned long)length; | |
2036 | } | |
2037 | ||
c4e84bde RM |
2038 | /* Process an outbound completion from an rx ring. */ |
2039 | static void ql_process_mac_tx_intr(struct ql_adapter *qdev, | |
2040 | struct ob_mac_iocb_rsp *mac_rsp) | |
2041 | { | |
2042 | struct tx_ring *tx_ring; | |
2043 | struct tx_ring_desc *tx_ring_desc; | |
2044 | ||
2045 | QL_DUMP_OB_MAC_RSP(mac_rsp); | |
2046 | tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; | |
2047 | tx_ring_desc = &tx_ring->q[mac_rsp->tid]; | |
2048 | ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); | |
885ee398 RM |
2049 | tx_ring->tx_bytes += (tx_ring_desc->skb)->len; |
2050 | tx_ring->tx_packets++; | |
c4e84bde RM |
2051 | dev_kfree_skb(tx_ring_desc->skb); |
2052 | tx_ring_desc->skb = NULL; | |
2053 | ||
2054 | if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E | | |
2055 | OB_MAC_IOCB_RSP_S | | |
2056 | OB_MAC_IOCB_RSP_L | | |
2057 | OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) { | |
2058 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) { | |
ae9540f7 JP |
2059 | netif_warn(qdev, tx_done, qdev->ndev, |
2060 | "Total descriptor length did not match transfer length.\n"); | |
c4e84bde RM |
2061 | } |
2062 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) { | |
ae9540f7 JP |
2063 | netif_warn(qdev, tx_done, qdev->ndev, |
2064 | "Frame too short to be valid, not sent.\n"); | |
c4e84bde RM |
2065 | } |
2066 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) { | |
ae9540f7 JP |
2067 | netif_warn(qdev, tx_done, qdev->ndev, |
2068 | "Frame too long, but sent anyway.\n"); | |
c4e84bde RM |
2069 | } |
2070 | if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) { | |
ae9540f7 JP |
2071 | netif_warn(qdev, tx_done, qdev->ndev, |
2072 | "PCI backplane error. Frame not sent.\n"); | |
c4e84bde RM |
2073 | } |
2074 | } | |
2075 | atomic_inc(&tx_ring->tx_count); | |
2076 | } | |
2077 | ||
2078 | /* Fire up a handler to reset the MPI processor. */ | |
2079 | void ql_queue_fw_error(struct ql_adapter *qdev) | |
2080 | { | |
6a473308 | 2081 | ql_link_off(qdev); |
c4e84bde RM |
2082 | queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); |
2083 | } | |
2084 | ||
2085 | void ql_queue_asic_error(struct ql_adapter *qdev) | |
2086 | { | |
6a473308 | 2087 | ql_link_off(qdev); |
c4e84bde | 2088 | ql_disable_interrupts(qdev); |
6497b607 RM |
2089 | /* Clear adapter up bit to signal the recovery |
2090 | * process that it shouldn't kill the reset worker | |
2091 | * thread | |
2092 | */ | |
2093 | clear_bit(QL_ADAPTER_UP, &qdev->flags); | |
da92b393 JK |
2094 | /* Set asic recovery bit to indicate reset process that we are |
2095 | * in fatal error recovery process rather than normal close | |
2096 | */ | |
2097 | set_bit(QL_ASIC_RECOVERY, &qdev->flags); | |
c4e84bde RM |
2098 | queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); |
2099 | } | |
2100 | ||
2101 | static void ql_process_chip_ae_intr(struct ql_adapter *qdev, | |
2102 | struct ib_ae_iocb_rsp *ib_ae_rsp) | |
2103 | { | |
2104 | switch (ib_ae_rsp->event) { | |
2105 | case MGMT_ERR_EVENT: | |
ae9540f7 JP |
2106 | netif_err(qdev, rx_err, qdev->ndev, |
2107 | "Management Processor Fatal Error.\n"); | |
c4e84bde RM |
2108 | ql_queue_fw_error(qdev); |
2109 | return; | |
2110 | ||
2111 | case CAM_LOOKUP_ERR_EVENT: | |
5069ee55 JK |
2112 | netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); |
2113 | netdev_err(qdev->ndev, "This event shouldn't occur.\n"); | |
c4e84bde RM |
2114 | ql_queue_asic_error(qdev); |
2115 | return; | |
2116 | ||
2117 | case SOFT_ECC_ERROR_EVENT: | |
5069ee55 | 2118 | netdev_err(qdev->ndev, "Soft ECC error detected.\n"); |
c4e84bde RM |
2119 | ql_queue_asic_error(qdev); |
2120 | break; | |
2121 | ||
2122 | case PCI_ERR_ANON_BUF_RD: | |
5069ee55 JK |
2123 | netdev_err(qdev->ndev, "PCI error occurred when reading " |
2124 | "anonymous buffers from rx_ring %d.\n", | |
2125 | ib_ae_rsp->q_id); | |
c4e84bde RM |
2126 | ql_queue_asic_error(qdev); |
2127 | break; | |
2128 | ||
2129 | default: | |
ae9540f7 JP |
2130 | netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", |
2131 | ib_ae_rsp->event); | |
c4e84bde RM |
2132 | ql_queue_asic_error(qdev); |
2133 | break; | |
2134 | } | |
2135 | } | |
2136 | ||
2137 | static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | |
2138 | { | |
2139 | struct ql_adapter *qdev = rx_ring->qdev; | |
ba7cd3ba | 2140 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
c4e84bde RM |
2141 | struct ob_mac_iocb_rsp *net_rsp = NULL; |
2142 | int count = 0; | |
2143 | ||
1e213303 | 2144 | struct tx_ring *tx_ring; |
c4e84bde RM |
2145 | /* While there are entries in the completion queue. */ |
2146 | while (prod != rx_ring->cnsmr_idx) { | |
2147 | ||
ae9540f7 JP |
2148 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2149 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", | |
2150 | rx_ring->cq_id, prod, rx_ring->cnsmr_idx); | |
c4e84bde RM |
2151 | |
2152 | net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; | |
2153 | rmb(); | |
2154 | switch (net_rsp->opcode) { | |
2155 | ||
2156 | case OPCODE_OB_MAC_TSO_IOCB: | |
2157 | case OPCODE_OB_MAC_IOCB: | |
2158 | ql_process_mac_tx_intr(qdev, net_rsp); | |
2159 | break; | |
2160 | default: | |
ae9540f7 JP |
2161 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2162 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | |
2163 | net_rsp->opcode); | |
c4e84bde RM |
2164 | } |
2165 | count++; | |
2166 | ql_update_cq(rx_ring); | |
ba7cd3ba | 2167 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
c4e84bde | 2168 | } |
4da79504 DC |
2169 | if (!net_rsp) |
2170 | return 0; | |
c4e84bde | 2171 | ql_write_cq_idx(rx_ring); |
1e213303 | 2172 | tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; |
4da79504 | 2173 | if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { |
d0de7309 | 2174 | if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) |
c4e84bde RM |
2175 | /* |
2176 | * The queue got stopped because the tx_ring was full. | |
2177 | * Wake it up, because it's now at least 25% empty. | |
2178 | */ | |
1e213303 | 2179 | netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); |
c4e84bde RM |
2180 | } |
2181 | ||
2182 | return count; | |
2183 | } | |
2184 | ||
2185 | static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | |
2186 | { | |
2187 | struct ql_adapter *qdev = rx_ring->qdev; | |
ba7cd3ba | 2188 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
c4e84bde RM |
2189 | struct ql_net_rsp_iocb *net_rsp; |
2190 | int count = 0; | |
2191 | ||
2192 | /* While there are entries in the completion queue. */ | |
2193 | while (prod != rx_ring->cnsmr_idx) { | |
2194 | ||
ae9540f7 JP |
2195 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2196 | "cq_id = %d, prod = %d, cnsmr = %d.\n.", | |
2197 | rx_ring->cq_id, prod, rx_ring->cnsmr_idx); | |
c4e84bde RM |
2198 | |
2199 | net_rsp = rx_ring->curr_entry; | |
2200 | rmb(); | |
2201 | switch (net_rsp->opcode) { | |
2202 | case OPCODE_IB_MAC_IOCB: | |
2203 | ql_process_mac_rx_intr(qdev, rx_ring, | |
2204 | (struct ib_mac_iocb_rsp *) | |
2205 | net_rsp); | |
2206 | break; | |
2207 | ||
2208 | case OPCODE_IB_AE_IOCB: | |
2209 | ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) | |
2210 | net_rsp); | |
2211 | break; | |
2212 | default: | |
ae9540f7 JP |
2213 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2214 | "Hit default case, not handled! dropping the packet, opcode = %x.\n", | |
2215 | net_rsp->opcode); | |
2216 | break; | |
c4e84bde RM |
2217 | } |
2218 | count++; | |
2219 | ql_update_cq(rx_ring); | |
ba7cd3ba | 2220 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
c4e84bde RM |
2221 | if (count == budget) |
2222 | break; | |
2223 | } | |
2224 | ql_update_buffer_queues(qdev, rx_ring); | |
2225 | ql_write_cq_idx(rx_ring); | |
2226 | return count; | |
2227 | } | |
2228 | ||
2229 | static int ql_napi_poll_msix(struct napi_struct *napi, int budget) | |
2230 | { | |
2231 | struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); | |
2232 | struct ql_adapter *qdev = rx_ring->qdev; | |
39aa8165 RM |
2233 | struct rx_ring *trx_ring; |
2234 | int i, work_done = 0; | |
2235 | struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; | |
c4e84bde | 2236 | |
ae9540f7 JP |
2237 | netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, |
2238 | "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); | |
c4e84bde | 2239 | |
39aa8165 RM |
2240 | /* Service the TX rings first. They start |
2241 | * right after the RSS rings. */ | |
2242 | for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) { | |
2243 | trx_ring = &qdev->rx_ring[i]; | |
2244 | /* If this TX completion ring belongs to this vector and | |
2245 | * it's not empty then service it. | |
2246 | */ | |
2247 | if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && | |
2248 | (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != | |
2249 | trx_ring->cnsmr_idx)) { | |
ae9540f7 JP |
2250 | netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, |
2251 | "%s: Servicing TX completion ring %d.\n", | |
2252 | __func__, trx_ring->cq_id); | |
39aa8165 RM |
2253 | ql_clean_outbound_rx_ring(trx_ring); |
2254 | } | |
2255 | } | |
2256 | ||
2257 | /* | |
2258 | * Now service the RSS ring if it's active. | |
2259 | */ | |
2260 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != | |
2261 | rx_ring->cnsmr_idx) { | |
ae9540f7 JP |
2262 | netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, |
2263 | "%s: Servicing RX completion ring %d.\n", | |
2264 | __func__, rx_ring->cq_id); | |
39aa8165 RM |
2265 | work_done = ql_clean_inbound_rx_ring(rx_ring, budget); |
2266 | } | |
2267 | ||
c4e84bde | 2268 | if (work_done < budget) { |
22bdd4f5 | 2269 | napi_complete(napi); |
c4e84bde RM |
2270 | ql_enable_completion_interrupt(qdev, rx_ring->irq); |
2271 | } | |
2272 | return work_done; | |
2273 | } | |
2274 | ||
c8f44aff | 2275 | static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) |
c4e84bde RM |
2276 | { |
2277 | struct ql_adapter *qdev = netdev_priv(ndev); | |
2278 | ||
18c49b91 | 2279 | if (features & NETIF_F_HW_VLAN_RX) { |
c4e84bde | 2280 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | |
18c49b91 | 2281 | NIC_RCV_CFG_VLAN_MATCH_AND_NON); |
c4e84bde | 2282 | } else { |
c4e84bde RM |
2283 | ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); |
2284 | } | |
2285 | } | |
2286 | ||
c8f44aff MM |
2287 | static netdev_features_t qlge_fix_features(struct net_device *ndev, |
2288 | netdev_features_t features) | |
18c49b91 JP |
2289 | { |
2290 | /* | |
2291 | * Since there is no support for separate rx/tx vlan accel | |
2292 | * enable/disable make sure tx flag is always in same state as rx. | |
2293 | */ | |
2294 | if (features & NETIF_F_HW_VLAN_RX) | |
2295 | features |= NETIF_F_HW_VLAN_TX; | |
2296 | else | |
2297 | features &= ~NETIF_F_HW_VLAN_TX; | |
2298 | ||
2299 | return features; | |
2300 | } | |
2301 | ||
c8f44aff MM |
2302 | static int qlge_set_features(struct net_device *ndev, |
2303 | netdev_features_t features) | |
18c49b91 | 2304 | { |
c8f44aff | 2305 | netdev_features_t changed = ndev->features ^ features; |
18c49b91 JP |
2306 | |
2307 | if (changed & NETIF_F_HW_VLAN_RX) | |
2308 | qlge_vlan_mode(ndev, features); | |
2309 | ||
2310 | return 0; | |
2311 | } | |
2312 | ||
8e586137 | 2313 | static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) |
c4e84bde | 2314 | { |
c4e84bde | 2315 | u32 enable_bit = MAC_ADDR_E; |
8e586137 | 2316 | int err; |
c4e84bde | 2317 | |
8e586137 JP |
2318 | err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, |
2319 | MAC_ADDR_TYPE_VLAN, vid); | |
2320 | if (err) | |
ae9540f7 JP |
2321 | netif_err(qdev, ifup, qdev->ndev, |
2322 | "Failed to init vlan address.\n"); | |
8e586137 | 2323 | return err; |
c4e84bde RM |
2324 | } |
2325 | ||
8e586137 | 2326 | static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid) |
c4e84bde RM |
2327 | { |
2328 | struct ql_adapter *qdev = netdev_priv(ndev); | |
cc288f54 | 2329 | int status; |
8e586137 | 2330 | int err; |
cc288f54 RM |
2331 | |
2332 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | |
2333 | if (status) | |
8e586137 | 2334 | return status; |
c4e84bde | 2335 | |
8e586137 | 2336 | err = __qlge_vlan_rx_add_vid(qdev, vid); |
18c49b91 JP |
2337 | set_bit(vid, qdev->active_vlans); |
2338 | ||
2339 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | |
8e586137 JP |
2340 | |
2341 | return err; | |
18c49b91 JP |
2342 | } |
2343 | ||
8e586137 | 2344 | static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) |
18c49b91 JP |
2345 | { |
2346 | u32 enable_bit = 0; | |
8e586137 | 2347 | int err; |
18c49b91 | 2348 | |
8e586137 JP |
2349 | err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit, |
2350 | MAC_ADDR_TYPE_VLAN, vid); | |
2351 | if (err) | |
ae9540f7 JP |
2352 | netif_err(qdev, ifup, qdev->ndev, |
2353 | "Failed to clear vlan address.\n"); | |
8e586137 | 2354 | return err; |
18c49b91 JP |
2355 | } |
2356 | ||
8e586137 | 2357 | static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) |
18c49b91 JP |
2358 | { |
2359 | struct ql_adapter *qdev = netdev_priv(ndev); | |
2360 | int status; | |
8e586137 | 2361 | int err; |
c4e84bde | 2362 | |
18c49b91 JP |
2363 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
2364 | if (status) | |
8e586137 | 2365 | return status; |
18c49b91 | 2366 | |
8e586137 | 2367 | err = __qlge_vlan_rx_kill_vid(qdev, vid); |
18c49b91 JP |
2368 | clear_bit(vid, qdev->active_vlans); |
2369 | ||
2370 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | |
8e586137 JP |
2371 | |
2372 | return err; | |
c4e84bde RM |
2373 | } |
2374 | ||
c1b60092 RM |
2375 | static void qlge_restore_vlan(struct ql_adapter *qdev) |
2376 | { | |
18c49b91 JP |
2377 | int status; |
2378 | u16 vid; | |
2379 | ||
2380 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); | |
2381 | if (status) | |
2382 | return; | |
2383 | ||
2384 | for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) | |
2385 | __qlge_vlan_rx_add_vid(qdev, vid); | |
2386 | ||
2387 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); | |
c1b60092 RM |
2388 | } |
2389 | ||
c4e84bde RM |
2390 | /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */ |
2391 | static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id) | |
2392 | { | |
2393 | struct rx_ring *rx_ring = dev_id; | |
288379f0 | 2394 | napi_schedule(&rx_ring->napi); |
c4e84bde RM |
2395 | return IRQ_HANDLED; |
2396 | } | |
2397 | ||
c4e84bde RM |
2398 | /* This handles a fatal error, MPI activity, and the default |
2399 | * rx_ring in an MSI-X multiple vector environment. | |
2400 | * In MSI/Legacy environment it also process the rest of | |
2401 | * the rx_rings. | |
2402 | */ | |
2403 | static irqreturn_t qlge_isr(int irq, void *dev_id) | |
2404 | { | |
2405 | struct rx_ring *rx_ring = dev_id; | |
2406 | struct ql_adapter *qdev = rx_ring->qdev; | |
2407 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
2408 | u32 var; | |
c4e84bde RM |
2409 | int work_done = 0; |
2410 | ||
bb0d215c RM |
2411 | spin_lock(&qdev->hw_lock); |
2412 | if (atomic_read(&qdev->intr_context[0].irq_cnt)) { | |
ae9540f7 JP |
2413 | netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, |
2414 | "Shared Interrupt, Not ours!\n"); | |
bb0d215c RM |
2415 | spin_unlock(&qdev->hw_lock); |
2416 | return IRQ_NONE; | |
c4e84bde | 2417 | } |
bb0d215c | 2418 | spin_unlock(&qdev->hw_lock); |
c4e84bde | 2419 | |
bb0d215c | 2420 | var = ql_disable_completion_interrupt(qdev, intr_context->intr); |
c4e84bde RM |
2421 | |
2422 | /* | |
2423 | * Check for fatal error. | |
2424 | */ | |
2425 | if (var & STS_FE) { | |
2426 | ql_queue_asic_error(qdev); | |
5069ee55 | 2427 | netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); |
c4e84bde | 2428 | var = ql_read32(qdev, ERR_STS); |
5069ee55 JK |
2429 | netdev_err(qdev->ndev, "Resetting chip. " |
2430 | "Error Status Register = 0x%x\n", var); | |
c4e84bde RM |
2431 | return IRQ_HANDLED; |
2432 | } | |
2433 | ||
2434 | /* | |
2435 | * Check MPI processor activity. | |
2436 | */ | |
5ee22a5a RM |
2437 | if ((var & STS_PI) && |
2438 | (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { | |
c4e84bde RM |
2439 | /* |
2440 | * We've got an async event or mailbox completion. | |
2441 | * Handle it and clear the source of the interrupt. | |
2442 | */ | |
ae9540f7 JP |
2443 | netif_err(qdev, intr, qdev->ndev, |
2444 | "Got MPI processor interrupt.\n"); | |
c4e84bde | 2445 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
5ee22a5a RM |
2446 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); |
2447 | queue_delayed_work_on(smp_processor_id(), | |
2448 | qdev->workqueue, &qdev->mpi_work, 0); | |
c4e84bde RM |
2449 | work_done++; |
2450 | } | |
2451 | ||
2452 | /* | |
39aa8165 RM |
2453 | * Get the bit-mask that shows the active queues for this |
2454 | * pass. Compare it to the queues that this irq services | |
2455 | * and call napi if there's a match. | |
c4e84bde | 2456 | */ |
39aa8165 RM |
2457 | var = ql_read32(qdev, ISR1); |
2458 | if (var & intr_context->irq_mask) { | |
ae9540f7 JP |
2459 | netif_info(qdev, intr, qdev->ndev, |
2460 | "Waking handler for rx_ring[0].\n"); | |
39aa8165 | 2461 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
32a5b2a0 RM |
2462 | napi_schedule(&rx_ring->napi); |
2463 | work_done++; | |
2464 | } | |
bb0d215c | 2465 | ql_enable_completion_interrupt(qdev, intr_context->intr); |
c4e84bde RM |
2466 | return work_done ? IRQ_HANDLED : IRQ_NONE; |
2467 | } | |
2468 | ||
2469 | static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |
2470 | { | |
2471 | ||
2472 | if (skb_is_gso(skb)) { | |
2473 | int err; | |
2474 | if (skb_header_cloned(skb)) { | |
2475 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | |
2476 | if (err) | |
2477 | return err; | |
2478 | } | |
2479 | ||
2480 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; | |
2481 | mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; | |
2482 | mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); | |
2483 | mac_iocb_ptr->total_hdrs_len = | |
2484 | cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); | |
2485 | mac_iocb_ptr->net_trans_offset = | |
2486 | cpu_to_le16(skb_network_offset(skb) | | |
2487 | skb_transport_offset(skb) | |
2488 | << OB_MAC_TRANSPORT_HDR_SHIFT); | |
2489 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | |
2490 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; | |
2491 | if (likely(skb->protocol == htons(ETH_P_IP))) { | |
2492 | struct iphdr *iph = ip_hdr(skb); | |
2493 | iph->check = 0; | |
2494 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | |
2495 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, | |
2496 | iph->daddr, 0, | |
2497 | IPPROTO_TCP, | |
2498 | 0); | |
2499 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | |
2500 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; | |
2501 | tcp_hdr(skb)->check = | |
2502 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
2503 | &ipv6_hdr(skb)->daddr, | |
2504 | 0, IPPROTO_TCP, 0); | |
2505 | } | |
2506 | return 1; | |
2507 | } | |
2508 | return 0; | |
2509 | } | |
2510 | ||
2511 | static void ql_hw_csum_setup(struct sk_buff *skb, | |
2512 | struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |
2513 | { | |
2514 | int len; | |
2515 | struct iphdr *iph = ip_hdr(skb); | |
fd2df4f7 | 2516 | __sum16 *check; |
c4e84bde RM |
2517 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB; |
2518 | mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len); | |
2519 | mac_iocb_ptr->net_trans_offset = | |
2520 | cpu_to_le16(skb_network_offset(skb) | | |
2521 | skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); | |
2522 | ||
2523 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | |
2524 | len = (ntohs(iph->tot_len) - (iph->ihl << 2)); | |
2525 | if (likely(iph->protocol == IPPROTO_TCP)) { | |
2526 | check = &(tcp_hdr(skb)->check); | |
2527 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; | |
2528 | mac_iocb_ptr->total_hdrs_len = | |
2529 | cpu_to_le16(skb_transport_offset(skb) + | |
2530 | (tcp_hdr(skb)->doff << 2)); | |
2531 | } else { | |
2532 | check = &(udp_hdr(skb)->check); | |
2533 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; | |
2534 | mac_iocb_ptr->total_hdrs_len = | |
2535 | cpu_to_le16(skb_transport_offset(skb) + | |
2536 | sizeof(struct udphdr)); | |
2537 | } | |
2538 | *check = ~csum_tcpudp_magic(iph->saddr, | |
2539 | iph->daddr, len, iph->protocol, 0); | |
2540 | } | |
2541 | ||
61357325 | 2542 | static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) |
c4e84bde RM |
2543 | { |
2544 | struct tx_ring_desc *tx_ring_desc; | |
2545 | struct ob_mac_iocb_req *mac_iocb_ptr; | |
2546 | struct ql_adapter *qdev = netdev_priv(ndev); | |
2547 | int tso; | |
2548 | struct tx_ring *tx_ring; | |
1e213303 | 2549 | u32 tx_ring_idx = (u32) skb->queue_mapping; |
c4e84bde RM |
2550 | |
2551 | tx_ring = &qdev->tx_ring[tx_ring_idx]; | |
2552 | ||
74c50b4b RM |
2553 | if (skb_padto(skb, ETH_ZLEN)) |
2554 | return NETDEV_TX_OK; | |
2555 | ||
c4e84bde | 2556 | if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { |
ae9540f7 | 2557 | netif_info(qdev, tx_queued, qdev->ndev, |
41812db8 | 2558 | "%s: BUG! shutting down tx queue %d due to lack of resources.\n", |
ae9540f7 | 2559 | __func__, tx_ring_idx); |
1e213303 | 2560 | netif_stop_subqueue(ndev, tx_ring->wq_id); |
885ee398 | 2561 | tx_ring->tx_errors++; |
c4e84bde RM |
2562 | return NETDEV_TX_BUSY; |
2563 | } | |
2564 | tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; | |
2565 | mac_iocb_ptr = tx_ring_desc->queue_entry; | |
e332471c | 2566 | memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr)); |
c4e84bde RM |
2567 | |
2568 | mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB; | |
2569 | mac_iocb_ptr->tid = tx_ring_desc->index; | |
2570 | /* We use the upper 32-bits to store the tx queue for this IO. | |
2571 | * When we get the completion we can use it to establish the context. | |
2572 | */ | |
2573 | mac_iocb_ptr->txq_idx = tx_ring_idx; | |
2574 | tx_ring_desc->skb = skb; | |
2575 | ||
2576 | mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len); | |
2577 | ||
eab6d18d | 2578 | if (vlan_tx_tag_present(skb)) { |
ae9540f7 JP |
2579 | netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, |
2580 | "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb)); | |
c4e84bde RM |
2581 | mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; |
2582 | mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb)); | |
2583 | } | |
2584 | tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); | |
2585 | if (tso < 0) { | |
2586 | dev_kfree_skb_any(skb); | |
2587 | return NETDEV_TX_OK; | |
2588 | } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { | |
2589 | ql_hw_csum_setup(skb, | |
2590 | (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); | |
2591 | } | |
0d979f74 RM |
2592 | if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != |
2593 | NETDEV_TX_OK) { | |
ae9540f7 JP |
2594 | netif_err(qdev, tx_queued, qdev->ndev, |
2595 | "Could not map the segments.\n"); | |
885ee398 | 2596 | tx_ring->tx_errors++; |
0d979f74 RM |
2597 | return NETDEV_TX_BUSY; |
2598 | } | |
c4e84bde RM |
2599 | QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); |
2600 | tx_ring->prod_idx++; | |
2601 | if (tx_ring->prod_idx == tx_ring->wq_len) | |
2602 | tx_ring->prod_idx = 0; | |
2603 | wmb(); | |
2604 | ||
2605 | ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); | |
ae9540f7 JP |
2606 | netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, |
2607 | "tx queued, slot %d, len %d\n", | |
2608 | tx_ring->prod_idx, skb->len); | |
c4e84bde RM |
2609 | |
2610 | atomic_dec(&tx_ring->tx_count); | |
41812db8 JK |
2611 | |
2612 | if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) { | |
2613 | netif_stop_subqueue(ndev, tx_ring->wq_id); | |
2614 | if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) | |
2615 | /* | |
2616 | * The queue got stopped because the tx_ring was full. | |
2617 | * Wake it up, because it's now at least 25% empty. | |
2618 | */ | |
2619 | netif_wake_subqueue(qdev->ndev, tx_ring->wq_id); | |
2620 | } | |
c4e84bde RM |
2621 | return NETDEV_TX_OK; |
2622 | } | |
2623 | ||
9dfbbaa6 | 2624 | |
c4e84bde RM |
2625 | static void ql_free_shadow_space(struct ql_adapter *qdev) |
2626 | { | |
2627 | if (qdev->rx_ring_shadow_reg_area) { | |
2628 | pci_free_consistent(qdev->pdev, | |
2629 | PAGE_SIZE, | |
2630 | qdev->rx_ring_shadow_reg_area, | |
2631 | qdev->rx_ring_shadow_reg_dma); | |
2632 | qdev->rx_ring_shadow_reg_area = NULL; | |
2633 | } | |
2634 | if (qdev->tx_ring_shadow_reg_area) { | |
2635 | pci_free_consistent(qdev->pdev, | |
2636 | PAGE_SIZE, | |
2637 | qdev->tx_ring_shadow_reg_area, | |
2638 | qdev->tx_ring_shadow_reg_dma); | |
2639 | qdev->tx_ring_shadow_reg_area = NULL; | |
2640 | } | |
2641 | } | |
2642 | ||
2643 | static int ql_alloc_shadow_space(struct ql_adapter *qdev) | |
2644 | { | |
2645 | qdev->rx_ring_shadow_reg_area = | |
2646 | pci_alloc_consistent(qdev->pdev, | |
2647 | PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma); | |
2648 | if (qdev->rx_ring_shadow_reg_area == NULL) { | |
ae9540f7 JP |
2649 | netif_err(qdev, ifup, qdev->ndev, |
2650 | "Allocation of RX shadow space failed.\n"); | |
c4e84bde RM |
2651 | return -ENOMEM; |
2652 | } | |
b25215d0 | 2653 | memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE); |
c4e84bde RM |
2654 | qdev->tx_ring_shadow_reg_area = |
2655 | pci_alloc_consistent(qdev->pdev, PAGE_SIZE, | |
2656 | &qdev->tx_ring_shadow_reg_dma); | |
2657 | if (qdev->tx_ring_shadow_reg_area == NULL) { | |
ae9540f7 JP |
2658 | netif_err(qdev, ifup, qdev->ndev, |
2659 | "Allocation of TX shadow space failed.\n"); | |
c4e84bde RM |
2660 | goto err_wqp_sh_area; |
2661 | } | |
b25215d0 | 2662 | memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE); |
c4e84bde RM |
2663 | return 0; |
2664 | ||
2665 | err_wqp_sh_area: | |
2666 | pci_free_consistent(qdev->pdev, | |
2667 | PAGE_SIZE, | |
2668 | qdev->rx_ring_shadow_reg_area, | |
2669 | qdev->rx_ring_shadow_reg_dma); | |
2670 | return -ENOMEM; | |
2671 | } | |
2672 | ||
2673 | static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | |
2674 | { | |
2675 | struct tx_ring_desc *tx_ring_desc; | |
2676 | int i; | |
2677 | struct ob_mac_iocb_req *mac_iocb_ptr; | |
2678 | ||
2679 | mac_iocb_ptr = tx_ring->wq_base; | |
2680 | tx_ring_desc = tx_ring->q; | |
2681 | for (i = 0; i < tx_ring->wq_len; i++) { | |
2682 | tx_ring_desc->index = i; | |
2683 | tx_ring_desc->skb = NULL; | |
2684 | tx_ring_desc->queue_entry = mac_iocb_ptr; | |
2685 | mac_iocb_ptr++; | |
2686 | tx_ring_desc++; | |
2687 | } | |
2688 | atomic_set(&tx_ring->tx_count, tx_ring->wq_len); | |
c4e84bde RM |
2689 | } |
2690 | ||
2691 | static void ql_free_tx_resources(struct ql_adapter *qdev, | |
2692 | struct tx_ring *tx_ring) | |
2693 | { | |
2694 | if (tx_ring->wq_base) { | |
2695 | pci_free_consistent(qdev->pdev, tx_ring->wq_size, | |
2696 | tx_ring->wq_base, tx_ring->wq_base_dma); | |
2697 | tx_ring->wq_base = NULL; | |
2698 | } | |
2699 | kfree(tx_ring->q); | |
2700 | tx_ring->q = NULL; | |
2701 | } | |
2702 | ||
2703 | static int ql_alloc_tx_resources(struct ql_adapter *qdev, | |
2704 | struct tx_ring *tx_ring) | |
2705 | { | |
2706 | tx_ring->wq_base = | |
2707 | pci_alloc_consistent(qdev->pdev, tx_ring->wq_size, | |
2708 | &tx_ring->wq_base_dma); | |
2709 | ||
8e95a202 JP |
2710 | if ((tx_ring->wq_base == NULL) || |
2711 | tx_ring->wq_base_dma & WQ_ADDR_ALIGN) { | |
ae9540f7 | 2712 | netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n"); |
c4e84bde RM |
2713 | return -ENOMEM; |
2714 | } | |
2715 | tx_ring->q = | |
2716 | kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL); | |
2717 | if (tx_ring->q == NULL) | |
2718 | goto err; | |
2719 | ||
2720 | return 0; | |
2721 | err: | |
2722 | pci_free_consistent(qdev->pdev, tx_ring->wq_size, | |
2723 | tx_ring->wq_base, tx_ring->wq_base_dma); | |
2724 | return -ENOMEM; | |
2725 | } | |
2726 | ||
8668ae92 | 2727 | static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) |
c4e84bde | 2728 | { |
c4e84bde RM |
2729 | struct bq_desc *lbq_desc; |
2730 | ||
7c734359 RM |
2731 | uint32_t curr_idx, clean_idx; |
2732 | ||
2733 | curr_idx = rx_ring->lbq_curr_idx; | |
2734 | clean_idx = rx_ring->lbq_clean_idx; | |
2735 | while (curr_idx != clean_idx) { | |
2736 | lbq_desc = &rx_ring->lbq[curr_idx]; | |
2737 | ||
2738 | if (lbq_desc->p.pg_chunk.last_flag) { | |
c4e84bde | 2739 | pci_unmap_page(qdev->pdev, |
7c734359 RM |
2740 | lbq_desc->p.pg_chunk.map, |
2741 | ql_lbq_block_size(qdev), | |
c4e84bde | 2742 | PCI_DMA_FROMDEVICE); |
7c734359 | 2743 | lbq_desc->p.pg_chunk.last_flag = 0; |
c4e84bde | 2744 | } |
7c734359 RM |
2745 | |
2746 | put_page(lbq_desc->p.pg_chunk.page); | |
2747 | lbq_desc->p.pg_chunk.page = NULL; | |
2748 | ||
2749 | if (++curr_idx == rx_ring->lbq_len) | |
2750 | curr_idx = 0; | |
2751 | ||
c4e84bde RM |
2752 | } |
2753 | } | |
2754 | ||
8668ae92 | 2755 | static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) |
c4e84bde RM |
2756 | { |
2757 | int i; | |
2758 | struct bq_desc *sbq_desc; | |
2759 | ||
2760 | for (i = 0; i < rx_ring->sbq_len; i++) { | |
2761 | sbq_desc = &rx_ring->sbq[i]; | |
2762 | if (sbq_desc == NULL) { | |
ae9540f7 JP |
2763 | netif_err(qdev, ifup, qdev->ndev, |
2764 | "sbq_desc %d is NULL.\n", i); | |
c4e84bde RM |
2765 | return; |
2766 | } | |
2767 | if (sbq_desc->p.skb) { | |
2768 | pci_unmap_single(qdev->pdev, | |
64b9b41d FT |
2769 | dma_unmap_addr(sbq_desc, mapaddr), |
2770 | dma_unmap_len(sbq_desc, maplen), | |
c4e84bde RM |
2771 | PCI_DMA_FROMDEVICE); |
2772 | dev_kfree_skb(sbq_desc->p.skb); | |
2773 | sbq_desc->p.skb = NULL; | |
2774 | } | |
c4e84bde RM |
2775 | } |
2776 | } | |
2777 | ||
4545a3f2 RM |
2778 | /* Free all large and small rx buffers associated |
2779 | * with the completion queues for this device. | |
2780 | */ | |
2781 | static void ql_free_rx_buffers(struct ql_adapter *qdev) | |
2782 | { | |
2783 | int i; | |
2784 | struct rx_ring *rx_ring; | |
2785 | ||
2786 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
2787 | rx_ring = &qdev->rx_ring[i]; | |
2788 | if (rx_ring->lbq) | |
2789 | ql_free_lbq_buffers(qdev, rx_ring); | |
2790 | if (rx_ring->sbq) | |
2791 | ql_free_sbq_buffers(qdev, rx_ring); | |
2792 | } | |
2793 | } | |
2794 | ||
2795 | static void ql_alloc_rx_buffers(struct ql_adapter *qdev) | |
2796 | { | |
2797 | struct rx_ring *rx_ring; | |
2798 | int i; | |
2799 | ||
2800 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
2801 | rx_ring = &qdev->rx_ring[i]; | |
2802 | if (rx_ring->type != TX_Q) | |
2803 | ql_update_buffer_queues(qdev, rx_ring); | |
2804 | } | |
2805 | } | |
2806 | ||
2807 | static void ql_init_lbq_ring(struct ql_adapter *qdev, | |
2808 | struct rx_ring *rx_ring) | |
2809 | { | |
2810 | int i; | |
2811 | struct bq_desc *lbq_desc; | |
2812 | __le64 *bq = rx_ring->lbq_base; | |
2813 | ||
2814 | memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); | |
2815 | for (i = 0; i < rx_ring->lbq_len; i++) { | |
2816 | lbq_desc = &rx_ring->lbq[i]; | |
2817 | memset(lbq_desc, 0, sizeof(*lbq_desc)); | |
2818 | lbq_desc->index = i; | |
2819 | lbq_desc->addr = bq; | |
2820 | bq++; | |
2821 | } | |
2822 | } | |
2823 | ||
2824 | static void ql_init_sbq_ring(struct ql_adapter *qdev, | |
c4e84bde RM |
2825 | struct rx_ring *rx_ring) |
2826 | { | |
2827 | int i; | |
2828 | struct bq_desc *sbq_desc; | |
2c9a0d41 | 2829 | __le64 *bq = rx_ring->sbq_base; |
c4e84bde | 2830 | |
4545a3f2 | 2831 | memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); |
c4e84bde RM |
2832 | for (i = 0; i < rx_ring->sbq_len; i++) { |
2833 | sbq_desc = &rx_ring->sbq[i]; | |
4545a3f2 | 2834 | memset(sbq_desc, 0, sizeof(*sbq_desc)); |
c4e84bde | 2835 | sbq_desc->index = i; |
2c9a0d41 | 2836 | sbq_desc->addr = bq; |
c4e84bde RM |
2837 | bq++; |
2838 | } | |
c4e84bde RM |
2839 | } |
2840 | ||
2841 | static void ql_free_rx_resources(struct ql_adapter *qdev, | |
2842 | struct rx_ring *rx_ring) | |
2843 | { | |
c4e84bde RM |
2844 | /* Free the small buffer queue. */ |
2845 | if (rx_ring->sbq_base) { | |
2846 | pci_free_consistent(qdev->pdev, | |
2847 | rx_ring->sbq_size, | |
2848 | rx_ring->sbq_base, rx_ring->sbq_base_dma); | |
2849 | rx_ring->sbq_base = NULL; | |
2850 | } | |
2851 | ||
2852 | /* Free the small buffer queue control blocks. */ | |
2853 | kfree(rx_ring->sbq); | |
2854 | rx_ring->sbq = NULL; | |
2855 | ||
2856 | /* Free the large buffer queue. */ | |
2857 | if (rx_ring->lbq_base) { | |
2858 | pci_free_consistent(qdev->pdev, | |
2859 | rx_ring->lbq_size, | |
2860 | rx_ring->lbq_base, rx_ring->lbq_base_dma); | |
2861 | rx_ring->lbq_base = NULL; | |
2862 | } | |
2863 | ||
2864 | /* Free the large buffer queue control blocks. */ | |
2865 | kfree(rx_ring->lbq); | |
2866 | rx_ring->lbq = NULL; | |
2867 | ||
2868 | /* Free the rx queue. */ | |
2869 | if (rx_ring->cq_base) { | |
2870 | pci_free_consistent(qdev->pdev, | |
2871 | rx_ring->cq_size, | |
2872 | rx_ring->cq_base, rx_ring->cq_base_dma); | |
2873 | rx_ring->cq_base = NULL; | |
2874 | } | |
2875 | } | |
2876 | ||
2877 | /* Allocate queues and buffers for this completions queue based | |
2878 | * on the values in the parameter structure. */ | |
2879 | static int ql_alloc_rx_resources(struct ql_adapter *qdev, | |
2880 | struct rx_ring *rx_ring) | |
2881 | { | |
2882 | ||
2883 | /* | |
2884 | * Allocate the completion queue for this rx_ring. | |
2885 | */ | |
2886 | rx_ring->cq_base = | |
2887 | pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, | |
2888 | &rx_ring->cq_base_dma); | |
2889 | ||
2890 | if (rx_ring->cq_base == NULL) { | |
ae9540f7 | 2891 | netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); |
c4e84bde RM |
2892 | return -ENOMEM; |
2893 | } | |
2894 | ||
2895 | if (rx_ring->sbq_len) { | |
2896 | /* | |
2897 | * Allocate small buffer queue. | |
2898 | */ | |
2899 | rx_ring->sbq_base = | |
2900 | pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, | |
2901 | &rx_ring->sbq_base_dma); | |
2902 | ||
2903 | if (rx_ring->sbq_base == NULL) { | |
ae9540f7 JP |
2904 | netif_err(qdev, ifup, qdev->ndev, |
2905 | "Small buffer queue allocation failed.\n"); | |
c4e84bde RM |
2906 | goto err_mem; |
2907 | } | |
2908 | ||
2909 | /* | |
2910 | * Allocate small buffer queue control blocks. | |
2911 | */ | |
2912 | rx_ring->sbq = | |
2913 | kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc), | |
2914 | GFP_KERNEL); | |
2915 | if (rx_ring->sbq == NULL) { | |
ae9540f7 JP |
2916 | netif_err(qdev, ifup, qdev->ndev, |
2917 | "Small buffer queue control block allocation failed.\n"); | |
c4e84bde RM |
2918 | goto err_mem; |
2919 | } | |
2920 | ||
4545a3f2 | 2921 | ql_init_sbq_ring(qdev, rx_ring); |
c4e84bde RM |
2922 | } |
2923 | ||
2924 | if (rx_ring->lbq_len) { | |
2925 | /* | |
2926 | * Allocate large buffer queue. | |
2927 | */ | |
2928 | rx_ring->lbq_base = | |
2929 | pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, | |
2930 | &rx_ring->lbq_base_dma); | |
2931 | ||
2932 | if (rx_ring->lbq_base == NULL) { | |
ae9540f7 JP |
2933 | netif_err(qdev, ifup, qdev->ndev, |
2934 | "Large buffer queue allocation failed.\n"); | |
c4e84bde RM |
2935 | goto err_mem; |
2936 | } | |
2937 | /* | |
2938 | * Allocate large buffer queue control blocks. | |
2939 | */ | |
2940 | rx_ring->lbq = | |
2941 | kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc), | |
2942 | GFP_KERNEL); | |
2943 | if (rx_ring->lbq == NULL) { | |
ae9540f7 JP |
2944 | netif_err(qdev, ifup, qdev->ndev, |
2945 | "Large buffer queue control block allocation failed.\n"); | |
c4e84bde RM |
2946 | goto err_mem; |
2947 | } | |
2948 | ||
4545a3f2 | 2949 | ql_init_lbq_ring(qdev, rx_ring); |
c4e84bde RM |
2950 | } |
2951 | ||
2952 | return 0; | |
2953 | ||
2954 | err_mem: | |
2955 | ql_free_rx_resources(qdev, rx_ring); | |
2956 | return -ENOMEM; | |
2957 | } | |
2958 | ||
2959 | static void ql_tx_ring_clean(struct ql_adapter *qdev) | |
2960 | { | |
2961 | struct tx_ring *tx_ring; | |
2962 | struct tx_ring_desc *tx_ring_desc; | |
2963 | int i, j; | |
2964 | ||
2965 | /* | |
2966 | * Loop through all queues and free | |
2967 | * any resources. | |
2968 | */ | |
2969 | for (j = 0; j < qdev->tx_ring_count; j++) { | |
2970 | tx_ring = &qdev->tx_ring[j]; | |
2971 | for (i = 0; i < tx_ring->wq_len; i++) { | |
2972 | tx_ring_desc = &tx_ring->q[i]; | |
2973 | if (tx_ring_desc && tx_ring_desc->skb) { | |
ae9540f7 JP |
2974 | netif_err(qdev, ifdown, qdev->ndev, |
2975 | "Freeing lost SKB %p, from queue %d, index %d.\n", | |
2976 | tx_ring_desc->skb, j, | |
2977 | tx_ring_desc->index); | |
c4e84bde RM |
2978 | ql_unmap_send(qdev, tx_ring_desc, |
2979 | tx_ring_desc->map_cnt); | |
2980 | dev_kfree_skb(tx_ring_desc->skb); | |
2981 | tx_ring_desc->skb = NULL; | |
2982 | } | |
2983 | } | |
2984 | } | |
2985 | } | |
2986 | ||
c4e84bde RM |
2987 | static void ql_free_mem_resources(struct ql_adapter *qdev) |
2988 | { | |
2989 | int i; | |
2990 | ||
2991 | for (i = 0; i < qdev->tx_ring_count; i++) | |
2992 | ql_free_tx_resources(qdev, &qdev->tx_ring[i]); | |
2993 | for (i = 0; i < qdev->rx_ring_count; i++) | |
2994 | ql_free_rx_resources(qdev, &qdev->rx_ring[i]); | |
2995 | ql_free_shadow_space(qdev); | |
2996 | } | |
2997 | ||
2998 | static int ql_alloc_mem_resources(struct ql_adapter *qdev) | |
2999 | { | |
3000 | int i; | |
3001 | ||
3002 | /* Allocate space for our shadow registers and such. */ | |
3003 | if (ql_alloc_shadow_space(qdev)) | |
3004 | return -ENOMEM; | |
3005 | ||
3006 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
3007 | if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { | |
ae9540f7 JP |
3008 | netif_err(qdev, ifup, qdev->ndev, |
3009 | "RX resource allocation failed.\n"); | |
c4e84bde RM |
3010 | goto err_mem; |
3011 | } | |
3012 | } | |
3013 | /* Allocate tx queue resources */ | |
3014 | for (i = 0; i < qdev->tx_ring_count; i++) { | |
3015 | if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { | |
ae9540f7 JP |
3016 | netif_err(qdev, ifup, qdev->ndev, |
3017 | "TX resource allocation failed.\n"); | |
c4e84bde RM |
3018 | goto err_mem; |
3019 | } | |
3020 | } | |
3021 | return 0; | |
3022 | ||
3023 | err_mem: | |
3024 | ql_free_mem_resources(qdev); | |
3025 | return -ENOMEM; | |
3026 | } | |
3027 | ||
3028 | /* Set up the rx ring control block and pass it to the chip. | |
3029 | * The control block is defined as | |
3030 | * "Completion Queue Initialization Control Block", or cqicb. | |
3031 | */ | |
3032 | static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |
3033 | { | |
3034 | struct cqicb *cqicb = &rx_ring->cqicb; | |
3035 | void *shadow_reg = qdev->rx_ring_shadow_reg_area + | |
b8facca0 | 3036 | (rx_ring->cq_id * RX_RING_SHADOW_SPACE); |
c4e84bde | 3037 | u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + |
b8facca0 | 3038 | (rx_ring->cq_id * RX_RING_SHADOW_SPACE); |
c4e84bde RM |
3039 | void __iomem *doorbell_area = |
3040 | qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); | |
3041 | int err = 0; | |
3042 | u16 bq_len; | |
d4a4aba6 | 3043 | u64 tmp; |
b8facca0 RM |
3044 | __le64 *base_indirect_ptr; |
3045 | int page_entries; | |
c4e84bde RM |
3046 | |
3047 | /* Set up the shadow registers for this ring. */ | |
3048 | rx_ring->prod_idx_sh_reg = shadow_reg; | |
3049 | rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; | |
7c734359 | 3050 | *rx_ring->prod_idx_sh_reg = 0; |
c4e84bde RM |
3051 | shadow_reg += sizeof(u64); |
3052 | shadow_reg_dma += sizeof(u64); | |
3053 | rx_ring->lbq_base_indirect = shadow_reg; | |
3054 | rx_ring->lbq_base_indirect_dma = shadow_reg_dma; | |
b8facca0 RM |
3055 | shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); |
3056 | shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); | |
c4e84bde RM |
3057 | rx_ring->sbq_base_indirect = shadow_reg; |
3058 | rx_ring->sbq_base_indirect_dma = shadow_reg_dma; | |
3059 | ||
3060 | /* PCI doorbell mem area + 0x00 for consumer index register */ | |
8668ae92 | 3061 | rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; |
c4e84bde RM |
3062 | rx_ring->cnsmr_idx = 0; |
3063 | rx_ring->curr_entry = rx_ring->cq_base; | |
3064 | ||
3065 | /* PCI doorbell mem area + 0x04 for valid register */ | |
3066 | rx_ring->valid_db_reg = doorbell_area + 0x04; | |
3067 | ||
3068 | /* PCI doorbell mem area + 0x18 for large buffer consumer */ | |
8668ae92 | 3069 | rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); |
c4e84bde RM |
3070 | |
3071 | /* PCI doorbell mem area + 0x1c */ | |
8668ae92 | 3072 | rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); |
c4e84bde RM |
3073 | |
3074 | memset((void *)cqicb, 0, sizeof(struct cqicb)); | |
3075 | cqicb->msix_vect = rx_ring->irq; | |
3076 | ||
459caf5a RM |
3077 | bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; |
3078 | cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); | |
c4e84bde | 3079 | |
97345524 | 3080 | cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); |
c4e84bde | 3081 | |
97345524 | 3082 | cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); |
c4e84bde RM |
3083 | |
3084 | /* | |
3085 | * Set up the control block load flags. | |
3086 | */ | |
3087 | cqicb->flags = FLAGS_LC | /* Load queue base address */ | |
3088 | FLAGS_LV | /* Load MSI-X vector */ | |
3089 | FLAGS_LI; /* Load irq delay values */ | |
3090 | if (rx_ring->lbq_len) { | |
3091 | cqicb->flags |= FLAGS_LL; /* Load lbq values */ | |
a419aef8 | 3092 | tmp = (u64)rx_ring->lbq_base_dma; |
43d620c8 | 3093 | base_indirect_ptr = rx_ring->lbq_base_indirect; |
b8facca0 RM |
3094 | page_entries = 0; |
3095 | do { | |
3096 | *base_indirect_ptr = cpu_to_le64(tmp); | |
3097 | tmp += DB_PAGE_SIZE; | |
3098 | base_indirect_ptr++; | |
3099 | page_entries++; | |
3100 | } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); | |
97345524 RM |
3101 | cqicb->lbq_addr = |
3102 | cpu_to_le64(rx_ring->lbq_base_indirect_dma); | |
459caf5a RM |
3103 | bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : |
3104 | (u16) rx_ring->lbq_buf_size; | |
3105 | cqicb->lbq_buf_size = cpu_to_le16(bq_len); | |
3106 | bq_len = (rx_ring->lbq_len == 65536) ? 0 : | |
3107 | (u16) rx_ring->lbq_len; | |
c4e84bde | 3108 | cqicb->lbq_len = cpu_to_le16(bq_len); |
4545a3f2 | 3109 | rx_ring->lbq_prod_idx = 0; |
c4e84bde | 3110 | rx_ring->lbq_curr_idx = 0; |
4545a3f2 RM |
3111 | rx_ring->lbq_clean_idx = 0; |
3112 | rx_ring->lbq_free_cnt = rx_ring->lbq_len; | |
c4e84bde RM |
3113 | } |
3114 | if (rx_ring->sbq_len) { | |
3115 | cqicb->flags |= FLAGS_LS; /* Load sbq values */ | |
a419aef8 | 3116 | tmp = (u64)rx_ring->sbq_base_dma; |
43d620c8 | 3117 | base_indirect_ptr = rx_ring->sbq_base_indirect; |
b8facca0 RM |
3118 | page_entries = 0; |
3119 | do { | |
3120 | *base_indirect_ptr = cpu_to_le64(tmp); | |
3121 | tmp += DB_PAGE_SIZE; | |
3122 | base_indirect_ptr++; | |
3123 | page_entries++; | |
3124 | } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); | |
97345524 RM |
3125 | cqicb->sbq_addr = |
3126 | cpu_to_le64(rx_ring->sbq_base_indirect_dma); | |
c4e84bde | 3127 | cqicb->sbq_buf_size = |
52e55f3c | 3128 | cpu_to_le16((u16)(rx_ring->sbq_buf_size)); |
459caf5a RM |
3129 | bq_len = (rx_ring->sbq_len == 65536) ? 0 : |
3130 | (u16) rx_ring->sbq_len; | |
c4e84bde | 3131 | cqicb->sbq_len = cpu_to_le16(bq_len); |
4545a3f2 | 3132 | rx_ring->sbq_prod_idx = 0; |
c4e84bde | 3133 | rx_ring->sbq_curr_idx = 0; |
4545a3f2 RM |
3134 | rx_ring->sbq_clean_idx = 0; |
3135 | rx_ring->sbq_free_cnt = rx_ring->sbq_len; | |
c4e84bde RM |
3136 | } |
3137 | switch (rx_ring->type) { | |
3138 | case TX_Q: | |
c4e84bde RM |
3139 | cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); |
3140 | cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); | |
3141 | break; | |
c4e84bde RM |
3142 | case RX_Q: |
3143 | /* Inbound completion handling rx_rings run in | |
3144 | * separate NAPI contexts. | |
3145 | */ | |
3146 | netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, | |
3147 | 64); | |
3148 | cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); | |
3149 | cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); | |
3150 | break; | |
3151 | default: | |
ae9540f7 JP |
3152 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3153 | "Invalid rx_ring->type = %d.\n", rx_ring->type); | |
c4e84bde | 3154 | } |
c4e84bde RM |
3155 | err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), |
3156 | CFG_LCQ, rx_ring->cq_id); | |
3157 | if (err) { | |
ae9540f7 | 3158 | netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); |
c4e84bde RM |
3159 | return err; |
3160 | } | |
c4e84bde RM |
3161 | return err; |
3162 | } | |
3163 | ||
3164 | static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | |
3165 | { | |
3166 | struct wqicb *wqicb = (struct wqicb *)tx_ring; | |
3167 | void __iomem *doorbell_area = | |
3168 | qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); | |
3169 | void *shadow_reg = qdev->tx_ring_shadow_reg_area + | |
3170 | (tx_ring->wq_id * sizeof(u64)); | |
3171 | u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + | |
3172 | (tx_ring->wq_id * sizeof(u64)); | |
3173 | int err = 0; | |
3174 | ||
3175 | /* | |
3176 | * Assign doorbell registers for this tx_ring. | |
3177 | */ | |
3178 | /* TX PCI doorbell mem area for tx producer index */ | |
8668ae92 | 3179 | tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area; |
c4e84bde RM |
3180 | tx_ring->prod_idx = 0; |
3181 | /* TX PCI doorbell mem area + 0x04 */ | |
3182 | tx_ring->valid_db_reg = doorbell_area + 0x04; | |
3183 | ||
3184 | /* | |
3185 | * Assign shadow registers for this tx_ring. | |
3186 | */ | |
3187 | tx_ring->cnsmr_idx_sh_reg = shadow_reg; | |
3188 | tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma; | |
3189 | ||
3190 | wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT); | |
3191 | wqicb->flags = cpu_to_le16(Q_FLAGS_LC | | |
3192 | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); | |
3193 | wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); | |
3194 | wqicb->rid = 0; | |
97345524 | 3195 | wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); |
c4e84bde | 3196 | |
97345524 | 3197 | wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); |
c4e84bde RM |
3198 | |
3199 | ql_init_tx_ring(qdev, tx_ring); | |
3200 | ||
e332471c | 3201 | err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, |
c4e84bde RM |
3202 | (u16) tx_ring->wq_id); |
3203 | if (err) { | |
ae9540f7 | 3204 | netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n"); |
c4e84bde RM |
3205 | return err; |
3206 | } | |
c4e84bde RM |
3207 | return err; |
3208 | } | |
3209 | ||
3210 | static void ql_disable_msix(struct ql_adapter *qdev) | |
3211 | { | |
3212 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
3213 | pci_disable_msix(qdev->pdev); | |
3214 | clear_bit(QL_MSIX_ENABLED, &qdev->flags); | |
3215 | kfree(qdev->msi_x_entry); | |
3216 | qdev->msi_x_entry = NULL; | |
3217 | } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) { | |
3218 | pci_disable_msi(qdev->pdev); | |
3219 | clear_bit(QL_MSI_ENABLED, &qdev->flags); | |
3220 | } | |
3221 | } | |
3222 | ||
a4ab6137 RM |
3223 | /* We start by trying to get the number of vectors |
3224 | * stored in qdev->intr_count. If we don't get that | |
3225 | * many then we reduce the count and try again. | |
3226 | */ | |
c4e84bde RM |
3227 | static void ql_enable_msix(struct ql_adapter *qdev) |
3228 | { | |
a4ab6137 | 3229 | int i, err; |
c4e84bde | 3230 | |
c4e84bde | 3231 | /* Get the MSIX vectors. */ |
a5a62a1c | 3232 | if (qlge_irq_type == MSIX_IRQ) { |
c4e84bde RM |
3233 | /* Try to alloc space for the msix struct, |
3234 | * if it fails then go to MSI/legacy. | |
3235 | */ | |
a4ab6137 | 3236 | qdev->msi_x_entry = kcalloc(qdev->intr_count, |
c4e84bde RM |
3237 | sizeof(struct msix_entry), |
3238 | GFP_KERNEL); | |
3239 | if (!qdev->msi_x_entry) { | |
a5a62a1c | 3240 | qlge_irq_type = MSI_IRQ; |
c4e84bde RM |
3241 | goto msi; |
3242 | } | |
3243 | ||
a4ab6137 | 3244 | for (i = 0; i < qdev->intr_count; i++) |
c4e84bde RM |
3245 | qdev->msi_x_entry[i].entry = i; |
3246 | ||
a4ab6137 RM |
3247 | /* Loop to get our vectors. We start with |
3248 | * what we want and settle for what we get. | |
3249 | */ | |
3250 | do { | |
3251 | err = pci_enable_msix(qdev->pdev, | |
3252 | qdev->msi_x_entry, qdev->intr_count); | |
3253 | if (err > 0) | |
3254 | qdev->intr_count = err; | |
3255 | } while (err > 0); | |
3256 | ||
3257 | if (err < 0) { | |
c4e84bde RM |
3258 | kfree(qdev->msi_x_entry); |
3259 | qdev->msi_x_entry = NULL; | |
ae9540f7 JP |
3260 | netif_warn(qdev, ifup, qdev->ndev, |
3261 | "MSI-X Enable failed, trying MSI.\n"); | |
a4ab6137 | 3262 | qdev->intr_count = 1; |
a5a62a1c | 3263 | qlge_irq_type = MSI_IRQ; |
a4ab6137 RM |
3264 | } else if (err == 0) { |
3265 | set_bit(QL_MSIX_ENABLED, &qdev->flags); | |
ae9540f7 JP |
3266 | netif_info(qdev, ifup, qdev->ndev, |
3267 | "MSI-X Enabled, got %d vectors.\n", | |
3268 | qdev->intr_count); | |
a4ab6137 | 3269 | return; |
c4e84bde RM |
3270 | } |
3271 | } | |
3272 | msi: | |
a4ab6137 | 3273 | qdev->intr_count = 1; |
a5a62a1c | 3274 | if (qlge_irq_type == MSI_IRQ) { |
c4e84bde RM |
3275 | if (!pci_enable_msi(qdev->pdev)) { |
3276 | set_bit(QL_MSI_ENABLED, &qdev->flags); | |
ae9540f7 JP |
3277 | netif_info(qdev, ifup, qdev->ndev, |
3278 | "Running with MSI interrupts.\n"); | |
c4e84bde RM |
3279 | return; |
3280 | } | |
3281 | } | |
a5a62a1c | 3282 | qlge_irq_type = LEG_IRQ; |
ae9540f7 JP |
3283 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3284 | "Running with legacy interrupts.\n"); | |
c4e84bde RM |
3285 | } |
3286 | ||
39aa8165 RM |
3287 | /* Each vector services 1 RSS ring and and 1 or more |
3288 | * TX completion rings. This function loops through | |
3289 | * the TX completion rings and assigns the vector that | |
3290 | * will service it. An example would be if there are | |
3291 | * 2 vectors (so 2 RSS rings) and 8 TX completion rings. | |
3292 | * This would mean that vector 0 would service RSS ring 0 | |
25985edc | 3293 | * and TX completion rings 0,1,2 and 3. Vector 1 would |
39aa8165 RM |
3294 | * service RSS ring 1 and TX completion rings 4,5,6 and 7. |
3295 | */ | |
3296 | static void ql_set_tx_vect(struct ql_adapter *qdev) | |
3297 | { | |
3298 | int i, j, vect; | |
3299 | u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; | |
3300 | ||
3301 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { | |
3302 | /* Assign irq vectors to TX rx_rings.*/ | |
3303 | for (vect = 0, j = 0, i = qdev->rss_ring_count; | |
3304 | i < qdev->rx_ring_count; i++) { | |
3305 | if (j == tx_rings_per_vector) { | |
3306 | vect++; | |
3307 | j = 0; | |
3308 | } | |
3309 | qdev->rx_ring[i].irq = vect; | |
3310 | j++; | |
3311 | } | |
3312 | } else { | |
3313 | /* For single vector all rings have an irq | |
3314 | * of zero. | |
3315 | */ | |
3316 | for (i = 0; i < qdev->rx_ring_count; i++) | |
3317 | qdev->rx_ring[i].irq = 0; | |
3318 | } | |
3319 | } | |
3320 | ||
3321 | /* Set the interrupt mask for this vector. Each vector | |
3322 | * will service 1 RSS ring and 1 or more TX completion | |
3323 | * rings. This function sets up a bit mask per vector | |
3324 | * that indicates which rings it services. | |
3325 | */ | |
3326 | static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) | |
3327 | { | |
3328 | int j, vect = ctx->intr; | |
3329 | u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; | |
3330 | ||
3331 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { | |
3332 | /* Add the RSS ring serviced by this vector | |
3333 | * to the mask. | |
3334 | */ | |
3335 | ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); | |
3336 | /* Add the TX ring(s) serviced by this vector | |
3337 | * to the mask. */ | |
3338 | for (j = 0; j < tx_rings_per_vector; j++) { | |
3339 | ctx->irq_mask |= | |
3340 | (1 << qdev->rx_ring[qdev->rss_ring_count + | |
3341 | (vect * tx_rings_per_vector) + j].cq_id); | |
3342 | } | |
3343 | } else { | |
3344 | /* For single vector we just shift each queue's | |
3345 | * ID into the mask. | |
3346 | */ | |
3347 | for (j = 0; j < qdev->rx_ring_count; j++) | |
3348 | ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); | |
3349 | } | |
3350 | } | |
3351 | ||
c4e84bde RM |
3352 | /* |
3353 | * Here we build the intr_context structures based on | |
3354 | * our rx_ring count and intr vector count. | |
3355 | * The intr_context structure is used to hook each vector | |
3356 | * to possibly different handlers. | |
3357 | */ | |
3358 | static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) | |
3359 | { | |
3360 | int i = 0; | |
3361 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
3362 | ||
c4e84bde RM |
3363 | if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { |
3364 | /* Each rx_ring has it's | |
3365 | * own intr_context since we have separate | |
3366 | * vectors for each queue. | |
c4e84bde RM |
3367 | */ |
3368 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | |
3369 | qdev->rx_ring[i].irq = i; | |
3370 | intr_context->intr = i; | |
3371 | intr_context->qdev = qdev; | |
39aa8165 RM |
3372 | /* Set up this vector's bit-mask that indicates |
3373 | * which queues it services. | |
3374 | */ | |
3375 | ql_set_irq_mask(qdev, intr_context); | |
c4e84bde RM |
3376 | /* |
3377 | * We set up each vectors enable/disable/read bits so | |
3378 | * there's no bit/mask calculations in the critical path. | |
3379 | */ | |
3380 | intr_context->intr_en_mask = | |
3381 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
3382 | INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD | |
3383 | | i; | |
3384 | intr_context->intr_dis_mask = | |
3385 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
3386 | INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | | |
3387 | INTR_EN_IHD | i; | |
3388 | intr_context->intr_read_mask = | |
3389 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
3390 | INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | | |
3391 | i; | |
39aa8165 RM |
3392 | if (i == 0) { |
3393 | /* The first vector/queue handles | |
3394 | * broadcast/multicast, fatal errors, | |
3395 | * and firmware events. This in addition | |
3396 | * to normal inbound NAPI processing. | |
c4e84bde | 3397 | */ |
39aa8165 | 3398 | intr_context->handler = qlge_isr; |
b2014ff8 RM |
3399 | sprintf(intr_context->name, "%s-rx-%d", |
3400 | qdev->ndev->name, i); | |
3401 | } else { | |
c4e84bde | 3402 | /* |
39aa8165 | 3403 | * Inbound queues handle unicast frames only. |
c4e84bde | 3404 | */ |
39aa8165 RM |
3405 | intr_context->handler = qlge_msix_rx_isr; |
3406 | sprintf(intr_context->name, "%s-rx-%d", | |
c4e84bde | 3407 | qdev->ndev->name, i); |
c4e84bde RM |
3408 | } |
3409 | } | |
3410 | } else { | |
3411 | /* | |
3412 | * All rx_rings use the same intr_context since | |
3413 | * there is only one vector. | |
3414 | */ | |
3415 | intr_context->intr = 0; | |
3416 | intr_context->qdev = qdev; | |
3417 | /* | |
3418 | * We set up each vectors enable/disable/read bits so | |
3419 | * there's no bit/mask calculations in the critical path. | |
3420 | */ | |
3421 | intr_context->intr_en_mask = | |
3422 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; | |
3423 | intr_context->intr_dis_mask = | |
3424 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | | |
3425 | INTR_EN_TYPE_DISABLE; | |
3426 | intr_context->intr_read_mask = | |
3427 | INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; | |
3428 | /* | |
3429 | * Single interrupt means one handler for all rings. | |
3430 | */ | |
3431 | intr_context->handler = qlge_isr; | |
3432 | sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name); | |
39aa8165 RM |
3433 | /* Set up this vector's bit-mask that indicates |
3434 | * which queues it services. In this case there is | |
3435 | * a single vector so it will service all RSS and | |
3436 | * TX completion rings. | |
3437 | */ | |
3438 | ql_set_irq_mask(qdev, intr_context); | |
c4e84bde | 3439 | } |
39aa8165 RM |
3440 | /* Tell the TX completion rings which MSIx vector |
3441 | * they will be using. | |
3442 | */ | |
3443 | ql_set_tx_vect(qdev); | |
c4e84bde RM |
3444 | } |
3445 | ||
3446 | static void ql_free_irq(struct ql_adapter *qdev) | |
3447 | { | |
3448 | int i; | |
3449 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
3450 | ||
3451 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | |
3452 | if (intr_context->hooked) { | |
3453 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
3454 | free_irq(qdev->msi_x_entry[i].vector, | |
3455 | &qdev->rx_ring[i]); | |
c4e84bde RM |
3456 | } else { |
3457 | free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); | |
c4e84bde RM |
3458 | } |
3459 | } | |
3460 | } | |
3461 | ql_disable_msix(qdev); | |
3462 | } | |
3463 | ||
3464 | static int ql_request_irq(struct ql_adapter *qdev) | |
3465 | { | |
3466 | int i; | |
3467 | int status = 0; | |
3468 | struct pci_dev *pdev = qdev->pdev; | |
3469 | struct intr_context *intr_context = &qdev->intr_context[0]; | |
3470 | ||
3471 | ql_resolve_queues_to_irqs(qdev); | |
3472 | ||
3473 | for (i = 0; i < qdev->intr_count; i++, intr_context++) { | |
3474 | atomic_set(&intr_context->irq_cnt, 0); | |
3475 | if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { | |
3476 | status = request_irq(qdev->msi_x_entry[i].vector, | |
3477 | intr_context->handler, | |
3478 | 0, | |
3479 | intr_context->name, | |
3480 | &qdev->rx_ring[i]); | |
3481 | if (status) { | |
ae9540f7 JP |
3482 | netif_err(qdev, ifup, qdev->ndev, |
3483 | "Failed request for MSIX interrupt %d.\n", | |
3484 | i); | |
c4e84bde | 3485 | goto err_irq; |
c4e84bde RM |
3486 | } |
3487 | } else { | |
ae9540f7 JP |
3488 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, |
3489 | "trying msi or legacy interrupts.\n"); | |
3490 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, | |
3491 | "%s: irq = %d.\n", __func__, pdev->irq); | |
3492 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, | |
3493 | "%s: context->name = %s.\n", __func__, | |
3494 | intr_context->name); | |
3495 | netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, | |
3496 | "%s: dev_id = 0x%p.\n", __func__, | |
3497 | &qdev->rx_ring[0]); | |
c4e84bde RM |
3498 | status = |
3499 | request_irq(pdev->irq, qlge_isr, | |
3500 | test_bit(QL_MSI_ENABLED, | |
3501 | &qdev-> | |
3502 | flags) ? 0 : IRQF_SHARED, | |
3503 | intr_context->name, &qdev->rx_ring[0]); | |
3504 | if (status) | |
3505 | goto err_irq; | |
3506 | ||
ae9540f7 JP |
3507 | netif_err(qdev, ifup, qdev->ndev, |
3508 | "Hooked intr %d, queue type %s, with name %s.\n", | |
3509 | i, | |
3510 | qdev->rx_ring[0].type == DEFAULT_Q ? | |
3511 | "DEFAULT_Q" : | |
3512 | qdev->rx_ring[0].type == TX_Q ? "TX_Q" : | |
3513 | qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", | |
3514 | intr_context->name); | |
c4e84bde RM |
3515 | } |
3516 | intr_context->hooked = 1; | |
3517 | } | |
3518 | return status; | |
3519 | err_irq: | |
ae9540f7 | 3520 | netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n"); |
c4e84bde RM |
3521 | ql_free_irq(qdev); |
3522 | return status; | |
3523 | } | |
3524 | ||
3525 | static int ql_start_rss(struct ql_adapter *qdev) | |
3526 | { | |
215faf9c JP |
3527 | static const u8 init_hash_seed[] = { |
3528 | 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, | |
3529 | 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, | |
3530 | 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4, | |
3531 | 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c, | |
3532 | 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa | |
3533 | }; | |
c4e84bde RM |
3534 | struct ricb *ricb = &qdev->ricb; |
3535 | int status = 0; | |
3536 | int i; | |
3537 | u8 *hash_id = (u8 *) ricb->hash_cq_id; | |
3538 | ||
e332471c | 3539 | memset((void *)ricb, 0, sizeof(*ricb)); |
c4e84bde | 3540 | |
b2014ff8 | 3541 | ricb->base_cq = RSS_L4K; |
c4e84bde | 3542 | ricb->flags = |
541ae28c RM |
3543 | (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); |
3544 | ricb->mask = cpu_to_le16((u16)(0x3ff)); | |
c4e84bde RM |
3545 | |
3546 | /* | |
3547 | * Fill out the Indirection Table. | |
3548 | */ | |
541ae28c RM |
3549 | for (i = 0; i < 1024; i++) |
3550 | hash_id[i] = (i & (qdev->rss_ring_count - 1)); | |
c4e84bde | 3551 | |
541ae28c RM |
3552 | memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); |
3553 | memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); | |
c4e84bde | 3554 | |
e332471c | 3555 | status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); |
c4e84bde | 3556 | if (status) { |
ae9540f7 | 3557 | netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); |
c4e84bde RM |
3558 | return status; |
3559 | } | |
c4e84bde RM |
3560 | return status; |
3561 | } | |
3562 | ||
a5f59dc9 | 3563 | static int ql_clear_routing_entries(struct ql_adapter *qdev) |
c4e84bde | 3564 | { |
a5f59dc9 | 3565 | int i, status = 0; |
c4e84bde | 3566 | |
8587ea35 RM |
3567 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); |
3568 | if (status) | |
3569 | return status; | |
c4e84bde RM |
3570 | /* Clear all the entries in the routing table. */ |
3571 | for (i = 0; i < 16; i++) { | |
3572 | status = ql_set_routing_reg(qdev, i, 0, 0); | |
3573 | if (status) { | |
ae9540f7 JP |
3574 | netif_err(qdev, ifup, qdev->ndev, |
3575 | "Failed to init routing register for CAM packets.\n"); | |
a5f59dc9 | 3576 | break; |
c4e84bde RM |
3577 | } |
3578 | } | |
a5f59dc9 RM |
3579 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); |
3580 | return status; | |
3581 | } | |
3582 | ||
3583 | /* Initialize the frame-to-queue routing. */ | |
3584 | static int ql_route_initialize(struct ql_adapter *qdev) | |
3585 | { | |
3586 | int status = 0; | |
3587 | ||
fd21cf52 RM |
3588 | /* Clear all the entries in the routing table. */ |
3589 | status = ql_clear_routing_entries(qdev); | |
a5f59dc9 RM |
3590 | if (status) |
3591 | return status; | |
3592 | ||
fd21cf52 | 3593 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); |
a5f59dc9 | 3594 | if (status) |
fd21cf52 | 3595 | return status; |
c4e84bde | 3596 | |
fbc2ac33 RM |
3597 | status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, |
3598 | RT_IDX_IP_CSUM_ERR, 1); | |
3599 | if (status) { | |
3600 | netif_err(qdev, ifup, qdev->ndev, | |
3601 | "Failed to init routing register " | |
3602 | "for IP CSUM error packets.\n"); | |
3603 | goto exit; | |
3604 | } | |
3605 | status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, | |
3606 | RT_IDX_TU_CSUM_ERR, 1); | |
c4e84bde | 3607 | if (status) { |
ae9540f7 | 3608 | netif_err(qdev, ifup, qdev->ndev, |
fbc2ac33 RM |
3609 | "Failed to init routing register " |
3610 | "for TCP/UDP CSUM error packets.\n"); | |
8587ea35 | 3611 | goto exit; |
c4e84bde RM |
3612 | } |
3613 | status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); | |
3614 | if (status) { | |
ae9540f7 JP |
3615 | netif_err(qdev, ifup, qdev->ndev, |
3616 | "Failed to init routing register for broadcast packets.\n"); | |
8587ea35 | 3617 | goto exit; |
c4e84bde RM |
3618 | } |
3619 | /* If we have more than one inbound queue, then turn on RSS in the | |
3620 | * routing block. | |
3621 | */ | |
3622 | if (qdev->rss_ring_count > 1) { | |
3623 | status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, | |
3624 | RT_IDX_RSS_MATCH, 1); | |
3625 | if (status) { | |
ae9540f7 JP |
3626 | netif_err(qdev, ifup, qdev->ndev, |
3627 | "Failed to init routing register for MATCH RSS packets.\n"); | |
8587ea35 | 3628 | goto exit; |
c4e84bde RM |
3629 | } |
3630 | } | |
3631 | ||
3632 | status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, | |
3633 | RT_IDX_CAM_HIT, 1); | |
8587ea35 | 3634 | if (status) |
ae9540f7 JP |
3635 | netif_err(qdev, ifup, qdev->ndev, |
3636 | "Failed to init routing register for CAM packets.\n"); | |
8587ea35 RM |
3637 | exit: |
3638 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); | |
c4e84bde RM |
3639 | return status; |
3640 | } | |
3641 | ||
2ee1e272 | 3642 | int ql_cam_route_initialize(struct ql_adapter *qdev) |
bb58b5b6 | 3643 | { |
7fab3bfe | 3644 | int status, set; |
bb58b5b6 | 3645 | |
7fab3bfe RM |
3646 | /* If check if the link is up and use to |
3647 | * determine if we are setting or clearing | |
3648 | * the MAC address in the CAM. | |
3649 | */ | |
3650 | set = ql_read32(qdev, STS); | |
3651 | set &= qdev->port_link_up; | |
3652 | status = ql_set_mac_addr(qdev, set); | |
bb58b5b6 | 3653 | if (status) { |
ae9540f7 | 3654 | netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); |
bb58b5b6 RM |
3655 | return status; |
3656 | } | |
3657 | ||
3658 | status = ql_route_initialize(qdev); | |
3659 | if (status) | |
ae9540f7 | 3660 | netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); |
bb58b5b6 RM |
3661 | |
3662 | return status; | |
3663 | } | |
3664 | ||
c4e84bde RM |
3665 | static int ql_adapter_initialize(struct ql_adapter *qdev) |
3666 | { | |
3667 | u32 value, mask; | |
3668 | int i; | |
3669 | int status = 0; | |
3670 | ||
3671 | /* | |
3672 | * Set up the System register to halt on errors. | |
3673 | */ | |
3674 | value = SYS_EFE | SYS_FAE; | |
3675 | mask = value << 16; | |
3676 | ql_write32(qdev, SYS, mask | value); | |
3677 | ||
c9cf0a04 RM |
3678 | /* Set the default queue, and VLAN behavior. */ |
3679 | value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV; | |
3680 | mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16); | |
c4e84bde RM |
3681 | ql_write32(qdev, NIC_RCV_CFG, (mask | value)); |
3682 | ||
3683 | /* Set the MPI interrupt to enabled. */ | |
3684 | ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); | |
3685 | ||
3686 | /* Enable the function, set pagesize, enable error checking. */ | |
3687 | value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | | |
572c526f RM |
3688 | FSC_EC | FSC_VM_PAGE_4K; |
3689 | value |= SPLT_SETTING; | |
c4e84bde RM |
3690 | |
3691 | /* Set/clear header splitting. */ | |
3692 | mask = FSC_VM_PAGESIZE_MASK | | |
3693 | FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); | |
3694 | ql_write32(qdev, FSC, mask | value); | |
3695 | ||
572c526f | 3696 | ql_write32(qdev, SPLT_HDR, SPLT_LEN); |
c4e84bde | 3697 | |
a3b71939 RM |
3698 | /* Set RX packet routing to use port/pci function on which the |
3699 | * packet arrived on in addition to usual frame routing. | |
3700 | * This is helpful on bonding where both interfaces can have | |
3701 | * the same MAC address. | |
3702 | */ | |
3703 | ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); | |
bc083ce9 RM |
3704 | /* Reroute all packets to our Interface. |
3705 | * They may have been routed to MPI firmware | |
3706 | * due to WOL. | |
3707 | */ | |
3708 | value = ql_read32(qdev, MGMT_RCV_CFG); | |
3709 | value &= ~MGMT_RCV_CFG_RM; | |
3710 | mask = 0xffff0000; | |
3711 | ||
3712 | /* Sticky reg needs clearing due to WOL. */ | |
3713 | ql_write32(qdev, MGMT_RCV_CFG, mask); | |
3714 | ql_write32(qdev, MGMT_RCV_CFG, mask | value); | |
3715 | ||
3716 | /* Default WOL is enable on Mezz cards */ | |
3717 | if (qdev->pdev->subsystem_device == 0x0068 || | |
3718 | qdev->pdev->subsystem_device == 0x0180) | |
3719 | qdev->wol = WAKE_MAGIC; | |
a3b71939 | 3720 | |
c4e84bde RM |
3721 | /* Start up the rx queues. */ |
3722 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
3723 | status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); | |
3724 | if (status) { | |
ae9540f7 JP |
3725 | netif_err(qdev, ifup, qdev->ndev, |
3726 | "Failed to start rx ring[%d].\n", i); | |
c4e84bde RM |
3727 | return status; |
3728 | } | |
3729 | } | |
3730 | ||
3731 | /* If there is more than one inbound completion queue | |
3732 | * then download a RICB to configure RSS. | |
3733 | */ | |
3734 | if (qdev->rss_ring_count > 1) { | |
3735 | status = ql_start_rss(qdev); | |
3736 | if (status) { | |
ae9540f7 | 3737 | netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); |
c4e84bde RM |
3738 | return status; |
3739 | } | |
3740 | } | |
3741 | ||
3742 | /* Start up the tx queues. */ | |
3743 | for (i = 0; i < qdev->tx_ring_count; i++) { | |
3744 | status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); | |
3745 | if (status) { | |
ae9540f7 JP |
3746 | netif_err(qdev, ifup, qdev->ndev, |
3747 | "Failed to start tx ring[%d].\n", i); | |
c4e84bde RM |
3748 | return status; |
3749 | } | |
3750 | } | |
3751 | ||
b0c2aadf RM |
3752 | /* Initialize the port and set the max framesize. */ |
3753 | status = qdev->nic_ops->port_initialize(qdev); | |
80928860 | 3754 | if (status) |
ae9540f7 | 3755 | netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); |
c4e84bde | 3756 | |
bb58b5b6 RM |
3757 | /* Set up the MAC address and frame routing filter. */ |
3758 | status = ql_cam_route_initialize(qdev); | |
c4e84bde | 3759 | if (status) { |
ae9540f7 JP |
3760 | netif_err(qdev, ifup, qdev->ndev, |
3761 | "Failed to init CAM/Routing tables.\n"); | |
c4e84bde RM |
3762 | return status; |
3763 | } | |
3764 | ||
3765 | /* Start NAPI for the RSS queues. */ | |
19257f5a | 3766 | for (i = 0; i < qdev->rss_ring_count; i++) |
c4e84bde | 3767 | napi_enable(&qdev->rx_ring[i].napi); |
c4e84bde RM |
3768 | |
3769 | return status; | |
3770 | } | |
3771 | ||
3772 | /* Issue soft reset to chip. */ | |
3773 | static int ql_adapter_reset(struct ql_adapter *qdev) | |
3774 | { | |
3775 | u32 value; | |
c4e84bde | 3776 | int status = 0; |
a5f59dc9 | 3777 | unsigned long end_jiffies; |
c4e84bde | 3778 | |
a5f59dc9 RM |
3779 | /* Clear all the entries in the routing table. */ |
3780 | status = ql_clear_routing_entries(qdev); | |
3781 | if (status) { | |
ae9540f7 | 3782 | netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); |
a5f59dc9 RM |
3783 | return status; |
3784 | } | |
3785 | ||
3786 | end_jiffies = jiffies + | |
3787 | max((unsigned long)1, usecs_to_jiffies(30)); | |
84087f4d | 3788 | |
da92b393 JK |
3789 | /* Check if bit is set then skip the mailbox command and |
3790 | * clear the bit, else we are in normal reset process. | |
3791 | */ | |
3792 | if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { | |
3793 | /* Stop management traffic. */ | |
3794 | ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); | |
3795 | ||
3796 | /* Wait for the NIC and MGMNT FIFOs to empty. */ | |
3797 | ql_wait_fifo_empty(qdev); | |
3798 | } else | |
3799 | clear_bit(QL_ASIC_RECOVERY, &qdev->flags); | |
84087f4d | 3800 | |
c4e84bde | 3801 | ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); |
a75ee7f1 | 3802 | |
c4e84bde RM |
3803 | do { |
3804 | value = ql_read32(qdev, RST_FO); | |
3805 | if ((value & RST_FO_FR) == 0) | |
3806 | break; | |
a75ee7f1 RM |
3807 | cpu_relax(); |
3808 | } while (time_before(jiffies, end_jiffies)); | |
c4e84bde | 3809 | |
c4e84bde | 3810 | if (value & RST_FO_FR) { |
ae9540f7 JP |
3811 | netif_err(qdev, ifdown, qdev->ndev, |
3812 | "ETIMEDOUT!!! errored out of resetting the chip!\n"); | |
a75ee7f1 | 3813 | status = -ETIMEDOUT; |
c4e84bde RM |
3814 | } |
3815 | ||
84087f4d RM |
3816 | /* Resume management traffic. */ |
3817 | ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); | |
c4e84bde RM |
3818 | return status; |
3819 | } | |
3820 | ||
3821 | static void ql_display_dev_info(struct net_device *ndev) | |
3822 | { | |
b16fed0a | 3823 | struct ql_adapter *qdev = netdev_priv(ndev); |
c4e84bde | 3824 | |
ae9540f7 JP |
3825 | netif_info(qdev, probe, qdev->ndev, |
3826 | "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " | |
3827 | "XG Roll = %d, XG Rev = %d.\n", | |
3828 | qdev->func, | |
3829 | qdev->port, | |
3830 | qdev->chip_rev_id & 0x0000000f, | |
3831 | qdev->chip_rev_id >> 4 & 0x0000000f, | |
3832 | qdev->chip_rev_id >> 8 & 0x0000000f, | |
3833 | qdev->chip_rev_id >> 12 & 0x0000000f); | |
3834 | netif_info(qdev, probe, qdev->ndev, | |
3835 | "MAC address %pM\n", ndev->dev_addr); | |
c4e84bde RM |
3836 | } |
3837 | ||
ac409215 | 3838 | static int ql_wol(struct ql_adapter *qdev) |
bc083ce9 RM |
3839 | { |
3840 | int status = 0; | |
3841 | u32 wol = MB_WOL_DISABLE; | |
3842 | ||
3843 | /* The CAM is still intact after a reset, but if we | |
3844 | * are doing WOL, then we may need to program the | |
3845 | * routing regs. We would also need to issue the mailbox | |
3846 | * commands to instruct the MPI what to do per the ethtool | |
3847 | * settings. | |
3848 | */ | |
3849 | ||
3850 | if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | | |
3851 | WAKE_MCAST | WAKE_BCAST)) { | |
ae9540f7 | 3852 | netif_err(qdev, ifdown, qdev->ndev, |
fd9071ec | 3853 | "Unsupported WOL parameter. qdev->wol = 0x%x.\n", |
ae9540f7 | 3854 | qdev->wol); |
bc083ce9 RM |
3855 | return -EINVAL; |
3856 | } | |
3857 | ||
3858 | if (qdev->wol & WAKE_MAGIC) { | |
3859 | status = ql_mb_wol_set_magic(qdev, 1); | |
3860 | if (status) { | |
ae9540f7 JP |
3861 | netif_err(qdev, ifdown, qdev->ndev, |
3862 | "Failed to set magic packet on %s.\n", | |
3863 | qdev->ndev->name); | |
bc083ce9 RM |
3864 | return status; |
3865 | } else | |
ae9540f7 JP |
3866 | netif_info(qdev, drv, qdev->ndev, |
3867 | "Enabled magic packet successfully on %s.\n", | |
3868 | qdev->ndev->name); | |
bc083ce9 RM |
3869 | |
3870 | wol |= MB_WOL_MAGIC_PKT; | |
3871 | } | |
3872 | ||
3873 | if (qdev->wol) { | |
bc083ce9 RM |
3874 | wol |= MB_WOL_MODE_ON; |
3875 | status = ql_mb_wol_mode(qdev, wol); | |
ae9540f7 JP |
3876 | netif_err(qdev, drv, qdev->ndev, |
3877 | "WOL %s (wol code 0x%x) on %s\n", | |
318ae2ed | 3878 | (status == 0) ? "Successfully set" : "Failed", |
ae9540f7 | 3879 | wol, qdev->ndev->name); |
bc083ce9 RM |
3880 | } |
3881 | ||
3882 | return status; | |
3883 | } | |
3884 | ||
c5dadddb | 3885 | static void ql_cancel_all_work_sync(struct ql_adapter *qdev) |
c4e84bde | 3886 | { |
c4e84bde | 3887 | |
6497b607 RM |
3888 | /* Don't kill the reset worker thread if we |
3889 | * are in the process of recovery. | |
3890 | */ | |
3891 | if (test_bit(QL_ADAPTER_UP, &qdev->flags)) | |
3892 | cancel_delayed_work_sync(&qdev->asic_reset_work); | |
c4e84bde RM |
3893 | cancel_delayed_work_sync(&qdev->mpi_reset_work); |
3894 | cancel_delayed_work_sync(&qdev->mpi_work); | |
2ee1e272 | 3895 | cancel_delayed_work_sync(&qdev->mpi_idc_work); |
8aae2600 | 3896 | cancel_delayed_work_sync(&qdev->mpi_core_to_log); |
bcc2cb3b | 3897 | cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); |
c5dadddb BL |
3898 | } |
3899 | ||
3900 | static int ql_adapter_down(struct ql_adapter *qdev) | |
3901 | { | |
3902 | int i, status = 0; | |
3903 | ||
3904 | ql_link_off(qdev); | |
3905 | ||
3906 | ql_cancel_all_work_sync(qdev); | |
c4e84bde | 3907 | |
39aa8165 RM |
3908 | for (i = 0; i < qdev->rss_ring_count; i++) |
3909 | napi_disable(&qdev->rx_ring[i].napi); | |
c4e84bde RM |
3910 | |
3911 | clear_bit(QL_ADAPTER_UP, &qdev->flags); | |
3912 | ||
3913 | ql_disable_interrupts(qdev); | |
3914 | ||
3915 | ql_tx_ring_clean(qdev); | |
3916 | ||
6b318cb3 RM |
3917 | /* Call netif_napi_del() from common point. |
3918 | */ | |
b2014ff8 | 3919 | for (i = 0; i < qdev->rss_ring_count; i++) |
6b318cb3 RM |
3920 | netif_napi_del(&qdev->rx_ring[i].napi); |
3921 | ||
c4e84bde RM |
3922 | status = ql_adapter_reset(qdev); |
3923 | if (status) | |
ae9540f7 JP |
3924 | netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", |
3925 | qdev->func); | |
fe5f0980 BL |
3926 | ql_free_rx_buffers(qdev); |
3927 | ||
c4e84bde RM |
3928 | return status; |
3929 | } | |
3930 | ||
3931 | static int ql_adapter_up(struct ql_adapter *qdev) | |
3932 | { | |
3933 | int err = 0; | |
3934 | ||
c4e84bde RM |
3935 | err = ql_adapter_initialize(qdev); |
3936 | if (err) { | |
ae9540f7 | 3937 | netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); |
c4e84bde RM |
3938 | goto err_init; |
3939 | } | |
c4e84bde | 3940 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
4545a3f2 | 3941 | ql_alloc_rx_buffers(qdev); |
8b007de1 RM |
3942 | /* If the port is initialized and the |
3943 | * link is up the turn on the carrier. | |
3944 | */ | |
3945 | if ((ql_read32(qdev, STS) & qdev->port_init) && | |
3946 | (ql_read32(qdev, STS) & qdev->port_link_up)) | |
6a473308 | 3947 | ql_link_on(qdev); |
f2c05004 RM |
3948 | /* Restore rx mode. */ |
3949 | clear_bit(QL_ALLMULTI, &qdev->flags); | |
3950 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | |
3951 | qlge_set_multicast_list(qdev->ndev); | |
3952 | ||
c1b60092 RM |
3953 | /* Restore vlan setting. */ |
3954 | qlge_restore_vlan(qdev); | |
3955 | ||
c4e84bde RM |
3956 | ql_enable_interrupts(qdev); |
3957 | ql_enable_all_completion_interrupts(qdev); | |
1e213303 | 3958 | netif_tx_start_all_queues(qdev->ndev); |
c4e84bde RM |
3959 | |
3960 | return 0; | |
3961 | err_init: | |
3962 | ql_adapter_reset(qdev); | |
3963 | return err; | |
3964 | } | |
3965 | ||
c4e84bde RM |
3966 | static void ql_release_adapter_resources(struct ql_adapter *qdev) |
3967 | { | |
3968 | ql_free_mem_resources(qdev); | |
3969 | ql_free_irq(qdev); | |
3970 | } | |
3971 | ||
3972 | static int ql_get_adapter_resources(struct ql_adapter *qdev) | |
3973 | { | |
3974 | int status = 0; | |
3975 | ||
3976 | if (ql_alloc_mem_resources(qdev)) { | |
ae9540f7 | 3977 | netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); |
c4e84bde RM |
3978 | return -ENOMEM; |
3979 | } | |
3980 | status = ql_request_irq(qdev); | |
c4e84bde RM |
3981 | return status; |
3982 | } | |
3983 | ||
3984 | static int qlge_close(struct net_device *ndev) | |
3985 | { | |
3986 | struct ql_adapter *qdev = netdev_priv(ndev); | |
3987 | ||
4bbd1a19 RM |
3988 | /* If we hit pci_channel_io_perm_failure |
3989 | * failure condition, then we already | |
3990 | * brought the adapter down. | |
3991 | */ | |
3992 | if (test_bit(QL_EEH_FATAL, &qdev->flags)) { | |
ae9540f7 | 3993 | netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n"); |
4bbd1a19 RM |
3994 | clear_bit(QL_EEH_FATAL, &qdev->flags); |
3995 | return 0; | |
3996 | } | |
3997 | ||
c4e84bde RM |
3998 | /* |
3999 | * Wait for device to recover from a reset. | |
4000 | * (Rarely happens, but possible.) | |
4001 | */ | |
4002 | while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) | |
4003 | msleep(1); | |
4004 | ql_adapter_down(qdev); | |
4005 | ql_release_adapter_resources(qdev); | |
c4e84bde RM |
4006 | return 0; |
4007 | } | |
4008 | ||
4009 | static int ql_configure_rings(struct ql_adapter *qdev) | |
4010 | { | |
4011 | int i; | |
4012 | struct rx_ring *rx_ring; | |
4013 | struct tx_ring *tx_ring; | |
a4ab6137 | 4014 | int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); |
7c734359 RM |
4015 | unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ? |
4016 | LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; | |
4017 | ||
4018 | qdev->lbq_buf_order = get_order(lbq_buf_len); | |
a4ab6137 RM |
4019 | |
4020 | /* In a perfect world we have one RSS ring for each CPU | |
4021 | * and each has it's own vector. To do that we ask for | |
4022 | * cpu_cnt vectors. ql_enable_msix() will adjust the | |
4023 | * vector count to what we actually get. We then | |
4024 | * allocate an RSS ring for each. | |
4025 | * Essentially, we are doing min(cpu_count, msix_vector_count). | |
c4e84bde | 4026 | */ |
a4ab6137 RM |
4027 | qdev->intr_count = cpu_cnt; |
4028 | ql_enable_msix(qdev); | |
4029 | /* Adjust the RSS ring count to the actual vector count. */ | |
4030 | qdev->rss_ring_count = qdev->intr_count; | |
c4e84bde | 4031 | qdev->tx_ring_count = cpu_cnt; |
b2014ff8 | 4032 | qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count; |
c4e84bde | 4033 | |
c4e84bde RM |
4034 | for (i = 0; i < qdev->tx_ring_count; i++) { |
4035 | tx_ring = &qdev->tx_ring[i]; | |
e332471c | 4036 | memset((void *)tx_ring, 0, sizeof(*tx_ring)); |
c4e84bde RM |
4037 | tx_ring->qdev = qdev; |
4038 | tx_ring->wq_id = i; | |
4039 | tx_ring->wq_len = qdev->tx_ring_size; | |
4040 | tx_ring->wq_size = | |
4041 | tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); | |
4042 | ||
4043 | /* | |
4044 | * The completion queue ID for the tx rings start | |
39aa8165 | 4045 | * immediately after the rss rings. |
c4e84bde | 4046 | */ |
39aa8165 | 4047 | tx_ring->cq_id = qdev->rss_ring_count + i; |
c4e84bde RM |
4048 | } |
4049 | ||
4050 | for (i = 0; i < qdev->rx_ring_count; i++) { | |
4051 | rx_ring = &qdev->rx_ring[i]; | |
e332471c | 4052 | memset((void *)rx_ring, 0, sizeof(*rx_ring)); |
c4e84bde RM |
4053 | rx_ring->qdev = qdev; |
4054 | rx_ring->cq_id = i; | |
4055 | rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ | |
b2014ff8 | 4056 | if (i < qdev->rss_ring_count) { |
39aa8165 RM |
4057 | /* |
4058 | * Inbound (RSS) queues. | |
4059 | */ | |
c4e84bde RM |
4060 | rx_ring->cq_len = qdev->rx_ring_size; |
4061 | rx_ring->cq_size = | |
4062 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | |
4063 | rx_ring->lbq_len = NUM_LARGE_BUFFERS; | |
4064 | rx_ring->lbq_size = | |
2c9a0d41 | 4065 | rx_ring->lbq_len * sizeof(__le64); |
7c734359 | 4066 | rx_ring->lbq_buf_size = (u16)lbq_buf_len; |
c4e84bde RM |
4067 | rx_ring->sbq_len = NUM_SMALL_BUFFERS; |
4068 | rx_ring->sbq_size = | |
2c9a0d41 | 4069 | rx_ring->sbq_len * sizeof(__le64); |
52e55f3c | 4070 | rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; |
b2014ff8 RM |
4071 | rx_ring->type = RX_Q; |
4072 | } else { | |
c4e84bde RM |
4073 | /* |
4074 | * Outbound queue handles outbound completions only. | |
4075 | */ | |
4076 | /* outbound cq is same size as tx_ring it services. */ | |
4077 | rx_ring->cq_len = qdev->tx_ring_size; | |
4078 | rx_ring->cq_size = | |
4079 | rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); | |
4080 | rx_ring->lbq_len = 0; | |
4081 | rx_ring->lbq_size = 0; | |
4082 | rx_ring->lbq_buf_size = 0; | |
4083 | rx_ring->sbq_len = 0; | |
4084 | rx_ring->sbq_size = 0; | |
4085 | rx_ring->sbq_buf_size = 0; | |
4086 | rx_ring->type = TX_Q; | |
c4e84bde RM |
4087 | } |
4088 | } | |
4089 | return 0; | |
4090 | } | |
4091 | ||
4092 | static int qlge_open(struct net_device *ndev) | |
4093 | { | |
4094 | int err = 0; | |
4095 | struct ql_adapter *qdev = netdev_priv(ndev); | |
4096 | ||
74e12435 RM |
4097 | err = ql_adapter_reset(qdev); |
4098 | if (err) | |
4099 | return err; | |
4100 | ||
c4e84bde RM |
4101 | err = ql_configure_rings(qdev); |
4102 | if (err) | |
4103 | return err; | |
4104 | ||
4105 | err = ql_get_adapter_resources(qdev); | |
4106 | if (err) | |
4107 | goto error_up; | |
4108 | ||
4109 | err = ql_adapter_up(qdev); | |
4110 | if (err) | |
4111 | goto error_up; | |
4112 | ||
4113 | return err; | |
4114 | ||
4115 | error_up: | |
4116 | ql_release_adapter_resources(qdev); | |
c4e84bde RM |
4117 | return err; |
4118 | } | |
4119 | ||
7c734359 RM |
4120 | static int ql_change_rx_buffers(struct ql_adapter *qdev) |
4121 | { | |
4122 | struct rx_ring *rx_ring; | |
4123 | int i, status; | |
4124 | u32 lbq_buf_len; | |
4125 | ||
25985edc | 4126 | /* Wait for an outstanding reset to complete. */ |
7c734359 RM |
4127 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { |
4128 | int i = 3; | |
4129 | while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { | |
ae9540f7 JP |
4130 | netif_err(qdev, ifup, qdev->ndev, |
4131 | "Waiting for adapter UP...\n"); | |
7c734359 RM |
4132 | ssleep(1); |
4133 | } | |
4134 | ||
4135 | if (!i) { | |
ae9540f7 JP |
4136 | netif_err(qdev, ifup, qdev->ndev, |
4137 | "Timed out waiting for adapter UP\n"); | |
7c734359 RM |
4138 | return -ETIMEDOUT; |
4139 | } | |
4140 | } | |
4141 | ||
4142 | status = ql_adapter_down(qdev); | |
4143 | if (status) | |
4144 | goto error; | |
4145 | ||
4146 | /* Get the new rx buffer size. */ | |
4147 | lbq_buf_len = (qdev->ndev->mtu > 1500) ? | |
4148 | LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE; | |
4149 | qdev->lbq_buf_order = get_order(lbq_buf_len); | |
4150 | ||
4151 | for (i = 0; i < qdev->rss_ring_count; i++) { | |
4152 | rx_ring = &qdev->rx_ring[i]; | |
4153 | /* Set the new size. */ | |
4154 | rx_ring->lbq_buf_size = lbq_buf_len; | |
4155 | } | |
4156 | ||
4157 | status = ql_adapter_up(qdev); | |
4158 | if (status) | |
4159 | goto error; | |
4160 | ||
4161 | return status; | |
4162 | error: | |
ae9540f7 JP |
4163 | netif_alert(qdev, ifup, qdev->ndev, |
4164 | "Driver up/down cycle failed, closing device.\n"); | |
7c734359 RM |
4165 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
4166 | dev_close(qdev->ndev); | |
4167 | return status; | |
4168 | } | |
4169 | ||
c4e84bde RM |
4170 | static int qlge_change_mtu(struct net_device *ndev, int new_mtu) |
4171 | { | |
4172 | struct ql_adapter *qdev = netdev_priv(ndev); | |
7c734359 | 4173 | int status; |
c4e84bde RM |
4174 | |
4175 | if (ndev->mtu == 1500 && new_mtu == 9000) { | |
ae9540f7 | 4176 | netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n"); |
c4e84bde | 4177 | } else if (ndev->mtu == 9000 && new_mtu == 1500) { |
ae9540f7 | 4178 | netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n"); |
c4e84bde RM |
4179 | } else |
4180 | return -EINVAL; | |
7c734359 RM |
4181 | |
4182 | queue_delayed_work(qdev->workqueue, | |
4183 | &qdev->mpi_port_cfg_work, 3*HZ); | |
4184 | ||
746079da BL |
4185 | ndev->mtu = new_mtu; |
4186 | ||
7c734359 | 4187 | if (!netif_running(qdev->ndev)) { |
7c734359 RM |
4188 | return 0; |
4189 | } | |
4190 | ||
7c734359 RM |
4191 | status = ql_change_rx_buffers(qdev); |
4192 | if (status) { | |
ae9540f7 JP |
4193 | netif_err(qdev, ifup, qdev->ndev, |
4194 | "Changing MTU failed.\n"); | |
7c734359 RM |
4195 | } |
4196 | ||
4197 | return status; | |
c4e84bde RM |
4198 | } |
4199 | ||
4200 | static struct net_device_stats *qlge_get_stats(struct net_device | |
4201 | *ndev) | |
4202 | { | |
885ee398 RM |
4203 | struct ql_adapter *qdev = netdev_priv(ndev); |
4204 | struct rx_ring *rx_ring = &qdev->rx_ring[0]; | |
4205 | struct tx_ring *tx_ring = &qdev->tx_ring[0]; | |
4206 | unsigned long pkts, mcast, dropped, errors, bytes; | |
4207 | int i; | |
4208 | ||
4209 | /* Get RX stats. */ | |
4210 | pkts = mcast = dropped = errors = bytes = 0; | |
4211 | for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { | |
4212 | pkts += rx_ring->rx_packets; | |
4213 | bytes += rx_ring->rx_bytes; | |
4214 | dropped += rx_ring->rx_dropped; | |
4215 | errors += rx_ring->rx_errors; | |
4216 | mcast += rx_ring->rx_multicast; | |
4217 | } | |
4218 | ndev->stats.rx_packets = pkts; | |
4219 | ndev->stats.rx_bytes = bytes; | |
4220 | ndev->stats.rx_dropped = dropped; | |
4221 | ndev->stats.rx_errors = errors; | |
4222 | ndev->stats.multicast = mcast; | |
4223 | ||
4224 | /* Get TX stats. */ | |
4225 | pkts = errors = bytes = 0; | |
4226 | for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) { | |
4227 | pkts += tx_ring->tx_packets; | |
4228 | bytes += tx_ring->tx_bytes; | |
4229 | errors += tx_ring->tx_errors; | |
4230 | } | |
4231 | ndev->stats.tx_packets = pkts; | |
4232 | ndev->stats.tx_bytes = bytes; | |
4233 | ndev->stats.tx_errors = errors; | |
bcc90f55 | 4234 | return &ndev->stats; |
c4e84bde RM |
4235 | } |
4236 | ||
ac409215 | 4237 | static void qlge_set_multicast_list(struct net_device *ndev) |
c4e84bde | 4238 | { |
b16fed0a | 4239 | struct ql_adapter *qdev = netdev_priv(ndev); |
22bedad3 | 4240 | struct netdev_hw_addr *ha; |
cc288f54 | 4241 | int i, status; |
c4e84bde | 4242 | |
cc288f54 RM |
4243 | status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); |
4244 | if (status) | |
4245 | return; | |
c4e84bde RM |
4246 | /* |
4247 | * Set or clear promiscuous mode if a | |
4248 | * transition is taking place. | |
4249 | */ | |
4250 | if (ndev->flags & IFF_PROMISC) { | |
4251 | if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { | |
4252 | if (ql_set_routing_reg | |
4253 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { | |
ae9540f7 | 4254 | netif_err(qdev, hw, qdev->ndev, |
25985edc | 4255 | "Failed to set promiscuous mode.\n"); |
c4e84bde RM |
4256 | } else { |
4257 | set_bit(QL_PROMISCUOUS, &qdev->flags); | |
4258 | } | |
4259 | } | |
4260 | } else { | |
4261 | if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { | |
4262 | if (ql_set_routing_reg | |
4263 | (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { | |
ae9540f7 | 4264 | netif_err(qdev, hw, qdev->ndev, |
25985edc | 4265 | "Failed to clear promiscuous mode.\n"); |
c4e84bde RM |
4266 | } else { |
4267 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | |
4268 | } | |
4269 | } | |
4270 | } | |
4271 | ||
4272 | /* | |
4273 | * Set or clear all multicast mode if a | |
4274 | * transition is taking place. | |
4275 | */ | |
4276 | if ((ndev->flags & IFF_ALLMULTI) || | |
4cd24eaf | 4277 | (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { |
c4e84bde RM |
4278 | if (!test_bit(QL_ALLMULTI, &qdev->flags)) { |
4279 | if (ql_set_routing_reg | |
4280 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { | |
ae9540f7 JP |
4281 | netif_err(qdev, hw, qdev->ndev, |
4282 | "Failed to set all-multi mode.\n"); | |
c4e84bde RM |
4283 | } else { |
4284 | set_bit(QL_ALLMULTI, &qdev->flags); | |
4285 | } | |
4286 | } | |
4287 | } else { | |
4288 | if (test_bit(QL_ALLMULTI, &qdev->flags)) { | |
4289 | if (ql_set_routing_reg | |
4290 | (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { | |
ae9540f7 JP |
4291 | netif_err(qdev, hw, qdev->ndev, |
4292 | "Failed to clear all-multi mode.\n"); | |
c4e84bde RM |
4293 | } else { |
4294 | clear_bit(QL_ALLMULTI, &qdev->flags); | |
4295 | } | |
4296 | } | |
4297 | } | |
4298 | ||
4cd24eaf | 4299 | if (!netdev_mc_empty(ndev)) { |
cc288f54 RM |
4300 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
4301 | if (status) | |
4302 | goto exit; | |
f9dcbcc9 | 4303 | i = 0; |
22bedad3 JP |
4304 | netdev_for_each_mc_addr(ha, ndev) { |
4305 | if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, | |
c4e84bde | 4306 | MAC_ADDR_TYPE_MULTI_MAC, i)) { |
ae9540f7 JP |
4307 | netif_err(qdev, hw, qdev->ndev, |
4308 | "Failed to loadmulticast address.\n"); | |
cc288f54 | 4309 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
c4e84bde RM |
4310 | goto exit; |
4311 | } | |
f9dcbcc9 JP |
4312 | i++; |
4313 | } | |
cc288f54 | 4314 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
c4e84bde RM |
4315 | if (ql_set_routing_reg |
4316 | (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { | |
ae9540f7 JP |
4317 | netif_err(qdev, hw, qdev->ndev, |
4318 | "Failed to set multicast match mode.\n"); | |
c4e84bde RM |
4319 | } else { |
4320 | set_bit(QL_ALLMULTI, &qdev->flags); | |
4321 | } | |
4322 | } | |
4323 | exit: | |
8587ea35 | 4324 | ql_sem_unlock(qdev, SEM_RT_IDX_MASK); |
c4e84bde RM |
4325 | } |
4326 | ||
4327 | static int qlge_set_mac_address(struct net_device *ndev, void *p) | |
4328 | { | |
b16fed0a | 4329 | struct ql_adapter *qdev = netdev_priv(ndev); |
c4e84bde | 4330 | struct sockaddr *addr = p; |
cc288f54 | 4331 | int status; |
c4e84bde | 4332 | |
c4e84bde RM |
4333 | if (!is_valid_ether_addr(addr->sa_data)) |
4334 | return -EADDRNOTAVAIL; | |
4335 | memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); | |
801e9096 RM |
4336 | /* Update local copy of current mac address. */ |
4337 | memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); | |
c4e84bde | 4338 | |
cc288f54 RM |
4339 | status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); |
4340 | if (status) | |
4341 | return status; | |
cc288f54 RM |
4342 | status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, |
4343 | MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); | |
cc288f54 | 4344 | if (status) |
ae9540f7 | 4345 | netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); |
cc288f54 RM |
4346 | ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); |
4347 | return status; | |
c4e84bde RM |
4348 | } |
4349 | ||
4350 | static void qlge_tx_timeout(struct net_device *ndev) | |
4351 | { | |
b16fed0a | 4352 | struct ql_adapter *qdev = netdev_priv(ndev); |
6497b607 | 4353 | ql_queue_asic_error(qdev); |
c4e84bde RM |
4354 | } |
4355 | ||
4356 | static void ql_asic_reset_work(struct work_struct *work) | |
4357 | { | |
4358 | struct ql_adapter *qdev = | |
4359 | container_of(work, struct ql_adapter, asic_reset_work.work); | |
db98812f | 4360 | int status; |
f2c0d8df | 4361 | rtnl_lock(); |
db98812f RM |
4362 | status = ql_adapter_down(qdev); |
4363 | if (status) | |
4364 | goto error; | |
4365 | ||
4366 | status = ql_adapter_up(qdev); | |
4367 | if (status) | |
4368 | goto error; | |
2cd6dbaa RM |
4369 | |
4370 | /* Restore rx mode. */ | |
4371 | clear_bit(QL_ALLMULTI, &qdev->flags); | |
4372 | clear_bit(QL_PROMISCUOUS, &qdev->flags); | |
4373 | qlge_set_multicast_list(qdev->ndev); | |
4374 | ||
f2c0d8df | 4375 | rtnl_unlock(); |
db98812f RM |
4376 | return; |
4377 | error: | |
ae9540f7 JP |
4378 | netif_alert(qdev, ifup, qdev->ndev, |
4379 | "Driver up/down cycle failed, closing device\n"); | |
f2c0d8df | 4380 | |
db98812f RM |
4381 | set_bit(QL_ADAPTER_UP, &qdev->flags); |
4382 | dev_close(qdev->ndev); | |
4383 | rtnl_unlock(); | |
c4e84bde RM |
4384 | } |
4385 | ||
ef9c7ab4 | 4386 | static const struct nic_operations qla8012_nic_ops = { |
b0c2aadf RM |
4387 | .get_flash = ql_get_8012_flash_params, |
4388 | .port_initialize = ql_8012_port_initialize, | |
4389 | }; | |
4390 | ||
ef9c7ab4 | 4391 | static const struct nic_operations qla8000_nic_ops = { |
cdca8d02 RM |
4392 | .get_flash = ql_get_8000_flash_params, |
4393 | .port_initialize = ql_8000_port_initialize, | |
4394 | }; | |
4395 | ||
e4552f51 RM |
4396 | /* Find the pcie function number for the other NIC |
4397 | * on this chip. Since both NIC functions share a | |
4398 | * common firmware we have the lowest enabled function | |
4399 | * do any common work. Examples would be resetting | |
4400 | * after a fatal firmware error, or doing a firmware | |
4401 | * coredump. | |
4402 | */ | |
4403 | static int ql_get_alt_pcie_func(struct ql_adapter *qdev) | |
4404 | { | |
4405 | int status = 0; | |
4406 | u32 temp; | |
4407 | u32 nic_func1, nic_func2; | |
4408 | ||
4409 | status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, | |
4410 | &temp); | |
4411 | if (status) | |
4412 | return status; | |
4413 | ||
4414 | nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & | |
4415 | MPI_TEST_NIC_FUNC_MASK); | |
4416 | nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & | |
4417 | MPI_TEST_NIC_FUNC_MASK); | |
4418 | ||
4419 | if (qdev->func == nic_func1) | |
4420 | qdev->alt_func = nic_func2; | |
4421 | else if (qdev->func == nic_func2) | |
4422 | qdev->alt_func = nic_func1; | |
4423 | else | |
4424 | status = -EIO; | |
4425 | ||
4426 | return status; | |
4427 | } | |
b0c2aadf | 4428 | |
e4552f51 | 4429 | static int ql_get_board_info(struct ql_adapter *qdev) |
c4e84bde | 4430 | { |
e4552f51 | 4431 | int status; |
c4e84bde RM |
4432 | qdev->func = |
4433 | (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; | |
e4552f51 RM |
4434 | if (qdev->func > 3) |
4435 | return -EIO; | |
4436 | ||
4437 | status = ql_get_alt_pcie_func(qdev); | |
4438 | if (status) | |
4439 | return status; | |
4440 | ||
4441 | qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1; | |
4442 | if (qdev->port) { | |
c4e84bde RM |
4443 | qdev->xg_sem_mask = SEM_XGMAC1_MASK; |
4444 | qdev->port_link_up = STS_PL1; | |
4445 | qdev->port_init = STS_PI1; | |
4446 | qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI; | |
4447 | qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO; | |
4448 | } else { | |
4449 | qdev->xg_sem_mask = SEM_XGMAC0_MASK; | |
4450 | qdev->port_link_up = STS_PL0; | |
4451 | qdev->port_init = STS_PI0; | |
4452 | qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; | |
4453 | qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; | |
4454 | } | |
4455 | qdev->chip_rev_id = ql_read32(qdev, REV_ID); | |
b0c2aadf RM |
4456 | qdev->device_id = qdev->pdev->device; |
4457 | if (qdev->device_id == QLGE_DEVICE_ID_8012) | |
4458 | qdev->nic_ops = &qla8012_nic_ops; | |
cdca8d02 RM |
4459 | else if (qdev->device_id == QLGE_DEVICE_ID_8000) |
4460 | qdev->nic_ops = &qla8000_nic_ops; | |
e4552f51 | 4461 | return status; |
c4e84bde RM |
4462 | } |
4463 | ||
4464 | static void ql_release_all(struct pci_dev *pdev) | |
4465 | { | |
4466 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4467 | struct ql_adapter *qdev = netdev_priv(ndev); | |
4468 | ||
4469 | if (qdev->workqueue) { | |
4470 | destroy_workqueue(qdev->workqueue); | |
4471 | qdev->workqueue = NULL; | |
4472 | } | |
39aa8165 | 4473 | |
c4e84bde | 4474 | if (qdev->reg_base) |
8668ae92 | 4475 | iounmap(qdev->reg_base); |
c4e84bde RM |
4476 | if (qdev->doorbell_area) |
4477 | iounmap(qdev->doorbell_area); | |
8aae2600 | 4478 | vfree(qdev->mpi_coredump); |
c4e84bde RM |
4479 | pci_release_regions(pdev); |
4480 | pci_set_drvdata(pdev, NULL); | |
4481 | } | |
4482 | ||
4483 | static int __devinit ql_init_device(struct pci_dev *pdev, | |
4484 | struct net_device *ndev, int cards_found) | |
4485 | { | |
4486 | struct ql_adapter *qdev = netdev_priv(ndev); | |
1d1023d0 | 4487 | int err = 0; |
c4e84bde | 4488 | |
e332471c | 4489 | memset((void *)qdev, 0, sizeof(*qdev)); |
c4e84bde RM |
4490 | err = pci_enable_device(pdev); |
4491 | if (err) { | |
4492 | dev_err(&pdev->dev, "PCI device enable failed.\n"); | |
4493 | return err; | |
4494 | } | |
4495 | ||
ebd6e774 RM |
4496 | qdev->ndev = ndev; |
4497 | qdev->pdev = pdev; | |
4498 | pci_set_drvdata(pdev, ndev); | |
c4e84bde | 4499 | |
bc9167f3 RM |
4500 | /* Set PCIe read request size */ |
4501 | err = pcie_set_readrq(pdev, 4096); | |
4502 | if (err) { | |
4503 | dev_err(&pdev->dev, "Set readrq failed.\n"); | |
4f9a91c8 | 4504 | goto err_out1; |
bc9167f3 RM |
4505 | } |
4506 | ||
c4e84bde RM |
4507 | err = pci_request_regions(pdev, DRV_NAME); |
4508 | if (err) { | |
4509 | dev_err(&pdev->dev, "PCI region request failed.\n"); | |
ebd6e774 | 4510 | return err; |
c4e84bde RM |
4511 | } |
4512 | ||
4513 | pci_set_master(pdev); | |
6a35528a | 4514 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { |
c4e84bde | 4515 | set_bit(QL_DMA64, &qdev->flags); |
6a35528a | 4516 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
c4e84bde | 4517 | } else { |
284901a9 | 4518 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
c4e84bde | 4519 | if (!err) |
284901a9 | 4520 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
c4e84bde RM |
4521 | } |
4522 | ||
4523 | if (err) { | |
4524 | dev_err(&pdev->dev, "No usable DMA configuration.\n"); | |
4f9a91c8 | 4525 | goto err_out2; |
c4e84bde RM |
4526 | } |
4527 | ||
73475339 RM |
4528 | /* Set PCIe reset type for EEH to fundamental. */ |
4529 | pdev->needs_freset = 1; | |
6d190c6e | 4530 | pci_save_state(pdev); |
c4e84bde RM |
4531 | qdev->reg_base = |
4532 | ioremap_nocache(pci_resource_start(pdev, 1), | |
4533 | pci_resource_len(pdev, 1)); | |
4534 | if (!qdev->reg_base) { | |
4535 | dev_err(&pdev->dev, "Register mapping failed.\n"); | |
4536 | err = -ENOMEM; | |
4f9a91c8 | 4537 | goto err_out2; |
c4e84bde RM |
4538 | } |
4539 | ||
4540 | qdev->doorbell_area_size = pci_resource_len(pdev, 3); | |
4541 | qdev->doorbell_area = | |
4542 | ioremap_nocache(pci_resource_start(pdev, 3), | |
4543 | pci_resource_len(pdev, 3)); | |
4544 | if (!qdev->doorbell_area) { | |
4545 | dev_err(&pdev->dev, "Doorbell register mapping failed.\n"); | |
4546 | err = -ENOMEM; | |
4f9a91c8 | 4547 | goto err_out2; |
c4e84bde RM |
4548 | } |
4549 | ||
e4552f51 RM |
4550 | err = ql_get_board_info(qdev); |
4551 | if (err) { | |
4552 | dev_err(&pdev->dev, "Register access failed.\n"); | |
4553 | err = -EIO; | |
4f9a91c8 | 4554 | goto err_out2; |
e4552f51 | 4555 | } |
c4e84bde RM |
4556 | qdev->msg_enable = netif_msg_init(debug, default_msg); |
4557 | spin_lock_init(&qdev->hw_lock); | |
4558 | spin_lock_init(&qdev->stats_lock); | |
4559 | ||
8aae2600 RM |
4560 | if (qlge_mpi_coredump) { |
4561 | qdev->mpi_coredump = | |
4562 | vmalloc(sizeof(struct ql_mpi_coredump)); | |
4563 | if (qdev->mpi_coredump == NULL) { | |
4564 | dev_err(&pdev->dev, "Coredump alloc failed.\n"); | |
4565 | err = -ENOMEM; | |
ce96bc86 | 4566 | goto err_out2; |
8aae2600 | 4567 | } |
d5c1da56 RM |
4568 | if (qlge_force_coredump) |
4569 | set_bit(QL_FRC_COREDUMP, &qdev->flags); | |
8aae2600 | 4570 | } |
c4e84bde | 4571 | /* make sure the EEPROM is good */ |
b0c2aadf | 4572 | err = qdev->nic_ops->get_flash(qdev); |
c4e84bde RM |
4573 | if (err) { |
4574 | dev_err(&pdev->dev, "Invalid FLASH.\n"); | |
4f9a91c8 | 4575 | goto err_out2; |
c4e84bde RM |
4576 | } |
4577 | ||
c4e84bde | 4578 | memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); |
801e9096 RM |
4579 | /* Keep local copy of current mac address. */ |
4580 | memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); | |
c4e84bde RM |
4581 | |
4582 | /* Set up the default ring sizes. */ | |
4583 | qdev->tx_ring_size = NUM_TX_RING_ENTRIES; | |
4584 | qdev->rx_ring_size = NUM_RX_RING_ENTRIES; | |
4585 | ||
4586 | /* Set up the coalescing parameters. */ | |
4587 | qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT; | |
4588 | qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT; | |
4589 | qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; | |
4590 | qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT; | |
4591 | ||
4592 | /* | |
4593 | * Set up the operating parameters. | |
4594 | */ | |
c4e84bde RM |
4595 | qdev->workqueue = create_singlethread_workqueue(ndev->name); |
4596 | INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); | |
4597 | INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); | |
4598 | INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); | |
bcc2cb3b | 4599 | INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); |
2ee1e272 | 4600 | INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); |
8aae2600 | 4601 | INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); |
bcc2cb3b | 4602 | init_completion(&qdev->ide_completion); |
4d7b6b5d | 4603 | mutex_init(&qdev->mpi_mutex); |
c4e84bde RM |
4604 | |
4605 | if (!cards_found) { | |
4606 | dev_info(&pdev->dev, "%s\n", DRV_STRING); | |
4607 | dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n", | |
4608 | DRV_NAME, DRV_VERSION); | |
4609 | } | |
4610 | return 0; | |
4f9a91c8 | 4611 | err_out2: |
c4e84bde | 4612 | ql_release_all(pdev); |
4f9a91c8 | 4613 | err_out1: |
c4e84bde RM |
4614 | pci_disable_device(pdev); |
4615 | return err; | |
4616 | } | |
4617 | ||
25ed7849 SH |
4618 | static const struct net_device_ops qlge_netdev_ops = { |
4619 | .ndo_open = qlge_open, | |
4620 | .ndo_stop = qlge_close, | |
4621 | .ndo_start_xmit = qlge_send, | |
4622 | .ndo_change_mtu = qlge_change_mtu, | |
4623 | .ndo_get_stats = qlge_get_stats, | |
afc4b13d | 4624 | .ndo_set_rx_mode = qlge_set_multicast_list, |
25ed7849 SH |
4625 | .ndo_set_mac_address = qlge_set_mac_address, |
4626 | .ndo_validate_addr = eth_validate_addr, | |
4627 | .ndo_tx_timeout = qlge_tx_timeout, | |
18c49b91 JP |
4628 | .ndo_fix_features = qlge_fix_features, |
4629 | .ndo_set_features = qlge_set_features, | |
01e6b953 RM |
4630 | .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid, |
4631 | .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, | |
25ed7849 SH |
4632 | }; |
4633 | ||
15c052fc RM |
4634 | static void ql_timer(unsigned long data) |
4635 | { | |
4636 | struct ql_adapter *qdev = (struct ql_adapter *)data; | |
4637 | u32 var = 0; | |
4638 | ||
4639 | var = ql_read32(qdev, STS); | |
4640 | if (pci_channel_offline(qdev->pdev)) { | |
ae9540f7 | 4641 | netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); |
15c052fc RM |
4642 | return; |
4643 | } | |
4644 | ||
72046d84 | 4645 | mod_timer(&qdev->timer, jiffies + (5*HZ)); |
15c052fc RM |
4646 | } |
4647 | ||
c4e84bde RM |
4648 | static int __devinit qlge_probe(struct pci_dev *pdev, |
4649 | const struct pci_device_id *pci_entry) | |
4650 | { | |
4651 | struct net_device *ndev = NULL; | |
4652 | struct ql_adapter *qdev = NULL; | |
4653 | static int cards_found = 0; | |
4654 | int err = 0; | |
4655 | ||
1e213303 | 4656 | ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), |
9eb8738d | 4657 | min(MAX_CPUS, netif_get_num_default_rss_queues())); |
c4e84bde RM |
4658 | if (!ndev) |
4659 | return -ENOMEM; | |
4660 | ||
4661 | err = ql_init_device(pdev, ndev, cards_found); | |
4662 | if (err < 0) { | |
4663 | free_netdev(ndev); | |
4664 | return err; | |
4665 | } | |
4666 | ||
4667 | qdev = netdev_priv(ndev); | |
4668 | SET_NETDEV_DEV(ndev, &pdev->dev); | |
88230fd5 MM |
4669 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | |
4670 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | | |
4671 | NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM; | |
4672 | ndev->features = ndev->hw_features | | |
4673 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | |
c4e84bde RM |
4674 | |
4675 | if (test_bit(QL_DMA64, &qdev->flags)) | |
4676 | ndev->features |= NETIF_F_HIGHDMA; | |
4677 | ||
4678 | /* | |
4679 | * Set up net_device structure. | |
4680 | */ | |
4681 | ndev->tx_queue_len = qdev->tx_ring_size; | |
4682 | ndev->irq = pdev->irq; | |
25ed7849 SH |
4683 | |
4684 | ndev->netdev_ops = &qlge_netdev_ops; | |
c4e84bde | 4685 | SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); |
c4e84bde | 4686 | ndev->watchdog_timeo = 10 * HZ; |
25ed7849 | 4687 | |
c4e84bde RM |
4688 | err = register_netdev(ndev); |
4689 | if (err) { | |
4690 | dev_err(&pdev->dev, "net device registration failed.\n"); | |
4691 | ql_release_all(pdev); | |
4692 | pci_disable_device(pdev); | |
4693 | return err; | |
4694 | } | |
15c052fc RM |
4695 | /* Start up the timer to trigger EEH if |
4696 | * the bus goes dead | |
4697 | */ | |
4698 | init_timer_deferrable(&qdev->timer); | |
4699 | qdev->timer.data = (unsigned long)qdev; | |
4700 | qdev->timer.function = ql_timer; | |
4701 | qdev->timer.expires = jiffies + (5*HZ); | |
4702 | add_timer(&qdev->timer); | |
6a473308 | 4703 | ql_link_off(qdev); |
c4e84bde | 4704 | ql_display_dev_info(ndev); |
9dfbbaa6 | 4705 | atomic_set(&qdev->lb_count, 0); |
c4e84bde RM |
4706 | cards_found++; |
4707 | return 0; | |
4708 | } | |
4709 | ||
9dfbbaa6 RM |
4710 | netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) |
4711 | { | |
4712 | return qlge_send(skb, ndev); | |
4713 | } | |
4714 | ||
4715 | int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) | |
4716 | { | |
4717 | return ql_clean_inbound_rx_ring(rx_ring, budget); | |
4718 | } | |
4719 | ||
c4e84bde RM |
4720 | static void __devexit qlge_remove(struct pci_dev *pdev) |
4721 | { | |
4722 | struct net_device *ndev = pci_get_drvdata(pdev); | |
15c052fc RM |
4723 | struct ql_adapter *qdev = netdev_priv(ndev); |
4724 | del_timer_sync(&qdev->timer); | |
c5dadddb | 4725 | ql_cancel_all_work_sync(qdev); |
c4e84bde RM |
4726 | unregister_netdev(ndev); |
4727 | ql_release_all(pdev); | |
4728 | pci_disable_device(pdev); | |
4729 | free_netdev(ndev); | |
4730 | } | |
4731 | ||
6d190c6e RM |
4732 | /* Clean up resources without touching hardware. */ |
4733 | static void ql_eeh_close(struct net_device *ndev) | |
4734 | { | |
4735 | int i; | |
4736 | struct ql_adapter *qdev = netdev_priv(ndev); | |
4737 | ||
4738 | if (netif_carrier_ok(ndev)) { | |
4739 | netif_carrier_off(ndev); | |
4740 | netif_stop_queue(ndev); | |
4741 | } | |
4742 | ||
7ae80abd BL |
4743 | /* Disabling the timer */ |
4744 | del_timer_sync(&qdev->timer); | |
c5dadddb | 4745 | ql_cancel_all_work_sync(qdev); |
6d190c6e RM |
4746 | |
4747 | for (i = 0; i < qdev->rss_ring_count; i++) | |
4748 | netif_napi_del(&qdev->rx_ring[i].napi); | |
4749 | ||
4750 | clear_bit(QL_ADAPTER_UP, &qdev->flags); | |
4751 | ql_tx_ring_clean(qdev); | |
4752 | ql_free_rx_buffers(qdev); | |
4753 | ql_release_adapter_resources(qdev); | |
4754 | } | |
4755 | ||
c4e84bde RM |
4756 | /* |
4757 | * This callback is called by the PCI subsystem whenever | |
4758 | * a PCI bus error is detected. | |
4759 | */ | |
4760 | static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |
4761 | enum pci_channel_state state) | |
4762 | { | |
4763 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4bbd1a19 | 4764 | struct ql_adapter *qdev = netdev_priv(ndev); |
fbc663ce | 4765 | |
6d190c6e RM |
4766 | switch (state) { |
4767 | case pci_channel_io_normal: | |
4768 | return PCI_ERS_RESULT_CAN_RECOVER; | |
4769 | case pci_channel_io_frozen: | |
4770 | netif_device_detach(ndev); | |
4771 | if (netif_running(ndev)) | |
4772 | ql_eeh_close(ndev); | |
4773 | pci_disable_device(pdev); | |
4774 | return PCI_ERS_RESULT_NEED_RESET; | |
4775 | case pci_channel_io_perm_failure: | |
4776 | dev_err(&pdev->dev, | |
4777 | "%s: pci_channel_io_perm_failure.\n", __func__); | |
4bbd1a19 RM |
4778 | ql_eeh_close(ndev); |
4779 | set_bit(QL_EEH_FATAL, &qdev->flags); | |
fbc663ce | 4780 | return PCI_ERS_RESULT_DISCONNECT; |
6d190c6e | 4781 | } |
c4e84bde RM |
4782 | |
4783 | /* Request a slot reset. */ | |
4784 | return PCI_ERS_RESULT_NEED_RESET; | |
4785 | } | |
4786 | ||
4787 | /* | |
4788 | * This callback is called after the PCI buss has been reset. | |
4789 | * Basically, this tries to restart the card from scratch. | |
4790 | * This is a shortened version of the device probe/discovery code, | |
4791 | * it resembles the first-half of the () routine. | |
4792 | */ | |
4793 | static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) | |
4794 | { | |
4795 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4796 | struct ql_adapter *qdev = netdev_priv(ndev); | |
4797 | ||
6d190c6e RM |
4798 | pdev->error_state = pci_channel_io_normal; |
4799 | ||
4800 | pci_restore_state(pdev); | |
c4e84bde | 4801 | if (pci_enable_device(pdev)) { |
ae9540f7 JP |
4802 | netif_err(qdev, ifup, qdev->ndev, |
4803 | "Cannot re-enable PCI device after reset.\n"); | |
c4e84bde RM |
4804 | return PCI_ERS_RESULT_DISCONNECT; |
4805 | } | |
c4e84bde | 4806 | pci_set_master(pdev); |
a112fd4c RM |
4807 | |
4808 | if (ql_adapter_reset(qdev)) { | |
ae9540f7 | 4809 | netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); |
4bbd1a19 | 4810 | set_bit(QL_EEH_FATAL, &qdev->flags); |
a112fd4c RM |
4811 | return PCI_ERS_RESULT_DISCONNECT; |
4812 | } | |
4813 | ||
c4e84bde RM |
4814 | return PCI_ERS_RESULT_RECOVERED; |
4815 | } | |
4816 | ||
4817 | static void qlge_io_resume(struct pci_dev *pdev) | |
4818 | { | |
4819 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4820 | struct ql_adapter *qdev = netdev_priv(ndev); | |
6d190c6e | 4821 | int err = 0; |
c4e84bde | 4822 | |
c4e84bde | 4823 | if (netif_running(ndev)) { |
6d190c6e RM |
4824 | err = qlge_open(ndev); |
4825 | if (err) { | |
ae9540f7 JP |
4826 | netif_err(qdev, ifup, qdev->ndev, |
4827 | "Device initialization failed after reset.\n"); | |
c4e84bde RM |
4828 | return; |
4829 | } | |
6d190c6e | 4830 | } else { |
ae9540f7 JP |
4831 | netif_err(qdev, ifup, qdev->ndev, |
4832 | "Device was not running prior to EEH.\n"); | |
c4e84bde | 4833 | } |
72046d84 | 4834 | mod_timer(&qdev->timer, jiffies + (5*HZ)); |
c4e84bde RM |
4835 | netif_device_attach(ndev); |
4836 | } | |
4837 | ||
4838 | static struct pci_error_handlers qlge_err_handler = { | |
4839 | .error_detected = qlge_io_error_detected, | |
4840 | .slot_reset = qlge_io_slot_reset, | |
4841 | .resume = qlge_io_resume, | |
4842 | }; | |
4843 | ||
4844 | static int qlge_suspend(struct pci_dev *pdev, pm_message_t state) | |
4845 | { | |
4846 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4847 | struct ql_adapter *qdev = netdev_priv(ndev); | |
6b318cb3 | 4848 | int err; |
c4e84bde RM |
4849 | |
4850 | netif_device_detach(ndev); | |
15c052fc | 4851 | del_timer_sync(&qdev->timer); |
c4e84bde RM |
4852 | |
4853 | if (netif_running(ndev)) { | |
4854 | err = ql_adapter_down(qdev); | |
4855 | if (!err) | |
4856 | return err; | |
4857 | } | |
4858 | ||
bc083ce9 | 4859 | ql_wol(qdev); |
c4e84bde RM |
4860 | err = pci_save_state(pdev); |
4861 | if (err) | |
4862 | return err; | |
4863 | ||
4864 | pci_disable_device(pdev); | |
4865 | ||
4866 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
4867 | ||
4868 | return 0; | |
4869 | } | |
4870 | ||
04da2cf9 | 4871 | #ifdef CONFIG_PM |
c4e84bde RM |
4872 | static int qlge_resume(struct pci_dev *pdev) |
4873 | { | |
4874 | struct net_device *ndev = pci_get_drvdata(pdev); | |
4875 | struct ql_adapter *qdev = netdev_priv(ndev); | |
4876 | int err; | |
4877 | ||
4878 | pci_set_power_state(pdev, PCI_D0); | |
4879 | pci_restore_state(pdev); | |
4880 | err = pci_enable_device(pdev); | |
4881 | if (err) { | |
ae9540f7 | 4882 | netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n"); |
c4e84bde RM |
4883 | return err; |
4884 | } | |
4885 | pci_set_master(pdev); | |
4886 | ||
4887 | pci_enable_wake(pdev, PCI_D3hot, 0); | |
4888 | pci_enable_wake(pdev, PCI_D3cold, 0); | |
4889 | ||
4890 | if (netif_running(ndev)) { | |
4891 | err = ql_adapter_up(qdev); | |
4892 | if (err) | |
4893 | return err; | |
4894 | } | |
4895 | ||
72046d84 | 4896 | mod_timer(&qdev->timer, jiffies + (5*HZ)); |
c4e84bde RM |
4897 | netif_device_attach(ndev); |
4898 | ||
4899 | return 0; | |
4900 | } | |
04da2cf9 | 4901 | #endif /* CONFIG_PM */ |
c4e84bde RM |
4902 | |
4903 | static void qlge_shutdown(struct pci_dev *pdev) | |
4904 | { | |
4905 | qlge_suspend(pdev, PMSG_SUSPEND); | |
4906 | } | |
4907 | ||
4908 | static struct pci_driver qlge_driver = { | |
4909 | .name = DRV_NAME, | |
4910 | .id_table = qlge_pci_tbl, | |
4911 | .probe = qlge_probe, | |
4912 | .remove = __devexit_p(qlge_remove), | |
4913 | #ifdef CONFIG_PM | |
4914 | .suspend = qlge_suspend, | |
4915 | .resume = qlge_resume, | |
4916 | #endif | |
4917 | .shutdown = qlge_shutdown, | |
4918 | .err_handler = &qlge_err_handler | |
4919 | }; | |
4920 | ||
4921 | static int __init qlge_init_module(void) | |
4922 | { | |
4923 | return pci_register_driver(&qlge_driver); | |
4924 | } | |
4925 | ||
4926 | static void __exit qlge_exit(void) | |
4927 | { | |
4928 | pci_unregister_driver(&qlge_driver); | |
4929 | } | |
4930 | ||
4931 | module_init(qlge_init_module); | |
4932 | module_exit(qlge_exit); |