1 | /* |
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | |
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | |
4 | * | |
5 | * This program is free software; you may redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; version 2 of the License. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
16 | * SOFTWARE. | |
17 | * | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/kernel.h> | |
22 | #include <linux/string.h> | |
23 | #include <linux/errno.h> | |
24 | #include <linux/types.h> | |
25 | #include <linux/init.h> | |
26 | #include <linux/workqueue.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/netdevice.h> | |
29 | #include <linux/etherdevice.h> | |
30 | #include <linux/if_ether.h> | |
31 | #include <linux/if_vlan.h> | |
32 | #include <linux/ethtool.h> | |
33 | #include <linux/in.h> | |
34 | #include <linux/ip.h> | |
35 | #include <linux/ipv6.h> | |
36 | #include <linux/tcp.h> | |
37 | ||
38 | #include "cq_enet_desc.h" | |
39 | #include "vnic_dev.h" | |
40 | #include "vnic_intr.h" | |
41 | #include "vnic_stats.h" | |
42 | #include "enic_res.h" | |
43 | #include "enic.h" | |
44 | ||
45 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) | |
46 | #define ENIC_JUMBO_FIRST_BUF_SIZE 256 | |
47 | ||
48 | /* Supported devices */ | |
49 | static struct pci_device_id enic_id_table[] = { | |
50 | { PCI_VDEVICE(CISCO, 0x0043) }, | |
51 | { 0, } /* end of table */ | |
52 | }; | |
53 | ||
54 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | |
55 | MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>"); | |
56 | MODULE_LICENSE("GPL"); | |
57 | MODULE_VERSION(DRV_VERSION); | |
58 | MODULE_DEVICE_TABLE(pci, enic_id_table); | |
59 | ||
60 | struct enic_stat { | |
61 | char name[ETH_GSTRING_LEN]; | |
62 | unsigned int offset; | |
63 | }; | |
64 | ||
65 | #define ENIC_TX_STAT(stat) \ | |
66 | { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 } | |
67 | #define ENIC_RX_STAT(stat) \ | |
68 | { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 } | |
69 | ||
70 | static const struct enic_stat enic_tx_stats[] = { | |
71 | ENIC_TX_STAT(tx_frames_ok), | |
72 | ENIC_TX_STAT(tx_unicast_frames_ok), | |
73 | ENIC_TX_STAT(tx_multicast_frames_ok), | |
74 | ENIC_TX_STAT(tx_broadcast_frames_ok), | |
75 | ENIC_TX_STAT(tx_bytes_ok), | |
76 | ENIC_TX_STAT(tx_unicast_bytes_ok), | |
77 | ENIC_TX_STAT(tx_multicast_bytes_ok), | |
78 | ENIC_TX_STAT(tx_broadcast_bytes_ok), | |
79 | ENIC_TX_STAT(tx_drops), | |
80 | ENIC_TX_STAT(tx_errors), | |
81 | ENIC_TX_STAT(tx_tso), | |
82 | }; | |
83 | ||
84 | static const struct enic_stat enic_rx_stats[] = { | |
85 | ENIC_RX_STAT(rx_frames_ok), | |
86 | ENIC_RX_STAT(rx_frames_total), | |
87 | ENIC_RX_STAT(rx_unicast_frames_ok), | |
88 | ENIC_RX_STAT(rx_multicast_frames_ok), | |
89 | ENIC_RX_STAT(rx_broadcast_frames_ok), | |
90 | ENIC_RX_STAT(rx_bytes_ok), | |
91 | ENIC_RX_STAT(rx_unicast_bytes_ok), | |
92 | ENIC_RX_STAT(rx_multicast_bytes_ok), | |
93 | ENIC_RX_STAT(rx_broadcast_bytes_ok), | |
94 | ENIC_RX_STAT(rx_drop), | |
95 | ENIC_RX_STAT(rx_no_bufs), | |
96 | ENIC_RX_STAT(rx_errors), | |
97 | ENIC_RX_STAT(rx_rss), | |
98 | ENIC_RX_STAT(rx_crc_errors), | |
99 | ENIC_RX_STAT(rx_frames_64), | |
100 | ENIC_RX_STAT(rx_frames_127), | |
101 | ENIC_RX_STAT(rx_frames_255), | |
102 | ENIC_RX_STAT(rx_frames_511), | |
103 | ENIC_RX_STAT(rx_frames_1023), | |
104 | ENIC_RX_STAT(rx_frames_1518), | |
105 | ENIC_RX_STAT(rx_frames_to_max), | |
106 | }; | |
107 | ||
108 | static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); | |
109 | static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); | |
110 | ||
111 | static int enic_get_settings(struct net_device *netdev, | |
112 | struct ethtool_cmd *ecmd) | |
113 | { | |
114 | struct enic *enic = netdev_priv(netdev); | |
115 | ||
116 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); | |
117 | ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); | |
118 | ecmd->port = PORT_FIBRE; | |
119 | ecmd->transceiver = XCVR_EXTERNAL; | |
120 | ||
121 | if (netif_carrier_ok(netdev)) { | |
122 | ecmd->speed = vnic_dev_port_speed(enic->vdev); | |
123 | ecmd->duplex = DUPLEX_FULL; | |
124 | } else { | |
125 | ecmd->speed = -1; | |
126 | ecmd->duplex = -1; | |
127 | } | |
128 | ||
129 | ecmd->autoneg = AUTONEG_DISABLE; | |
130 | ||
131 | return 0; | |
132 | } | |
133 | ||
134 | static void enic_get_drvinfo(struct net_device *netdev, | |
135 | struct ethtool_drvinfo *drvinfo) | |
136 | { | |
137 | struct enic *enic = netdev_priv(netdev); | |
138 | struct vnic_devcmd_fw_info *fw_info; | |
139 | ||
140 | spin_lock(&enic->devcmd_lock); | |
141 | vnic_dev_fw_info(enic->vdev, &fw_info); | |
142 | spin_unlock(&enic->devcmd_lock); | |
143 | ||
144 | strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); | |
145 | strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); | |
146 | strncpy(drvinfo->fw_version, fw_info->fw_version, | |
147 | sizeof(drvinfo->fw_version)); | |
148 | strncpy(drvinfo->bus_info, pci_name(enic->pdev), | |
149 | sizeof(drvinfo->bus_info)); | |
150 | } | |
151 | ||
152 | static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |
153 | { | |
154 | unsigned int i; | |
155 | ||
156 | switch (stringset) { | |
157 | case ETH_SS_STATS: | |
158 | for (i = 0; i < enic_n_tx_stats; i++) { | |
159 | memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); | |
160 | data += ETH_GSTRING_LEN; | |
161 | } | |
162 | for (i = 0; i < enic_n_rx_stats; i++) { | |
163 | memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); | |
164 | data += ETH_GSTRING_LEN; | |
165 | } | |
166 | break; | |
167 | } | |
168 | } | |
169 | ||
170 | static int enic_get_stats_count(struct net_device *netdev) | |
171 | { | |
172 | return enic_n_tx_stats + enic_n_rx_stats; | |
173 | } | |
174 | ||
175 | static void enic_get_ethtool_stats(struct net_device *netdev, | |
176 | struct ethtool_stats *stats, u64 *data) | |
177 | { | |
178 | struct enic *enic = netdev_priv(netdev); | |
179 | struct vnic_stats *vstats; | |
180 | unsigned int i; | |
181 | ||
182 | spin_lock(&enic->devcmd_lock); | |
183 | vnic_dev_stats_dump(enic->vdev, &vstats); | |
184 | spin_unlock(&enic->devcmd_lock); | |
185 | ||
186 | for (i = 0; i < enic_n_tx_stats; i++) | |
187 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; | |
188 | for (i = 0; i < enic_n_rx_stats; i++) | |
189 | *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; | |
190 | } | |
191 | ||
192 | static u32 enic_get_rx_csum(struct net_device *netdev) | |
193 | { | |
194 | struct enic *enic = netdev_priv(netdev); | |
195 | return enic->csum_rx_enabled; | |
196 | } | |
197 | ||
198 | static int enic_set_rx_csum(struct net_device *netdev, u32 data) | |
199 | { | |
200 | struct enic *enic = netdev_priv(netdev); | |
201 | ||
202 | enic->csum_rx_enabled = | |
203 | (data && ENIC_SETTING(enic, RXCSUM)) ? 1 : 0; | |
204 | ||
205 | return 0; | |
206 | } | |
207 | ||
208 | static int enic_set_tx_csum(struct net_device *netdev, u32 data) | |
209 | { | |
210 | struct enic *enic = netdev_priv(netdev); | |
211 | ||
212 | if (data && ENIC_SETTING(enic, TXCSUM)) | |
213 | netdev->features |= NETIF_F_HW_CSUM; | |
214 | else | |
215 | netdev->features &= ~NETIF_F_HW_CSUM; | |
216 | ||
217 | return 0; | |
218 | } | |
219 | ||
220 | static int enic_set_tso(struct net_device *netdev, u32 data) | |
221 | { | |
222 | struct enic *enic = netdev_priv(netdev); | |
223 | ||
224 | if (data && ENIC_SETTING(enic, TSO)) | |
225 | netdev->features |= | |
226 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | |
227 | else | |
228 | netdev->features &= | |
229 | ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); | |
230 | ||
231 | return 0; | |
232 | } | |
233 | ||
234 | static u32 enic_get_msglevel(struct net_device *netdev) | |
235 | { | |
236 | struct enic *enic = netdev_priv(netdev); | |
237 | return enic->msg_enable; | |
238 | } | |
239 | ||
240 | static void enic_set_msglevel(struct net_device *netdev, u32 value) | |
241 | { | |
242 | struct enic *enic = netdev_priv(netdev); | |
243 | enic->msg_enable = value; | |
244 | } | |
245 | ||
246 | static struct ethtool_ops enic_ethtool_ops = { | |
247 | .get_settings = enic_get_settings, | |
248 | .get_drvinfo = enic_get_drvinfo, | |
249 | .get_msglevel = enic_get_msglevel, | |
250 | .set_msglevel = enic_set_msglevel, | |
251 | .get_link = ethtool_op_get_link, | |
252 | .get_strings = enic_get_strings, | |
253 | .get_stats_count = enic_get_stats_count, | |
254 | .get_ethtool_stats = enic_get_ethtool_stats, | |
255 | .get_rx_csum = enic_get_rx_csum, | |
256 | .set_rx_csum = enic_set_rx_csum, | |
257 | .get_tx_csum = ethtool_op_get_tx_csum, | |
258 | .set_tx_csum = enic_set_tx_csum, | |
259 | .get_sg = ethtool_op_get_sg, | |
260 | .set_sg = ethtool_op_set_sg, | |
261 | .get_tso = ethtool_op_get_tso, | |
262 | .set_tso = enic_set_tso, | |
263 | }; | |
264 | ||
265 | static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) | |
266 | { | |
267 | struct enic *enic = vnic_dev_priv(wq->vdev); | |
268 | ||
269 | if (buf->sop) | |
270 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
271 | buf->len, PCI_DMA_TODEVICE); | |
272 | else | |
273 | pci_unmap_page(enic->pdev, buf->dma_addr, | |
274 | buf->len, PCI_DMA_TODEVICE); | |
275 | ||
276 | if (buf->os_buf) | |
277 | dev_kfree_skb_any(buf->os_buf); | |
278 | } | |
279 | ||
280 | static void enic_wq_free_buf(struct vnic_wq *wq, | |
281 | struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) | |
282 | { | |
283 | enic_free_wq_buf(wq, buf); | |
284 | } | |
285 | ||
286 | static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |
287 | u8 type, u16 q_number, u16 completed_index, void *opaque) | |
288 | { | |
289 | struct enic *enic = vnic_dev_priv(vdev); | |
290 | ||
291 | spin_lock(&enic->wq_lock[q_number]); | |
292 | ||
293 | vnic_wq_service(&enic->wq[q_number], cq_desc, | |
294 | completed_index, enic_wq_free_buf, | |
295 | opaque); | |
296 | ||
297 | if (netif_queue_stopped(enic->netdev) && | |
298 | vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1) | |
299 | netif_wake_queue(enic->netdev); | |
300 | ||
301 | spin_unlock(&enic->wq_lock[q_number]); | |
302 | ||
303 | return 0; | |
304 | } | |
305 | ||
306 | static void enic_log_q_error(struct enic *enic) | |
307 | { | |
308 | unsigned int i; | |
309 | u32 error_status; | |
310 | ||
311 | for (i = 0; i < enic->wq_count; i++) { | |
312 | error_status = vnic_wq_error_status(&enic->wq[i]); | |
313 | if (error_status) | |
314 | printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n", | |
315 | enic->netdev->name, i, error_status); | |
316 | } | |
317 | ||
318 | for (i = 0; i < enic->rq_count; i++) { | |
319 | error_status = vnic_rq_error_status(&enic->rq[i]); | |
320 | if (error_status) | |
321 | printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n", | |
322 | enic->netdev->name, i, error_status); | |
323 | } | |
324 | } | |
325 | ||
326 | static void enic_link_check(struct enic *enic) | |
327 | { | |
328 | int link_status = vnic_dev_link_status(enic->vdev); | |
329 | int carrier_ok = netif_carrier_ok(enic->netdev); | |
330 | ||
331 | if (link_status && !carrier_ok) { | |
332 | printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name); | |
333 | netif_carrier_on(enic->netdev); | |
334 | } else if (!link_status && carrier_ok) { | |
335 | printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name); | |
336 | netif_carrier_off(enic->netdev); | |
337 | } | |
338 | } | |
339 | ||
340 | static void enic_mtu_check(struct enic *enic) | |
341 | { | |
342 | u32 mtu = vnic_dev_mtu(enic->vdev); | |
343 | ||
344 | if (mtu != enic->port_mtu) { | |
345 | if (mtu < enic->netdev->mtu) | |
346 | printk(KERN_WARNING PFX | |
347 | "%s: interface MTU (%d) set higher " | |
348 | "than switch port MTU (%d)\n", | |
349 | enic->netdev->name, enic->netdev->mtu, mtu); | |
350 | enic->port_mtu = mtu; | |
351 | } | |
352 | } | |
353 | ||
354 | static void enic_msglvl_check(struct enic *enic) | |
355 | { | |
356 | u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); | |
357 | ||
358 | if (msg_enable != enic->msg_enable) { | |
359 | printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n", | |
360 | enic->netdev->name, enic->msg_enable, msg_enable); | |
361 | enic->msg_enable = msg_enable; | |
362 | } | |
363 | } | |
364 | ||
365 | static void enic_notify_check(struct enic *enic) | |
366 | { | |
367 | enic_msglvl_check(enic); | |
368 | enic_mtu_check(enic); | |
369 | enic_link_check(enic); | |
370 | } | |
371 | ||
372 | #define ENIC_TEST_INTR(pba, i) (pba & (1 << i)) | |
373 | ||
374 | static irqreturn_t enic_isr_legacy(int irq, void *data) | |
375 | { | |
376 | struct net_device *netdev = data; | |
377 | struct enic *enic = netdev_priv(netdev); | |
378 | u32 pba; | |
379 | ||
380 | vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]); | |
381 | ||
382 | pba = vnic_intr_legacy_pba(enic->legacy_pba); | |
383 | if (!pba) { | |
384 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | |
385 | return IRQ_NONE; /* not our interrupt */ | |
386 | } | |
387 | ||
388 | if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) | |
389 | enic_notify_check(enic); | |
390 | ||
391 | if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) { | |
392 | enic_log_q_error(enic); | |
393 | /* schedule recovery from WQ/RQ error */ | |
394 | schedule_work(&enic->reset); | |
395 | return IRQ_HANDLED; | |
396 | } | |
397 | ||
398 | if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { | |
399 | if (netif_rx_schedule_prep(netdev, &enic->napi)) | |
400 | __netif_rx_schedule(netdev, &enic->napi); | |
401 | } else { | |
402 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | |
403 | } | |
404 | ||
405 | return IRQ_HANDLED; | |
406 | } | |
407 | ||
408 | static irqreturn_t enic_isr_msi(int irq, void *data) | |
409 | { | |
410 | struct enic *enic = data; | |
411 | ||
412 | /* With MSI, there is no sharing of interrupts, so this is | |
413 | * our interrupt and there is no need to ack it. The device | |
414 | * is not providing per-vector masking, so the OS will not | |
415 | * write to PCI config space to mask/unmask the interrupt. | |
416 | * We're using mask_on_assertion for MSI, so the device | |
417 | * automatically masks the interrupt when the interrupt is | |
418 | * generated. Later, when exiting polling, the interrupt | |
419 | * will be unmasked (see enic_poll). | |
420 | * | |
421 | * Also, the device uses the same PCIe Traffic Class (TC) | |
422 | * for Memory Write data and MSI, so there are no ordering | |
423 | * issues; the MSI will always arrive at the Root Complex | |
424 | * _after_ corresponding Memory Writes (i.e. descriptor | |
425 | * writes). | |
426 | */ | |
427 | ||
428 | netif_rx_schedule(enic->netdev, &enic->napi); | |
429 | ||
430 | return IRQ_HANDLED; | |
431 | } | |
432 | ||
433 | static irqreturn_t enic_isr_msix_rq(int irq, void *data) | |
434 | { | |
435 | struct enic *enic = data; | |
436 | ||
437 | /* schedule NAPI polling for RQ cleanup */ | |
438 | netif_rx_schedule(enic->netdev, &enic->napi); | |
439 | ||
440 | return IRQ_HANDLED; | |
441 | } | |
442 | ||
443 | static irqreturn_t enic_isr_msix_wq(int irq, void *data) | |
444 | { | |
445 | struct enic *enic = data; | |
446 | unsigned int wq_work_to_do = -1; /* no limit */ | |
447 | unsigned int wq_work_done; | |
448 | ||
449 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | |
450 | wq_work_to_do, enic_wq_service, NULL); | |
451 | ||
452 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ], | |
453 | wq_work_done, | |
454 | 1 /* unmask intr */, | |
455 | 1 /* reset intr timer */); | |
456 | ||
457 | return IRQ_HANDLED; | |
458 | } | |
459 | ||
460 | static irqreturn_t enic_isr_msix_err(int irq, void *data) | |
461 | { | |
462 | struct enic *enic = data; | |
463 | ||
464 | enic_log_q_error(enic); | |
465 | ||
466 | /* schedule recovery from WQ/RQ error */ | |
467 | schedule_work(&enic->reset); | |
468 | ||
469 | return IRQ_HANDLED; | |
470 | } | |
471 | ||
472 | static irqreturn_t enic_isr_msix_notify(int irq, void *data) | |
473 | { | |
474 | struct enic *enic = data; | |
475 | ||
476 | enic_notify_check(enic); | |
477 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]); | |
478 | ||
479 | return IRQ_HANDLED; | |
480 | } | |
481 | ||
482 | static inline void enic_queue_wq_skb_cont(struct enic *enic, | |
483 | struct vnic_wq *wq, struct sk_buff *skb, | |
484 | unsigned int len_left) | |
485 | { | |
486 | skb_frag_t *frag; | |
487 | ||
488 | /* Queue additional data fragments */ | |
489 | for (frag = skb_shinfo(skb)->frags; len_left; frag++) { | |
490 | len_left -= frag->size; | |
491 | enic_queue_wq_desc_cont(wq, skb, | |
492 | pci_map_page(enic->pdev, frag->page, | |
493 | frag->page_offset, frag->size, | |
494 | PCI_DMA_TODEVICE), | |
495 | frag->size, | |
496 | (len_left == 0)); /* EOP? */ | |
497 | } | |
498 | } | |
499 | ||
500 | static inline void enic_queue_wq_skb_vlan(struct enic *enic, | |
501 | struct vnic_wq *wq, struct sk_buff *skb, | |
502 | int vlan_tag_insert, unsigned int vlan_tag) | |
503 | { | |
504 | unsigned int head_len = skb_headlen(skb); | |
505 | unsigned int len_left = skb->len - head_len; | |
506 | int eop = (len_left == 0); | |
507 | ||
508 | /* Queue the main skb fragment */ | |
509 | enic_queue_wq_desc(wq, skb, | |
510 | pci_map_single(enic->pdev, skb->data, | |
511 | head_len, PCI_DMA_TODEVICE), | |
512 | head_len, | |
513 | vlan_tag_insert, vlan_tag, | |
514 | eop); | |
515 | ||
516 | if (!eop) | |
517 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | |
518 | } | |
519 | ||
520 | static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, | |
521 | struct vnic_wq *wq, struct sk_buff *skb, | |
522 | int vlan_tag_insert, unsigned int vlan_tag) | |
523 | { | |
524 | unsigned int head_len = skb_headlen(skb); | |
525 | unsigned int len_left = skb->len - head_len; | |
526 | unsigned int hdr_len = skb_transport_offset(skb); | |
527 | unsigned int csum_offset = hdr_len + skb->csum_offset; | |
528 | int eop = (len_left == 0); | |
529 | ||
530 | /* Queue the main skb fragment */ | |
531 | enic_queue_wq_desc_csum_l4(wq, skb, | |
532 | pci_map_single(enic->pdev, skb->data, | |
533 | head_len, PCI_DMA_TODEVICE), | |
534 | head_len, | |
535 | csum_offset, | |
536 | hdr_len, | |
537 | vlan_tag_insert, vlan_tag, | |
538 | eop); | |
539 | ||
540 | if (!eop) | |
541 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | |
542 | } | |
543 | ||
544 | static inline void enic_queue_wq_skb_tso(struct enic *enic, | |
545 | struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, | |
546 | int vlan_tag_insert, unsigned int vlan_tag) | |
547 | { | |
548 | unsigned int head_len = skb_headlen(skb); | |
549 | unsigned int len_left = skb->len - head_len; | |
550 | unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
551 | int eop = (len_left == 0); | |
552 | ||
553 | /* Preload TCP csum field with IP pseudo hdr calculated | |
554 | * with IP length set to zero. HW will later add in length | |
555 | * to each TCP segment resulting from the TSO. | |
556 | */ | |
557 | ||
558 | if (skb->protocol == __constant_htons(ETH_P_IP)) { | |
559 | ip_hdr(skb)->check = 0; | |
560 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | |
561 | ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | |
562 | } else if (skb->protocol == __constant_htons(ETH_P_IPV6)) { | |
563 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | |
564 | &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | |
565 | } | |
566 | ||
567 | /* Queue the main skb fragment */ | |
568 | enic_queue_wq_desc_tso(wq, skb, | |
569 | pci_map_single(enic->pdev, skb->data, | |
570 | head_len, PCI_DMA_TODEVICE), | |
571 | head_len, | |
572 | mss, hdr_len, | |
573 | vlan_tag_insert, vlan_tag, | |
574 | eop); | |
575 | ||
576 | if (!eop) | |
577 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | |
578 | } | |
579 | ||
580 | static inline void enic_queue_wq_skb(struct enic *enic, | |
581 | struct vnic_wq *wq, struct sk_buff *skb) | |
582 | { | |
583 | unsigned int mss = skb_shinfo(skb)->gso_size; | |
584 | unsigned int vlan_tag = 0; | |
585 | int vlan_tag_insert = 0; | |
586 | ||
587 | if (enic->vlan_group && vlan_tx_tag_present(skb)) { | |
588 | /* VLAN tag from trunking driver */ | |
589 | vlan_tag_insert = 1; | |
590 | vlan_tag = vlan_tx_tag_get(skb); | |
591 | } | |
592 | ||
593 | if (mss) | |
594 | enic_queue_wq_skb_tso(enic, wq, skb, mss, | |
595 | vlan_tag_insert, vlan_tag); | |
596 | else if (skb->ip_summed == CHECKSUM_PARTIAL) | |
597 | enic_queue_wq_skb_csum_l4(enic, wq, skb, | |
598 | vlan_tag_insert, vlan_tag); | |
599 | else | |
600 | enic_queue_wq_skb_vlan(enic, wq, skb, | |
601 | vlan_tag_insert, vlan_tag); | |
602 | } | |
603 | ||
604 | /* netif_tx_lock held, process context with BHs disabled */ | |
605 | static int enic_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |
606 | { | |
607 | struct enic *enic = netdev_priv(netdev); | |
608 | struct vnic_wq *wq = &enic->wq[0]; | |
609 | unsigned long flags; | |
610 | ||
611 | if (skb->len <= 0) { | |
612 | dev_kfree_skb(skb); | |
613 | return NETDEV_TX_OK; | |
614 | } | |
615 | ||
616 | /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, | |
617 | * which is very likely. In the off chance it's going to take | |
618 | * more than ENIC_NON_TSO_MAX_DESC, linearize the skb. | |
619 | */ | |
620 | ||
621 | if (skb_shinfo(skb)->gso_size == 0 && | |
622 | skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && | |
623 | skb_linearize(skb)) { | |
624 | dev_kfree_skb(skb); | |
625 | return NETDEV_TX_OK; | |
626 | } | |
627 | ||
628 | spin_lock_irqsave(&enic->wq_lock[0], flags); | |
629 | ||
630 | if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) { | |
631 | netif_stop_queue(netdev); | |
632 | /* This is a hard error, log it */ | |
633 | printk(KERN_ERR PFX "%s: BUG! Tx ring full when " | |
634 | "queue awake!\n", netdev->name); | |
635 | spin_unlock_irqrestore(&enic->wq_lock[0], flags); | |
636 | return NETDEV_TX_BUSY; | |
637 | } | |
638 | ||
639 | enic_queue_wq_skb(enic, wq, skb); | |
640 | ||
641 | if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1) | |
642 | netif_stop_queue(netdev); | |
643 | ||
644 | netdev->trans_start = jiffies; | |
645 | ||
646 | spin_unlock_irqrestore(&enic->wq_lock[0], flags); | |
647 | ||
648 | return NETDEV_TX_OK; | |
649 | } | |
650 | ||
651 | /* dev_base_lock rwlock held, nominally process context */ | |
652 | static struct net_device_stats *enic_get_stats(struct net_device *netdev) | |
653 | { | |
654 | struct enic *enic = netdev_priv(netdev); | |
655 | struct vnic_stats *stats; | |
656 | ||
657 | spin_lock(&enic->devcmd_lock); | |
658 | vnic_dev_stats_dump(enic->vdev, &stats); | |
659 | spin_unlock(&enic->devcmd_lock); | |
660 | ||
661 | enic->net_stats.tx_packets = stats->tx.tx_frames_ok; | |
662 | enic->net_stats.tx_bytes = stats->tx.tx_bytes_ok; | |
663 | enic->net_stats.tx_errors = stats->tx.tx_errors; | |
664 | enic->net_stats.tx_dropped = stats->tx.tx_drops; | |
665 | ||
666 | enic->net_stats.rx_packets = stats->rx.rx_frames_ok; | |
667 | enic->net_stats.rx_bytes = stats->rx.rx_bytes_ok; | |
668 | enic->net_stats.rx_errors = stats->rx.rx_errors; | |
669 | enic->net_stats.multicast = stats->rx.rx_multicast_frames_ok; | |
670 | enic->net_stats.rx_crc_errors = stats->rx.rx_crc_errors; | |
671 | enic->net_stats.rx_dropped = stats->rx.rx_no_bufs; | |
672 | ||
673 | return &enic->net_stats; | |
674 | } | |
675 | ||
676 | static void enic_reset_mcaddrs(struct enic *enic) | |
677 | { | |
678 | enic->mc_count = 0; | |
679 | } | |
680 | ||
681 | static int enic_set_mac_addr(struct net_device *netdev, char *addr) | |
682 | { | |
683 | if (!is_valid_ether_addr(addr)) | |
684 | return -EADDRNOTAVAIL; | |
685 | ||
686 | memcpy(netdev->dev_addr, addr, netdev->addr_len); | |
687 | ||
688 | return 0; | |
689 | } | |
690 | ||
691 | /* netif_tx_lock held, BHs disabled */ | |
692 | static void enic_set_multicast_list(struct net_device *netdev) | |
693 | { | |
694 | struct enic *enic = netdev_priv(netdev); | |
695 | struct dev_mc_list *list = netdev->mc_list; | |
696 | int directed = 1; | |
697 | int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; | |
698 | int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; | |
699 | int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; | |
700 | int allmulti = (netdev->flags & IFF_ALLMULTI) || | |
701 | (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS); | |
702 | u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; | |
703 | unsigned int mc_count = netdev->mc_count; | |
704 | unsigned int i, j; | |
705 | ||
706 | if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) | |
707 | mc_count = ENIC_MULTICAST_PERFECT_FILTERS; | |
708 | ||
709 | spin_lock(&enic->devcmd_lock); | |
710 | ||
711 | vnic_dev_packet_filter(enic->vdev, directed, | |
712 | multicast, broadcast, promisc, allmulti); | |
713 | ||
714 | /* Is there an easier way? Trying to minimize the | |
715 | * calls to add/del multicast addrs. We keep the | |
716 | * addrs from the last call in enic->mc_addr and | |
717 | * look for changes to add/del. | |
718 | */ | |
719 | ||
720 | for (i = 0; list && i < mc_count; i++) { | |
721 | memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN); | |
722 | list = list->next; | |
723 | } | |
724 | ||
725 | for (i = 0; i < enic->mc_count; i++) { | |
726 | for (j = 0; j < mc_count; j++) | |
727 | if (compare_ether_addr(enic->mc_addr[i], | |
728 | mc_addr[j]) == 0) | |
729 | break; | |
730 | if (j == mc_count) | |
731 | enic_del_multicast_addr(enic, enic->mc_addr[i]); | |
732 | } | |
733 | ||
734 | for (i = 0; i < mc_count; i++) { | |
735 | for (j = 0; j < enic->mc_count; j++) | |
736 | if (compare_ether_addr(mc_addr[i], | |
737 | enic->mc_addr[j]) == 0) | |
738 | break; | |
739 | if (j == enic->mc_count) | |
740 | enic_add_multicast_addr(enic, mc_addr[i]); | |
741 | } | |
742 | ||
743 | /* Save the list to compare against next time | |
744 | */ | |
745 | ||
746 | for (i = 0; i < mc_count; i++) | |
747 | memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); | |
748 | ||
749 | enic->mc_count = mc_count; | |
750 | ||
751 | spin_unlock(&enic->devcmd_lock); | |
752 | } | |
753 | ||
754 | /* rtnl lock is held */ | |
755 | static void enic_vlan_rx_register(struct net_device *netdev, | |
756 | struct vlan_group *vlan_group) | |
757 | { | |
758 | struct enic *enic = netdev_priv(netdev); | |
759 | enic->vlan_group = vlan_group; | |
760 | } | |
761 | ||
762 | /* rtnl lock is held */ | |
763 | static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |
764 | { | |
765 | struct enic *enic = netdev_priv(netdev); | |
766 | ||
767 | spin_lock(&enic->devcmd_lock); | |
768 | enic_add_vlan(enic, vid); | |
769 | spin_unlock(&enic->devcmd_lock); | |
770 | } | |
771 | ||
772 | /* rtnl lock is held */ | |
773 | static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |
774 | { | |
775 | struct enic *enic = netdev_priv(netdev); | |
776 | ||
777 | spin_lock(&enic->devcmd_lock); | |
778 | enic_del_vlan(enic, vid); | |
779 | spin_unlock(&enic->devcmd_lock); | |
780 | } | |
781 | ||
782 | /* netif_tx_lock held, BHs disabled */ | |
783 | static void enic_tx_timeout(struct net_device *netdev) | |
784 | { | |
785 | struct enic *enic = netdev_priv(netdev); | |
786 | schedule_work(&enic->reset); | |
787 | } | |
788 | ||
789 | static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) | |
790 | { | |
791 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
792 | ||
793 | if (!buf->os_buf) | |
794 | return; | |
795 | ||
796 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
797 | buf->len, PCI_DMA_FROMDEVICE); | |
798 | dev_kfree_skb_any(buf->os_buf); | |
799 | } | |
800 | ||
801 | static inline struct sk_buff *enic_rq_alloc_skb(unsigned int size) | |
802 | { | |
803 | struct sk_buff *skb; | |
804 | ||
805 | skb = dev_alloc_skb(size + NET_IP_ALIGN); | |
806 | ||
807 | if (skb) | |
808 | skb_reserve(skb, NET_IP_ALIGN); | |
809 | ||
810 | return skb; | |
811 | } | |
812 | ||
813 | static int enic_rq_alloc_buf(struct vnic_rq *rq) | |
814 | { | |
815 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
816 | struct sk_buff *skb; | |
817 | unsigned int len = enic->netdev->mtu + ETH_HLEN; | |
818 | unsigned int os_buf_index = 0; | |
819 | dma_addr_t dma_addr; | |
820 | ||
821 | skb = enic_rq_alloc_skb(len); | |
822 | if (!skb) | |
823 | return -ENOMEM; | |
824 | ||
825 | dma_addr = pci_map_single(enic->pdev, skb->data, | |
826 | len, PCI_DMA_FROMDEVICE); | |
827 | ||
828 | enic_queue_rq_desc(rq, skb, os_buf_index, | |
829 | dma_addr, len); | |
830 | ||
831 | return 0; | |
832 | } | |
833 | ||
834 | static int enic_get_skb_header(struct sk_buff *skb, void **iphdr, | |
835 | void **tcph, u64 *hdr_flags, void *priv) | |
836 | { | |
837 | struct cq_enet_rq_desc *cq_desc = priv; | |
838 | unsigned int ip_len; | |
839 | struct iphdr *iph; | |
840 | ||
841 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | |
842 | u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; | |
843 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | |
844 | u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; | |
845 | u8 packet_error; | |
846 | u16 q_number, completed_index, bytes_written, vlan, checksum; | |
847 | u32 rss_hash; | |
848 | ||
849 | cq_enet_rq_desc_dec(cq_desc, | |
850 | &type, &color, &q_number, &completed_index, | |
851 | &ingress_port, &fcoe, &eop, &sop, &rss_type, | |
852 | &csum_not_calc, &rss_hash, &bytes_written, | |
853 | &packet_error, &vlan_stripped, &vlan, &checksum, | |
854 | &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, | |
855 | &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, | |
856 | &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, | |
857 | &fcs_ok); | |
858 | ||
859 | if (!(ipv4 && tcp && !ipv4_fragment)) | |
860 | return -1; | |
861 | ||
862 | skb_reset_network_header(skb); | |
863 | iph = ip_hdr(skb); | |
864 | ||
865 | ip_len = ip_hdrlen(skb); | |
866 | skb_set_transport_header(skb, ip_len); | |
867 | ||
868 | /* Check that the IP and TCP headers are complete */ | |
869 | if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) | |
870 | return -1; | |
871 | ||
872 | *hdr_flags = LRO_IPV4 | LRO_TCP; | |
873 | *tcph = tcp_hdr(skb); | |
874 | *iphdr = iph; | |
875 | ||
876 | return 0; | |
877 | } | |
878 | ||
879 | static void enic_rq_indicate_buf(struct vnic_rq *rq, | |
880 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | |
881 | int skipped, void *opaque) | |
882 | { | |
883 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
884 | struct sk_buff *skb; | |
885 | ||
886 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | |
887 | u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; | |
888 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | |
889 | u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; | |
890 | u8 packet_error; | |
891 | u16 q_number, completed_index, bytes_written, vlan, checksum; | |
892 | u32 rss_hash; | |
893 | ||
894 | if (skipped) | |
895 | return; | |
896 | ||
897 | skb = buf->os_buf; | |
898 | prefetch(skb->data - NET_IP_ALIGN); | |
899 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
900 | buf->len, PCI_DMA_FROMDEVICE); | |
901 | ||
902 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, | |
903 | &type, &color, &q_number, &completed_index, | |
904 | &ingress_port, &fcoe, &eop, &sop, &rss_type, | |
905 | &csum_not_calc, &rss_hash, &bytes_written, | |
906 | &packet_error, &vlan_stripped, &vlan, &checksum, | |
907 | &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, | |
908 | &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, | |
909 | &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, | |
910 | &fcs_ok); | |
911 | ||
912 | if (packet_error) { | |
913 | ||
914 | if (bytes_written > 0 && !fcs_ok) { | |
915 | if (net_ratelimit()) | |
916 | printk(KERN_ERR PFX | |
917 | "%s: packet error: bad FCS\n", | |
918 | enic->netdev->name); | |
919 | } | |
920 | ||
921 | dev_kfree_skb_any(skb); | |
922 | ||
923 | return; | |
924 | } | |
925 | ||
926 | if (eop && bytes_written > 0) { | |
927 | ||
928 | /* Good receive | |
929 | */ | |
930 | ||
931 | skb_put(skb, bytes_written); | |
932 | skb->protocol = eth_type_trans(skb, enic->netdev); | |
933 | ||
934 | if (enic->csum_rx_enabled && !csum_not_calc) { | |
935 | skb->csum = htons(checksum); | |
936 | skb->ip_summed = CHECKSUM_COMPLETE; | |
937 | } | |
938 | ||
939 | skb->dev = enic->netdev; | |
940 | enic->netdev->last_rx = jiffies; | |
941 | ||
942 | if (enic->vlan_group && vlan_stripped) { | |
943 | ||
944 | if (ENIC_SETTING(enic, LRO)) | |
945 | lro_vlan_hwaccel_receive_skb(&enic->lro_mgr, | |
946 | skb, enic->vlan_group, | |
947 | vlan, cq_desc); | |
948 | else | |
949 | vlan_hwaccel_receive_skb(skb, | |
950 | enic->vlan_group, vlan); | |
951 | ||
952 | } else { | |
953 | ||
954 | if (ENIC_SETTING(enic, LRO)) | |
955 | lro_receive_skb(&enic->lro_mgr, skb, cq_desc); | |
956 | else | |
957 | netif_receive_skb(skb); | |
958 | ||
959 | } | |
960 | ||
961 | } else { | |
962 | ||
963 | /* Buffer overflow | |
964 | */ | |
965 | ||
966 | dev_kfree_skb_any(skb); | |
967 | } | |
968 | } | |
969 | ||
970 | static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |
971 | u8 type, u16 q_number, u16 completed_index, void *opaque) | |
972 | { | |
973 | struct enic *enic = vnic_dev_priv(vdev); | |
974 | ||
975 | vnic_rq_service(&enic->rq[q_number], cq_desc, | |
976 | completed_index, VNIC_RQ_RETURN_DESC, | |
977 | enic_rq_indicate_buf, opaque); | |
978 | ||
979 | return 0; | |
980 | } | |
981 | ||
982 | static void enic_rq_drop_buf(struct vnic_rq *rq, | |
983 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | |
984 | int skipped, void *opaque) | |
985 | { | |
986 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
987 | struct sk_buff *skb = buf->os_buf; | |
988 | ||
989 | if (skipped) | |
990 | return; | |
991 | ||
992 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
993 | buf->len, PCI_DMA_FROMDEVICE); | |
994 | ||
995 | dev_kfree_skb_any(skb); | |
996 | } | |
997 | ||
998 | static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |
999 | u8 type, u16 q_number, u16 completed_index, void *opaque) | |
1000 | { | |
1001 | struct enic *enic = vnic_dev_priv(vdev); | |
1002 | ||
1003 | vnic_rq_service(&enic->rq[q_number], cq_desc, | |
1004 | completed_index, VNIC_RQ_RETURN_DESC, | |
1005 | enic_rq_drop_buf, opaque); | |
1006 | ||
1007 | return 0; | |
1008 | } | |
1009 | ||
1010 | static int enic_poll(struct napi_struct *napi, int budget) | |
1011 | { | |
1012 | struct enic *enic = container_of(napi, struct enic, napi); | |
1013 | struct net_device *netdev = enic->netdev; | |
1014 | unsigned int rq_work_to_do = budget; | |
1015 | unsigned int wq_work_to_do = -1; /* no limit */ | |
1016 | unsigned int work_done, rq_work_done, wq_work_done; | |
1017 | ||
1018 | /* Service RQ (first) and WQ | |
1019 | */ | |
1020 | ||
1021 | rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | |
1022 | rq_work_to_do, enic_rq_service, NULL); | |
1023 | ||
1024 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | |
1025 | wq_work_to_do, enic_wq_service, NULL); | |
1026 | ||
1027 | /* Accumulate intr event credits for this polling | |
1028 | * cycle. An intr event is the completion of | |
1029 | * a WQ or RQ packet. | |
1030 | */ | |
1031 | ||
1032 | work_done = rq_work_done + wq_work_done; | |
1033 | ||
1034 | if (work_done > 0) | |
1035 | vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ], | |
1036 | work_done, | |
1037 | 0 /* don't unmask intr */, | |
1038 | 0 /* don't reset intr timer */); | |
1039 | ||
1040 | if (rq_work_done > 0) { | |
1041 | ||
1042 | /* Replenish RQ | |
1043 | */ | |
1044 | ||
1045 | vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); | |
1046 | ||
1047 | } else { | |
1048 | ||
1049 | /* If no work done, flush all LROs and exit polling | |
1050 | */ | |
1051 | ||
1052 | if (ENIC_SETTING(enic, LRO)) | |
1053 | lro_flush_all(&enic->lro_mgr); | |
1054 | ||
1055 | netif_rx_complete(netdev, napi); | |
1056 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | |
1057 | } | |
1058 | ||
1059 | return rq_work_done; | |
1060 | } | |
1061 | ||
1062 | static int enic_poll_msix(struct napi_struct *napi, int budget) | |
1063 | { | |
1064 | struct enic *enic = container_of(napi, struct enic, napi); | |
1065 | struct net_device *netdev = enic->netdev; | |
1066 | unsigned int work_to_do = budget; | |
1067 | unsigned int work_done; | |
1068 | ||
1069 | /* Service RQ | |
1070 | */ | |
1071 | ||
1072 | work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | |
1073 | work_to_do, enic_rq_service, NULL); | |
1074 | ||
1075 | if (work_done > 0) { | |
1076 | ||
1077 | /* Replenish RQ | |
1078 | */ | |
1079 | ||
1080 | vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf); | |
1081 | ||
1082 | /* Accumulate intr event credits for this polling | |
1084 | * cycle. An intr event is the completion of | |
1084 | * a WQ or RQ packet. | |
1085 | */ | |
1086 | ||
1087 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], | |
1088 | work_done, | |
1089 | 0 /* don't unmask intr */, | |
1090 | 0 /* don't reset intr timer */); | |
1091 | } else { | |
1092 | ||
1093 | /* If no work done, flush all LROs and exit polling | |
1094 | */ | |
1095 | ||
1096 | if (ENIC_SETTING(enic, LRO)) | |
1097 | lro_flush_all(&enic->lro_mgr); | |
1098 | ||
1099 | netif_rx_complete(netdev, napi); | |
1100 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); | |
1101 | } | |
1102 | ||
1103 | return work_done; | |
1104 | } | |
1105 | ||
1106 | static void enic_notify_timer(unsigned long data) | |
1107 | { | |
1108 | struct enic *enic = (struct enic *)data; | |
1109 | ||
1110 | enic_notify_check(enic); | |
1111 | ||
1112 | mod_timer(&enic->notify_timer, round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); | |
1113 | } | |
1114 | ||
1115 | static void enic_free_intr(struct enic *enic) | |
1116 | { | |
1117 | struct net_device *netdev = enic->netdev; | |
1118 | unsigned int i; | |
1119 | ||
1120 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1121 | case VNIC_DEV_INTR_MODE_INTX: | |
1122 | case VNIC_DEV_INTR_MODE_MSI: | |
1123 | free_irq(enic->pdev->irq, netdev); | |
1124 | break; | |
1125 | case VNIC_DEV_INTR_MODE_MSIX: | |
1126 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) | |
1127 | if (enic->msix[i].requested) | |
1128 | free_irq(enic->msix_entry[i].vector, | |
1129 | enic->msix[i].devid); | |
1130 | break; | |
1131 | default: | |
1132 | break; | |
1133 | } | |
1134 | } | |
1135 | ||
1136 | static int enic_request_intr(struct enic *enic) | |
1137 | { | |
1138 | struct net_device *netdev = enic->netdev; | |
1139 | unsigned int i; | |
1140 | int err = 0; | |
1141 | ||
1142 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1143 | ||
1144 | case VNIC_DEV_INTR_MODE_INTX: | |
1145 | ||
1146 | err = request_irq(enic->pdev->irq, enic_isr_legacy, | |
1147 | IRQF_SHARED, netdev->name, netdev); | |
1148 | break; | |
1149 | ||
1150 | case VNIC_DEV_INTR_MODE_MSI: | |
1151 | ||
1152 | err = request_irq(enic->pdev->irq, enic_isr_msi, | |
1153 | 0, netdev->name, enic); | |
1154 | break; | |
1155 | ||
1156 | case VNIC_DEV_INTR_MODE_MSIX: | |
1157 | ||
1158 | sprintf(enic->msix[ENIC_MSIX_RQ].devname, | |
1159 | "%.11s-rx", netdev->name); | |
1160 | enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq; | |
1161 | enic->msix[ENIC_MSIX_RQ].devid = enic; | |
1162 | ||
1163 | sprintf(enic->msix[ENIC_MSIX_WQ].devname, | |
1164 | "%.11s-tx", netdev->name); | |
1165 | enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq; | |
1166 | enic->msix[ENIC_MSIX_WQ].devid = enic; | |
1167 | ||
1168 | sprintf(enic->msix[ENIC_MSIX_ERR].devname, | |
1169 | "%.11s-err", netdev->name); | |
1170 | enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err; | |
1171 | enic->msix[ENIC_MSIX_ERR].devid = enic; | |
1172 | ||
1173 | sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname, | |
1174 | "%.11s-notify", netdev->name); | |
1175 | enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify; | |
1176 | enic->msix[ENIC_MSIX_NOTIFY].devid = enic; | |
1177 | ||
1178 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) { | |
1179 | err = request_irq(enic->msix_entry[i].vector, | |
1180 | enic->msix[i].isr, 0, | |
1181 | enic->msix[i].devname, | |
1182 | enic->msix[i].devid); | |
1183 | if (err) { | |
1184 | enic_free_intr(enic); | |
1185 | break; | |
1186 | } | |
1187 | enic->msix[i].requested = 1; | |
1188 | } | |
1189 | ||
1190 | break; | |
1191 | ||
1192 | default: | |
1193 | break; | |
1194 | } | |
1195 | ||
1196 | return err; | |
1197 | } | |
1198 | ||
1199 | static int enic_notify_set(struct enic *enic) | |
1200 | { | |
1201 | int err; | |
1202 | ||
1203 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1204 | case VNIC_DEV_INTR_MODE_INTX: | |
1205 | err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY); | |
1206 | break; | |
1207 | case VNIC_DEV_INTR_MODE_MSIX: | |
1208 | err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY); | |
1209 | break; | |
1210 | default: | |
1211 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); | |
1212 | break; | |
1213 | } | |
1214 | ||
1215 | return err; | |
1216 | } | |
1217 | ||
1218 | static void enic_notify_timer_start(struct enic *enic) | |
1219 | { | |
1220 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1221 | case VNIC_DEV_INTR_MODE_MSI: | |
1222 | mod_timer(&enic->notify_timer, jiffies); | |
1223 | break; | |
1224 | default: | |
1225 | /* Using the notify intr for INTx/MSI-X */ | |
1226 | break; | |
1227 | } | |
1228 | } | |
1229 | ||
1230 | /* rtnl lock is held, process context */ | |
1231 | static int enic_open(struct net_device *netdev) | |
1232 | { | |
1233 | struct enic *enic = netdev_priv(netdev); | |
1234 | unsigned int i; | |
1235 | int err; | |
1236 | ||
1237 | for (i = 0; i < enic->rq_count; i++) { | |
1238 | err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf); | |
1239 | if (err) { | |
1240 | printk(KERN_ERR PFX | |
1241 | "%s: Unable to alloc receive buffers.\n", | |
1242 | netdev->name); | |
1243 | return err; | |
1244 | } | |
1245 | } | |
1246 | ||
1247 | for (i = 0; i < enic->wq_count; i++) | |
1248 | vnic_wq_enable(&enic->wq[i]); | |
1249 | for (i = 0; i < enic->rq_count; i++) | |
1250 | vnic_rq_enable(&enic->rq[i]); | |
1251 | ||
1252 | enic_add_station_addr(enic); | |
1253 | enic_set_multicast_list(netdev); | |
1254 | ||
1255 | netif_wake_queue(netdev); | |
1256 | napi_enable(&enic->napi); | |
1257 | vnic_dev_enable(enic->vdev); | |
1258 | ||
1259 | for (i = 0; i < enic->intr_count; i++) | |
1260 | vnic_intr_unmask(&enic->intr[i]); | |
1261 | ||
1262 | enic_notify_timer_start(enic); | |
1263 | ||
1264 | return 0; | |
1265 | } | |
1266 | ||
1267 | /* rtnl lock is held, process context */ | |
1268 | static int enic_stop(struct net_device *netdev) | |
1269 | { | |
1270 | struct enic *enic = netdev_priv(netdev); | |
1271 | unsigned int i; | |
1272 | int err; | |
1273 | ||
1274 | del_timer_sync(&enic->notify_timer); | |
1275 | ||
1276 | vnic_dev_disable(enic->vdev); | |
1277 | napi_disable(&enic->napi); | |
1278 | netif_stop_queue(netdev); | |
1279 | ||
1280 | for (i = 0; i < enic->intr_count; i++) | |
1281 | vnic_intr_mask(&enic->intr[i]); | |
1282 | ||
1283 | for (i = 0; i < enic->wq_count; i++) { | |
1284 | err = vnic_wq_disable(&enic->wq[i]); | |
1285 | if (err) | |
1286 | return err; | |
1287 | } | |
1288 | for (i = 0; i < enic->rq_count; i++) { | |
1289 | err = vnic_rq_disable(&enic->rq[i]); | |
1290 | if (err) | |
1291 | return err; | |
1292 | } | |
1293 | ||
1294 | (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | |
1295 | -1, enic_rq_service_drop, NULL); | |
1296 | (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | |
1297 | -1, enic_wq_service, NULL); | |
1298 | ||
1299 | for (i = 0; i < enic->wq_count; i++) | |
1300 | vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); | |
1301 | for (i = 0; i < enic->rq_count; i++) | |
1302 | vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); | |
1303 | for (i = 0; i < enic->cq_count; i++) | |
1304 | vnic_cq_clean(&enic->cq[i]); | |
1305 | for (i = 0; i < enic->intr_count; i++) | |
1306 | vnic_intr_clean(&enic->intr[i]); | |
1307 | ||
1308 | return 0; | |
1309 | } | |
1310 | ||
1311 | static int enic_change_mtu(struct net_device *netdev, int new_mtu) | |
1312 | { | |
1313 | struct enic *enic = netdev_priv(netdev); | |
1314 | int running = netif_running(netdev); | |
1315 | ||
1316 | if (running) | |
1317 | enic_stop(netdev); | |
1318 | ||
1319 | if (new_mtu < ENIC_MIN_MTU) | |
1320 | new_mtu = ENIC_MIN_MTU; | |
1321 | if (new_mtu > ENIC_MAX_MTU) | |
1322 | new_mtu = ENIC_MAX_MTU; | |
1323 | ||
1324 | netdev->mtu = new_mtu; | |
1325 | ||
1326 | if (netdev->mtu > enic->port_mtu) | |
1327 | printk(KERN_WARNING PFX | |
1328 | "%s: interface MTU (%d) set higher " | |
1329 | "than port MTU (%d)\n", | |
1330 | netdev->name, netdev->mtu, enic->port_mtu); | |
1331 | ||
1332 | if (running) | |
1333 | enic_open(netdev); | |
1334 | ||
1335 | return 0; | |
1336 | } | |
1337 | ||
1338 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1339 | static void enic_poll_controller(struct net_device *netdev) | |
1340 | { | |
1341 | struct enic *enic = netdev_priv(netdev); | |
1342 | struct vnic_dev *vdev = enic->vdev; | |
1343 | ||
1344 | switch (vnic_dev_get_intr_mode(vdev)) { | |
1345 | case VNIC_DEV_INTR_MODE_MSIX: | |
1346 | enic_isr_msix_rq(enic->pdev->irq, enic); | |
1347 | enic_isr_msix_wq(enic->pdev->irq, enic); | |
1348 | break; | |
1349 | case VNIC_DEV_INTR_MODE_MSI: | |
1350 | enic_isr_msi(enic->pdev->irq, enic); | |
1351 | break; | |
1352 | case VNIC_DEV_INTR_MODE_INTX: | |
1353 | enic_isr_legacy(enic->pdev->irq, netdev); | |
1354 | break; | |
1355 | default: | |
1356 | break; | |
1357 | } | |
1358 | } | |
1359 | #endif | |
1360 | ||
1361 | static int enic_dev_wait(struct vnic_dev *vdev, | |
1362 | int (*start)(struct vnic_dev *, int), | |
1363 | int (*finished)(struct vnic_dev *, int *), | |
1364 | int arg) | |
1365 | { | |
1366 | unsigned long time; | |
1367 | int done; | |
1368 | int err; | |
1369 | ||
1370 | BUG_ON(in_interrupt()); | |
1371 | ||
1372 | err = start(vdev, arg); | |
1373 | if (err) | |
1374 | return err; | |
1375 | ||
1376 | /* Wait for func to complete...2 seconds max | |
1377 | */ | |
1378 | ||
1379 | time = jiffies + (HZ * 2); | |
1380 | do { | |
1381 | ||
1382 | err = finished(vdev, &done); | |
1383 | if (err) | |
1384 | return err; | |
1385 | ||
1386 | if (done) | |
1387 | return 0; | |
1388 | ||
1389 | schedule_timeout_uninterruptible(HZ / 10); | |
1390 | ||
1391 | } while (time_after(time, jiffies)); | |
1392 | ||
1393 | return -ETIMEDOUT; | |
1394 | } | |
1395 | ||
1396 | static int enic_dev_open(struct enic *enic) | |
1397 | { | |
1398 | int err; | |
1399 | ||
1400 | err = enic_dev_wait(enic->vdev, vnic_dev_open, | |
1401 | vnic_dev_open_done, 0); | |
1402 | if (err) | |
1403 | printk(KERN_ERR PFX | |
1404 | "vNIC device open failed, err %d.\n", err); | |
1405 | ||
1406 | return err; | |
1407 | } | |
1408 | ||
1409 | static int enic_dev_soft_reset(struct enic *enic) | |
1410 | { | |
1411 | int err; | |
1412 | ||
1413 | err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, | |
1414 | vnic_dev_soft_reset_done, 0); | |
1415 | if (err) | |
1416 | printk(KERN_ERR PFX | |
1417 | "vNIC soft reset failed, err %d.\n", err); | |
1418 | ||
1419 | return err; | |
1420 | } | |
1421 | ||
1422 | static void enic_reset(struct work_struct *work) | |
1423 | { | |
1424 | struct enic *enic = container_of(work, struct enic, reset); | |
1425 | ||
1426 | if (!netif_running(enic->netdev)) | |
1427 | return; | |
1428 | ||
1429 | rtnl_lock(); | |
1430 | ||
1431 | spin_lock(&enic->devcmd_lock); | |
1432 | vnic_dev_hang_notify(enic->vdev); | |
1433 | spin_unlock(&enic->devcmd_lock); | |
1434 | ||
1435 | enic_stop(enic->netdev); | |
1436 | enic_dev_soft_reset(enic); | |
1437 | enic_reset_mcaddrs(enic); | |
1438 | enic_init_vnic_resources(enic); | |
1439 | enic_open(enic->netdev); | |
1440 | ||
1441 | rtnl_unlock(); | |
1442 | } | |
1443 | ||
1444 | static int enic_set_intr_mode(struct enic *enic) | |
1445 | { | |
1446 | unsigned int n = ARRAY_SIZE(enic->rq); | |
1447 | unsigned int m = ARRAY_SIZE(enic->wq); | |
1448 | unsigned int i; | |
1449 | ||
1450 | /* Set interrupt mode (INTx, MSI, MSI-X) depending | |
1451 | * on system capabilities. | |
1452 | * | |
1453 | * Try MSI-X first | |
1454 | * | |
1455 | * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs | |
1456 | * (the second to last INTR is used for WQ/RQ errors) | |
1457 | * (the last INTR is used for notifications) | |
1458 | */ | |
1459 | ||
1460 | BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); | |
1461 | for (i = 0; i < n + m + 2; i++) | |
1462 | enic->msix_entry[i].entry = i; | |
1463 | ||
1464 | if (enic->config.intr_mode < 1 && | |
1465 | enic->rq_count >= n && | |
1466 | enic->wq_count >= m && | |
1467 | enic->cq_count >= n + m && | |
1468 | enic->intr_count >= n + m + 2 && | |
1469 | !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { | |
1470 | ||
1471 | enic->rq_count = n; | |
1472 | enic->wq_count = m; | |
1473 | enic->cq_count = n + m; | |
1474 | enic->intr_count = n + m + 2; | |
1475 | ||
1476 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); | |
1477 | ||
1478 | return 0; | |
1479 | } | |
1480 | ||
1481 | /* Next try MSI | |
1482 | * | |
1483 | * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR | |
1484 | */ | |
1485 | ||
1486 | if (enic->config.intr_mode < 2 && | |
1487 | enic->rq_count >= 1 && | |
1488 | enic->wq_count >= 1 && | |
1489 | enic->cq_count >= 2 && | |
1490 | enic->intr_count >= 1 && | |
1491 | !pci_enable_msi(enic->pdev)) { | |
1492 | ||
1493 | enic->rq_count = 1; | |
1494 | enic->wq_count = 1; | |
1495 | enic->cq_count = 2; | |
1496 | enic->intr_count = 1; | |
1497 | ||
1498 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); | |
1499 | ||
1500 | return 0; | |
1501 | } | |
1502 | ||
1503 | /* Next try INTx | |
1504 | * | |
1505 | * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs | |
1506 | * (the first INTR is used for WQ/RQ) | |
1507 | * (the second INTR is used for WQ/RQ errors) | |
1508 | * (the last INTR is used for notifications) | |
1509 | */ | |
1510 | ||
1511 | if (enic->config.intr_mode < 3 && | |
1512 | enic->rq_count >= 1 && | |
1513 | enic->wq_count >= 1 && | |
1514 | enic->cq_count >= 2 && | |
1515 | enic->intr_count >= 3) { | |
1516 | ||
1517 | enic->rq_count = 1; | |
1518 | enic->wq_count = 1; | |
1519 | enic->cq_count = 2; | |
1520 | enic->intr_count = 3; | |
1521 | ||
1522 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); | |
1523 | ||
1524 | return 0; | |
1525 | } | |
1526 | ||
1527 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | |
1528 | ||
1529 | return -EINVAL; | |
1530 | } | |
1531 | ||
1532 | static void enic_clear_intr_mode(struct enic *enic) | |
1533 | { | |
1534 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1535 | case VNIC_DEV_INTR_MODE_MSIX: | |
1536 | pci_disable_msix(enic->pdev); | |
1537 | break; | |
1538 | case VNIC_DEV_INTR_MODE_MSI: | |
1539 | pci_disable_msi(enic->pdev); | |
1540 | break; | |
1541 | default: | |
1542 | break; | |
1543 | } | |
1544 | ||
1545 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | |
1546 | } | |
1547 | ||
1548 | static void enic_iounmap(struct enic *enic) | |
1549 | { | |
1550 | if (enic->bar0.vaddr) | |
1551 | iounmap(enic->bar0.vaddr); | |
1552 | } | |
1553 | ||
1554 | static int __devinit enic_probe(struct pci_dev *pdev, | |
1555 | const struct pci_device_id *ent) | |
1556 | { | |
1557 | struct net_device *netdev; | |
1558 | struct enic *enic; | |
1559 | int using_dac = 0; | |
1560 | unsigned int i; | |
1561 | int err; | |
1562 | ||
1563 | const u8 rss_default_cpu = 0; | |
1564 | const u8 rss_hash_type = 0; | |
1565 | const u8 rss_hash_bits = 0; | |
1566 | const u8 rss_base_cpu = 0; | |
1567 | const u8 rss_enable = 0; | |
1568 | const u8 tso_ipid_split_en = 0; | |
1569 | const u8 ig_vlan_strip_en = 1; | |
1570 | ||
1571 | /* Allocate net device structure and initialize. Private | |
1572 | * instance data is initialized to zero. | |
1573 | */ | |
1574 | ||
1575 | netdev = alloc_etherdev(sizeof(struct enic)); | |
1576 | if (!netdev) { | |
1577 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | |
1578 | return -ENOMEM; | |
1579 | } | |
1580 | ||
1581 | /* Set the netdev name early so intr vectors are properly | |
1582 | * named and any error msgs can include netdev->name | |
1583 | */ | |
1584 | ||
1585 | rtnl_lock(); | |
1586 | err = dev_alloc_name(netdev, netdev->name); | |
1587 | rtnl_unlock(); | |
1588 | if (err < 0) { | |
1589 | printk(KERN_ERR PFX "Unable to allocate netdev name.\n"); | |
1590 | goto err_out_free_netdev; | |
1591 | } | |
1592 | ||
1593 | pci_set_drvdata(pdev, netdev); | |
1594 | ||
1595 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
1596 | ||
1597 | enic = netdev_priv(netdev); | |
1598 | enic->netdev = netdev; | |
1599 | enic->pdev = pdev; | |
1600 | ||
1601 | /* Setup PCI resources | |
1602 | */ | |
1603 | ||
1604 | err = pci_enable_device(pdev); | |
1605 | if (err) { | |
1606 | printk(KERN_ERR PFX | |
1607 | "%s: Cannot enable PCI device, aborting.\n", | |
1608 | netdev->name); | |
1609 | goto err_out_free_netdev; | |
1610 | } | |
1611 | ||
1612 | err = pci_request_regions(pdev, DRV_NAME); | |
1613 | if (err) { | |
1614 | printk(KERN_ERR PFX | |
1615 | "%s: Cannot request PCI regions, aborting.\n", | |
1616 | netdev->name); | |
1617 | goto err_out_disable_device; | |
1618 | } | |
1619 | ||
1620 | pci_set_master(pdev); | |
1621 | ||
1622 | /* Query PCI controller on system for DMA addressing | |
1623 | * limitation for the device. Try 40-bit first, and | |
1624 | * fail to 32-bit. | |
1625 | */ | |
1626 | ||
1627 | err = pci_set_dma_mask(pdev, DMA_40BIT_MASK); | |
1628 | if (err) { | |
1629 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | |
1630 | if (err) { | |
1631 | printk(KERN_ERR PFX | |
1632 | "%s: No usable DMA configuration, aborting.\n", | |
1633 | netdev->name); | |
1634 | goto err_out_release_regions; | |
1635 | } | |
1636 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | |
1637 | if (err) { | |
1638 | printk(KERN_ERR PFX | |
1639 | "%s: Unable to obtain 32-bit DMA " | |
1640 | "for consistent allocations, aborting.\n", | |
1641 | netdev->name); | |
1642 | goto err_out_release_regions; | |
1643 | } | |
1644 | } else { | |
1645 | err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK); | |
1646 | if (err) { | |
1647 | printk(KERN_ERR PFX | |
1648 | "%s: Unable to obtain 40-bit DMA " | |
1649 | "for consistent allocations, aborting.\n", | |
1650 | netdev->name); | |
1651 | goto err_out_release_regions; | |
1652 | } | |
1653 | using_dac = 1; | |
1654 | } | |
1655 | ||
1656 | /* Map vNIC resources from BAR0 | |
1657 | */ | |
1658 | ||
1659 | if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { | |
1660 | printk(KERN_ERR PFX | |
1661 | "%s: BAR0 not memory-map'able, aborting.\n", | |
1662 | netdev->name); | |
1663 | err = -ENODEV; | |
1664 | goto err_out_release_regions; | |
1665 | } | |
1666 | ||
1667 | enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len); | |
1668 | enic->bar0.bus_addr = pci_resource_start(pdev, 0); | |
1669 | enic->bar0.len = pci_resource_len(pdev, 0); | |
1670 | ||
1671 | if (!enic->bar0.vaddr) { | |
1672 | printk(KERN_ERR PFX | |
1673 | "%s: Cannot memory-map BAR0 res hdr, aborting.\n", | |
1674 | netdev->name); | |
1675 | err = -ENODEV; | |
1676 | goto err_out_release_regions; | |
1677 | } | |

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
	if (!enic->vdev) {
		printk(KERN_ERR PFX
			"%s: vNIC registration failed, aborting.\n",
			netdev->name);
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: vNIC dev open failed, aborting.\n",
			netdev->name);
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier.  We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

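	/* The second argument to vnic_dev_init() is an argument word for
	 * the init devcmd; 0 is assumed here to request the default
	 * initialization with no special flags.
	 */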
	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX
			"%s: vNIC dev init failed, aborting.\n",
			netdev->name);
		goto err_out_dev_close;
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Get vNIC configuration failed, aborting.\n",
			netdev->name);
		goto err_out_dev_close;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to set intr mode, aborting.\n",
			netdev->name);
		goto err_out_dev_close;
	}
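	/* enic_set_intr_mode() is assumed to pick the richest interrupt
	 * mode the resource counts and platform allow -- MSI-X where
	 * available (see the VNIC_DEV_INTR_MODE_MSIX handling below),
	 * otherwise MSI or legacy INTx.
	 */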

	/* Request interrupt vector(s)
	 */

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		goto err_out_dev_close;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc vNIC resources, aborting.\n",
			netdev->name);
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	/* Enable VLAN tag stripping. RSS not enabled (yet).
	 */
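	/* The rss_* and *_en arguments below are set up earlier in this
	 * function (not shown here); with RSS left disabled they are
	 * assumed to carry benign defaults, so the only behavioural change
	 * requested is ingress VLAN tag stripping.
	 */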

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to config nic, aborting.\n",
			netdev->name);
		goto err_out_free_vnic_resources;
	}

	/* Setup notification buffer area
	 */

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_vnic_resources;
	}

	/* Setup notification timer, HW reset task, and locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);
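	/* Locking notes (descriptive, based on how these fields are named
	 * and used elsewhere in the driver): devcmd_lock is assumed to
	 * serialize devcmd firmware operations issued through enic->vdev,
	 * while wq_lock[] protects each transmit (work) queue.  The
	 * notify_timer uses the legacy init_timer()/function/data API of
	 * this kernel generation to poll the notify area periodically.
	 */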

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Invalid MAC address, aborting.\n",
			netdev->name);
		goto err_out_notify_unset;
	}

	netdev->open = enic_open;
	netdev->stop = enic_stop;
	netdev->hard_start_xmit = enic_hard_start_xmit;
	netdev->get_stats = enic_get_stats;
	netdev->set_multicast_list = enic_set_multicast_list;
	netdev->change_mtu = enic_change_mtu;
	netdev->vlan_rx_register = enic_vlan_rx_register;
	netdev->vlan_rx_add_vid = enic_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = enic_vlan_rx_kill_vid;
	netdev->tx_timeout = enic_tx_timeout;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = enic_poll_controller;
#endif
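	/* These handlers are installed directly on struct net_device,
	 * the style used before net_device_ops was introduced.
	 * watchdog_timeo gives the stack a 2 second TX-hang watchdog
	 * before enic_tx_timeout() is invoked.
	 */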

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}
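	/* A dedicated NAPI poll routine is registered for MSI-X; all other
	 * interrupt modes share enic_poll().  The final argument, 64, is
	 * the NAPI weight (maximum packets processed per poll call).
	 */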

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	if (ENIC_SETTING(enic, LRO)) {
		enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
		enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
		enic->lro_mgr.lro_arr = enic->lro_desc;
		enic->lro_mgr.get_skb_header = enic_get_skb_header;
		enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
		enic->lro_mgr.dev = netdev;
		enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
		enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
	}
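	/* The lro_mgr fields above configure the kernel's inet_lro helper:
	 * up to ENIC_LRO_MAX_AGGR frames are aggregated per descriptor,
	 * aggregation runs in NAPI context, and VLAN IDs are extracted so
	 * tagged flows can still be merged.  ENIC_SETTING() is assumed to
	 * reflect per-vNIC capability flags read in enic_get_vnic_config().
	 */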

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Cannot register net device, aborting.\n",
			netdev->name);
		goto err_out_notify_unset;
	}

	return 0;

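	/* The error labels below unwind the probe steps above in reverse
	 * order; jumping to a label releases everything acquired before
	 * the point of failure.
	 */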
err_out_notify_unset:
	vnic_dev_notify_unset(enic->vdev);
err_out_free_vnic_resources:
	enic_free_vnic_resources(enic);
	enic_free_intr(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	enic_clear_intr_mode(enic);
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

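/* Device teardown: mirrors the error-unwind path of enic_probe().
 * flush_scheduled_work() runs first so a pending enic_reset work item
 * is assumed to have completed before the device is dismantled.
 */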
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		flush_scheduled_work();
		unregister_netdev(netdev);
		vnic_dev_notify_unset(enic->vdev);
		enic_free_vnic_resources(enic);
		enic_free_intr(enic);
		vnic_dev_close(enic->vdev);
		enic_clear_intr_mode(enic);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);