Commit | Line | Data |
---|---|---|
01f2e4ea SF |
1 | /* |
2 | * Copyright 2008 Cisco Systems, Inc. All rights reserved. | |
3 | * Copyright 2007 Nuova Systems, Inc. All rights reserved. | |
4 | * | |
5 | * This program is free software; you may redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; version 2 of the License. | |
8 | * | |
9 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
10 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
11 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
12 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
13 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
14 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
15 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
16 | * SOFTWARE. | |
17 | * | |
18 | */ | |
19 | ||
20 | #include <linux/module.h> | |
21 | #include <linux/kernel.h> | |
22 | #include <linux/string.h> | |
23 | #include <linux/errno.h> | |
24 | #include <linux/types.h> | |
25 | #include <linux/init.h> | |
26 | #include <linux/workqueue.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/netdevice.h> | |
29 | #include <linux/etherdevice.h> | |
30 | #include <linux/if_ether.h> | |
31 | #include <linux/if_vlan.h> | |
32 | #include <linux/ethtool.h> | |
33 | #include <linux/in.h> | |
34 | #include <linux/ip.h> | |
35 | #include <linux/ipv6.h> | |
36 | #include <linux/tcp.h> | |
b7c6bfb7 | 37 | #include <net/ip6_checksum.h> |
01f2e4ea SF |
38 | |
39 | #include "cq_enet_desc.h" | |
40 | #include "vnic_dev.h" | |
41 | #include "vnic_intr.h" | |
42 | #include "vnic_stats.h" | |
43 | #include "enic_res.h" | |
44 | #include "enic.h" | |
45 | ||
46 | #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) | |
ea0d7d91 SF |
47 | #define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) |
48 | #define MAX_TSO (1 << 16) | |
49 | #define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) | |
50 | ||
51 | #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ | |
01f2e4ea SF |
52 | |
53 | /* Supported devices */ | |
54 | static struct pci_device_id enic_id_table[] = { | |
ea0d7d91 | 55 | { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, |
01f2e4ea SF |
56 | { 0, } /* end of table */ |
57 | }; | |
58 | ||
59 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | |
60 | MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>"); | |
61 | MODULE_LICENSE("GPL"); | |
62 | MODULE_VERSION(DRV_VERSION); | |
63 | MODULE_DEVICE_TABLE(pci, enic_id_table); | |
64 | ||
65 | struct enic_stat { | |
66 | char name[ETH_GSTRING_LEN]; | |
67 | unsigned int offset; | |
68 | }; | |
69 | ||
70 | #define ENIC_TX_STAT(stat) \ | |
71 | { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 } | |
72 | #define ENIC_RX_STAT(stat) \ | |
73 | { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 } | |
74 | ||
75 | static const struct enic_stat enic_tx_stats[] = { | |
76 | ENIC_TX_STAT(tx_frames_ok), | |
77 | ENIC_TX_STAT(tx_unicast_frames_ok), | |
78 | ENIC_TX_STAT(tx_multicast_frames_ok), | |
79 | ENIC_TX_STAT(tx_broadcast_frames_ok), | |
80 | ENIC_TX_STAT(tx_bytes_ok), | |
81 | ENIC_TX_STAT(tx_unicast_bytes_ok), | |
82 | ENIC_TX_STAT(tx_multicast_bytes_ok), | |
83 | ENIC_TX_STAT(tx_broadcast_bytes_ok), | |
84 | ENIC_TX_STAT(tx_drops), | |
85 | ENIC_TX_STAT(tx_errors), | |
86 | ENIC_TX_STAT(tx_tso), | |
87 | }; | |
88 | ||
89 | static const struct enic_stat enic_rx_stats[] = { | |
90 | ENIC_RX_STAT(rx_frames_ok), | |
91 | ENIC_RX_STAT(rx_frames_total), | |
92 | ENIC_RX_STAT(rx_unicast_frames_ok), | |
93 | ENIC_RX_STAT(rx_multicast_frames_ok), | |
94 | ENIC_RX_STAT(rx_broadcast_frames_ok), | |
95 | ENIC_RX_STAT(rx_bytes_ok), | |
96 | ENIC_RX_STAT(rx_unicast_bytes_ok), | |
97 | ENIC_RX_STAT(rx_multicast_bytes_ok), | |
98 | ENIC_RX_STAT(rx_broadcast_bytes_ok), | |
99 | ENIC_RX_STAT(rx_drop), | |
100 | ENIC_RX_STAT(rx_no_bufs), | |
101 | ENIC_RX_STAT(rx_errors), | |
102 | ENIC_RX_STAT(rx_rss), | |
103 | ENIC_RX_STAT(rx_crc_errors), | |
104 | ENIC_RX_STAT(rx_frames_64), | |
105 | ENIC_RX_STAT(rx_frames_127), | |
106 | ENIC_RX_STAT(rx_frames_255), | |
107 | ENIC_RX_STAT(rx_frames_511), | |
108 | ENIC_RX_STAT(rx_frames_1023), | |
109 | ENIC_RX_STAT(rx_frames_1518), | |
110 | ENIC_RX_STAT(rx_frames_to_max), | |
111 | }; | |
112 | ||
113 | static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); | |
114 | static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); | |
115 | ||
116 | static int enic_get_settings(struct net_device *netdev, | |
117 | struct ethtool_cmd *ecmd) | |
118 | { | |
119 | struct enic *enic = netdev_priv(netdev); | |
120 | ||
121 | ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); | |
122 | ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); | |
123 | ecmd->port = PORT_FIBRE; | |
124 | ecmd->transceiver = XCVR_EXTERNAL; | |
125 | ||
126 | if (netif_carrier_ok(netdev)) { | |
127 | ecmd->speed = vnic_dev_port_speed(enic->vdev); | |
128 | ecmd->duplex = DUPLEX_FULL; | |
129 | } else { | |
130 | ecmd->speed = -1; | |
131 | ecmd->duplex = -1; | |
132 | } | |
133 | ||
134 | ecmd->autoneg = AUTONEG_DISABLE; | |
135 | ||
136 | return 0; | |
137 | } | |
138 | ||
139 | static void enic_get_drvinfo(struct net_device *netdev, | |
140 | struct ethtool_drvinfo *drvinfo) | |
141 | { | |
142 | struct enic *enic = netdev_priv(netdev); | |
143 | struct vnic_devcmd_fw_info *fw_info; | |
144 | ||
145 | spin_lock(&enic->devcmd_lock); | |
146 | vnic_dev_fw_info(enic->vdev, &fw_info); | |
147 | spin_unlock(&enic->devcmd_lock); | |
148 | ||
149 | strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); | |
150 | strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version)); | |
151 | strncpy(drvinfo->fw_version, fw_info->fw_version, | |
152 | sizeof(drvinfo->fw_version)); | |
153 | strncpy(drvinfo->bus_info, pci_name(enic->pdev), | |
154 | sizeof(drvinfo->bus_info)); | |
155 | } | |
156 | ||
157 | static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data) | |
158 | { | |
159 | unsigned int i; | |
160 | ||
161 | switch (stringset) { | |
162 | case ETH_SS_STATS: | |
163 | for (i = 0; i < enic_n_tx_stats; i++) { | |
164 | memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN); | |
165 | data += ETH_GSTRING_LEN; | |
166 | } | |
167 | for (i = 0; i < enic_n_rx_stats; i++) { | |
168 | memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN); | |
169 | data += ETH_GSTRING_LEN; | |
170 | } | |
171 | break; | |
172 | } | |
173 | } | |
174 | ||
25f0a061 | 175 | static int enic_get_sset_count(struct net_device *netdev, int sset) |
01f2e4ea | 176 | { |
25f0a061 SF |
177 | switch (sset) { |
178 | case ETH_SS_STATS: | |
179 | return enic_n_tx_stats + enic_n_rx_stats; | |
180 | default: | |
181 | return -EOPNOTSUPP; | |
182 | } | |
01f2e4ea SF |
183 | } |
184 | ||
185 | static void enic_get_ethtool_stats(struct net_device *netdev, | |
186 | struct ethtool_stats *stats, u64 *data) | |
187 | { | |
188 | struct enic *enic = netdev_priv(netdev); | |
189 | struct vnic_stats *vstats; | |
190 | unsigned int i; | |
191 | ||
192 | spin_lock(&enic->devcmd_lock); | |
193 | vnic_dev_stats_dump(enic->vdev, &vstats); | |
194 | spin_unlock(&enic->devcmd_lock); | |
195 | ||
196 | for (i = 0; i < enic_n_tx_stats; i++) | |
197 | *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset]; | |
198 | for (i = 0; i < enic_n_rx_stats; i++) | |
199 | *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset]; | |
200 | } | |
201 | ||
202 | static u32 enic_get_rx_csum(struct net_device *netdev) | |
203 | { | |
204 | struct enic *enic = netdev_priv(netdev); | |
205 | return enic->csum_rx_enabled; | |
206 | } | |
207 | ||
208 | static int enic_set_rx_csum(struct net_device *netdev, u32 data) | |
209 | { | |
210 | struct enic *enic = netdev_priv(netdev); | |
211 | ||
25f0a061 SF |
212 | if (data && !ENIC_SETTING(enic, RXCSUM)) |
213 | return -EINVAL; | |
214 | ||
215 | enic->csum_rx_enabled = !!data; | |
01f2e4ea SF |
216 | |
217 | return 0; | |
218 | } | |
219 | ||
220 | static int enic_set_tx_csum(struct net_device *netdev, u32 data) | |
221 | { | |
222 | struct enic *enic = netdev_priv(netdev); | |
223 | ||
25f0a061 SF |
224 | if (data && !ENIC_SETTING(enic, TXCSUM)) |
225 | return -EINVAL; | |
226 | ||
227 | if (data) | |
01f2e4ea SF |
228 | netdev->features |= NETIF_F_HW_CSUM; |
229 | else | |
230 | netdev->features &= ~NETIF_F_HW_CSUM; | |
231 | ||
232 | return 0; | |
233 | } | |
234 | ||
235 | static int enic_set_tso(struct net_device *netdev, u32 data) | |
236 | { | |
237 | struct enic *enic = netdev_priv(netdev); | |
238 | ||
25f0a061 SF |
239 | if (data && !ENIC_SETTING(enic, TSO)) |
240 | return -EINVAL; | |
241 | ||
242 | if (data) | |
01f2e4ea SF |
243 | netdev->features |= |
244 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | |
245 | else | |
246 | netdev->features &= | |
247 | ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN); | |
248 | ||
249 | return 0; | |
250 | } | |
251 | ||
252 | static u32 enic_get_msglevel(struct net_device *netdev) | |
253 | { | |
254 | struct enic *enic = netdev_priv(netdev); | |
255 | return enic->msg_enable; | |
256 | } | |
257 | ||
258 | static void enic_set_msglevel(struct net_device *netdev, u32 value) | |
259 | { | |
260 | struct enic *enic = netdev_priv(netdev); | |
261 | enic->msg_enable = value; | |
262 | } | |
263 | ||
0fc0b732 | 264 | static const struct ethtool_ops enic_ethtool_ops = { |
01f2e4ea SF |
265 | .get_settings = enic_get_settings, |
266 | .get_drvinfo = enic_get_drvinfo, | |
267 | .get_msglevel = enic_get_msglevel, | |
268 | .set_msglevel = enic_set_msglevel, | |
269 | .get_link = ethtool_op_get_link, | |
270 | .get_strings = enic_get_strings, | |
25f0a061 | 271 | .get_sset_count = enic_get_sset_count, |
01f2e4ea SF |
272 | .get_ethtool_stats = enic_get_ethtool_stats, |
273 | .get_rx_csum = enic_get_rx_csum, | |
274 | .set_rx_csum = enic_set_rx_csum, | |
275 | .get_tx_csum = ethtool_op_get_tx_csum, | |
276 | .set_tx_csum = enic_set_tx_csum, | |
277 | .get_sg = ethtool_op_get_sg, | |
278 | .set_sg = ethtool_op_set_sg, | |
279 | .get_tso = ethtool_op_get_tso, | |
280 | .set_tso = enic_set_tso, | |
86ca9db7 SF |
281 | .get_flags = ethtool_op_get_flags, |
282 | .set_flags = ethtool_op_set_flags, | |
01f2e4ea SF |
283 | }; |
284 | ||
285 | static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) | |
286 | { | |
287 | struct enic *enic = vnic_dev_priv(wq->vdev); | |
288 | ||
289 | if (buf->sop) | |
290 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
291 | buf->len, PCI_DMA_TODEVICE); | |
292 | else | |
293 | pci_unmap_page(enic->pdev, buf->dma_addr, | |
294 | buf->len, PCI_DMA_TODEVICE); | |
295 | ||
296 | if (buf->os_buf) | |
297 | dev_kfree_skb_any(buf->os_buf); | |
298 | } | |
299 | ||
300 | static void enic_wq_free_buf(struct vnic_wq *wq, | |
301 | struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) | |
302 | { | |
303 | enic_free_wq_buf(wq, buf); | |
304 | } | |
305 | ||
306 | static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |
307 | u8 type, u16 q_number, u16 completed_index, void *opaque) | |
308 | { | |
309 | struct enic *enic = vnic_dev_priv(vdev); | |
310 | ||
311 | spin_lock(&enic->wq_lock[q_number]); | |
312 | ||
313 | vnic_wq_service(&enic->wq[q_number], cq_desc, | |
314 | completed_index, enic_wq_free_buf, | |
315 | opaque); | |
316 | ||
317 | if (netif_queue_stopped(enic->netdev) && | |
ea0d7d91 SF |
318 | vnic_wq_desc_avail(&enic->wq[q_number]) >= |
319 | (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) | |
01f2e4ea SF |
320 | netif_wake_queue(enic->netdev); |
321 | ||
322 | spin_unlock(&enic->wq_lock[q_number]); | |
323 | ||
324 | return 0; | |
325 | } | |
326 | ||
327 | static void enic_log_q_error(struct enic *enic) | |
328 | { | |
329 | unsigned int i; | |
330 | u32 error_status; | |
331 | ||
332 | for (i = 0; i < enic->wq_count; i++) { | |
333 | error_status = vnic_wq_error_status(&enic->wq[i]); | |
334 | if (error_status) | |
335 | printk(KERN_ERR PFX "%s: WQ[%d] error_status %d\n", | |
336 | enic->netdev->name, i, error_status); | |
337 | } | |
338 | ||
339 | for (i = 0; i < enic->rq_count; i++) { | |
340 | error_status = vnic_rq_error_status(&enic->rq[i]); | |
341 | if (error_status) | |
342 | printk(KERN_ERR PFX "%s: RQ[%d] error_status %d\n", | |
343 | enic->netdev->name, i, error_status); | |
344 | } | |
345 | } | |
346 | ||
347 | static void enic_link_check(struct enic *enic) | |
348 | { | |
349 | int link_status = vnic_dev_link_status(enic->vdev); | |
350 | int carrier_ok = netif_carrier_ok(enic->netdev); | |
351 | ||
352 | if (link_status && !carrier_ok) { | |
353 | printk(KERN_INFO PFX "%s: Link UP\n", enic->netdev->name); | |
354 | netif_carrier_on(enic->netdev); | |
355 | } else if (!link_status && carrier_ok) { | |
356 | printk(KERN_INFO PFX "%s: Link DOWN\n", enic->netdev->name); | |
357 | netif_carrier_off(enic->netdev); | |
358 | } | |
359 | } | |
360 | ||
361 | static void enic_mtu_check(struct enic *enic) | |
362 | { | |
363 | u32 mtu = vnic_dev_mtu(enic->vdev); | |
364 | ||
491598a4 | 365 | if (mtu && mtu != enic->port_mtu) { |
01f2e4ea SF |
366 | if (mtu < enic->netdev->mtu) |
367 | printk(KERN_WARNING PFX | |
368 | "%s: interface MTU (%d) set higher " | |
369 | "than switch port MTU (%d)\n", | |
370 | enic->netdev->name, enic->netdev->mtu, mtu); | |
371 | enic->port_mtu = mtu; | |
372 | } | |
373 | } | |
374 | ||
375 | static void enic_msglvl_check(struct enic *enic) | |
376 | { | |
377 | u32 msg_enable = vnic_dev_msg_lvl(enic->vdev); | |
378 | ||
379 | if (msg_enable != enic->msg_enable) { | |
380 | printk(KERN_INFO PFX "%s: msg lvl changed from 0x%x to 0x%x\n", | |
381 | enic->netdev->name, enic->msg_enable, msg_enable); | |
382 | enic->msg_enable = msg_enable; | |
383 | } | |
384 | } | |
385 | ||
386 | static void enic_notify_check(struct enic *enic) | |
387 | { | |
388 | enic_msglvl_check(enic); | |
389 | enic_mtu_check(enic); | |
390 | enic_link_check(enic); | |
391 | } | |
392 | ||
393 | #define ENIC_TEST_INTR(pba, i) (pba & (1 << i)) | |
394 | ||
395 | static irqreturn_t enic_isr_legacy(int irq, void *data) | |
396 | { | |
397 | struct net_device *netdev = data; | |
398 | struct enic *enic = netdev_priv(netdev); | |
399 | u32 pba; | |
400 | ||
401 | vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]); | |
402 | ||
403 | pba = vnic_intr_legacy_pba(enic->legacy_pba); | |
404 | if (!pba) { | |
405 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | |
406 | return IRQ_NONE; /* not our interrupt */ | |
407 | } | |
408 | ||
ed8af6b2 SF |
409 | if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) { |
410 | vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]); | |
01f2e4ea | 411 | enic_notify_check(enic); |
ed8af6b2 | 412 | } |
01f2e4ea SF |
413 | |
414 | if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) { | |
ed8af6b2 | 415 | vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]); |
01f2e4ea SF |
416 | enic_log_q_error(enic); |
417 | /* schedule recovery from WQ/RQ error */ | |
418 | schedule_work(&enic->reset); | |
419 | return IRQ_HANDLED; | |
420 | } | |
421 | ||
422 | if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) { | |
288379f0 BH |
423 | if (napi_schedule_prep(&enic->napi)) |
424 | __napi_schedule(&enic->napi); | |
01f2e4ea SF |
425 | } else { |
426 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); | |
427 | } | |
428 | ||
429 | return IRQ_HANDLED; | |
430 | } | |
431 | ||
432 | static irqreturn_t enic_isr_msi(int irq, void *data) | |
433 | { | |
434 | struct enic *enic = data; | |
435 | ||
436 | /* With MSI, there is no sharing of interrupts, so this is | |
437 | * our interrupt and there is no need to ack it. The device | |
438 | * is not providing per-vector masking, so the OS will not | |
439 | * write to PCI config space to mask/unmask the interrupt. | |
440 | * We're using mask_on_assertion for MSI, so the device | |
441 | * automatically masks the interrupt when the interrupt is | |
442 | * generated. Later, when exiting polling, the interrupt | |
443 | * will be unmasked (see enic_poll). | |
444 | * | |
445 | * Also, the device uses the same PCIe Traffic Class (TC) | |
446 | * for Memory Write data and MSI, so there are no ordering | |
447 | * issues; the MSI will always arrive at the Root Complex | |
448 | * _after_ corresponding Memory Writes (i.e. descriptor | |
449 | * writes). | |
450 | */ | |
451 | ||
288379f0 | 452 | napi_schedule(&enic->napi); |
01f2e4ea SF |
453 | |
454 | return IRQ_HANDLED; | |
455 | } | |
456 | ||
457 | static irqreturn_t enic_isr_msix_rq(int irq, void *data) | |
458 | { | |
459 | struct enic *enic = data; | |
460 | ||
461 | /* schedule NAPI polling for RQ cleanup */ | |
288379f0 | 462 | napi_schedule(&enic->napi); |
01f2e4ea SF |
463 | |
464 | return IRQ_HANDLED; | |
465 | } | |
466 | ||
467 | static irqreturn_t enic_isr_msix_wq(int irq, void *data) | |
468 | { | |
469 | struct enic *enic = data; | |
470 | unsigned int wq_work_to_do = -1; /* no limit */ | |
471 | unsigned int wq_work_done; | |
472 | ||
473 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | |
474 | wq_work_to_do, enic_wq_service, NULL); | |
475 | ||
476 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_WQ], | |
477 | wq_work_done, | |
478 | 1 /* unmask intr */, | |
479 | 1 /* reset intr timer */); | |
480 | ||
481 | return IRQ_HANDLED; | |
482 | } | |
483 | ||
484 | static irqreturn_t enic_isr_msix_err(int irq, void *data) | |
485 | { | |
486 | struct enic *enic = data; | |
487 | ||
ed8af6b2 SF |
488 | vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_ERR]); |
489 | ||
01f2e4ea SF |
490 | enic_log_q_error(enic); |
491 | ||
492 | /* schedule recovery from WQ/RQ error */ | |
493 | schedule_work(&enic->reset); | |
494 | ||
495 | return IRQ_HANDLED; | |
496 | } | |
497 | ||
498 | static irqreturn_t enic_isr_msix_notify(int irq, void *data) | |
499 | { | |
500 | struct enic *enic = data; | |
501 | ||
ed8af6b2 | 502 | vnic_intr_return_all_credits(&enic->intr[ENIC_MSIX_NOTIFY]); |
01f2e4ea | 503 | enic_notify_check(enic); |
01f2e4ea SF |
504 | |
505 | return IRQ_HANDLED; | |
506 | } | |
507 | ||
508 | static inline void enic_queue_wq_skb_cont(struct enic *enic, | |
509 | struct vnic_wq *wq, struct sk_buff *skb, | |
510 | unsigned int len_left) | |
511 | { | |
512 | skb_frag_t *frag; | |
513 | ||
514 | /* Queue additional data fragments */ | |
515 | for (frag = skb_shinfo(skb)->frags; len_left; frag++) { | |
516 | len_left -= frag->size; | |
517 | enic_queue_wq_desc_cont(wq, skb, | |
518 | pci_map_page(enic->pdev, frag->page, | |
519 | frag->page_offset, frag->size, | |
520 | PCI_DMA_TODEVICE), | |
521 | frag->size, | |
522 | (len_left == 0)); /* EOP? */ | |
523 | } | |
524 | } | |
525 | ||
526 | static inline void enic_queue_wq_skb_vlan(struct enic *enic, | |
527 | struct vnic_wq *wq, struct sk_buff *skb, | |
528 | int vlan_tag_insert, unsigned int vlan_tag) | |
529 | { | |
530 | unsigned int head_len = skb_headlen(skb); | |
531 | unsigned int len_left = skb->len - head_len; | |
532 | int eop = (len_left == 0); | |
533 | ||
ea0d7d91 SF |
534 | /* Queue the main skb fragment. The fragments are no larger |
535 | * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less | |
536 | * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor | |
537 | * per fragment is queued. | |
538 | */ | |
01f2e4ea SF |
539 | enic_queue_wq_desc(wq, skb, |
540 | pci_map_single(enic->pdev, skb->data, | |
541 | head_len, PCI_DMA_TODEVICE), | |
542 | head_len, | |
543 | vlan_tag_insert, vlan_tag, | |
544 | eop); | |
545 | ||
546 | if (!eop) | |
547 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | |
548 | } | |
549 | ||
550 | static inline void enic_queue_wq_skb_csum_l4(struct enic *enic, | |
551 | struct vnic_wq *wq, struct sk_buff *skb, | |
552 | int vlan_tag_insert, unsigned int vlan_tag) | |
553 | { | |
554 | unsigned int head_len = skb_headlen(skb); | |
555 | unsigned int len_left = skb->len - head_len; | |
556 | unsigned int hdr_len = skb_transport_offset(skb); | |
557 | unsigned int csum_offset = hdr_len + skb->csum_offset; | |
558 | int eop = (len_left == 0); | |
559 | ||
ea0d7d91 SF |
560 | /* Queue the main skb fragment. The fragments are no larger |
561 | * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less | |
562 | * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor | |
563 | * per fragment is queued. | |
564 | */ | |
01f2e4ea SF |
565 | enic_queue_wq_desc_csum_l4(wq, skb, |
566 | pci_map_single(enic->pdev, skb->data, | |
567 | head_len, PCI_DMA_TODEVICE), | |
568 | head_len, | |
569 | csum_offset, | |
570 | hdr_len, | |
571 | vlan_tag_insert, vlan_tag, | |
572 | eop); | |
573 | ||
574 | if (!eop) | |
575 | enic_queue_wq_skb_cont(enic, wq, skb, len_left); | |
576 | } | |
577 | ||
578 | static inline void enic_queue_wq_skb_tso(struct enic *enic, | |
579 | struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss, | |
580 | int vlan_tag_insert, unsigned int vlan_tag) | |
581 | { | |
ea0d7d91 SF |
582 | unsigned int frag_len_left = skb_headlen(skb); |
583 | unsigned int len_left = skb->len - frag_len_left; | |
01f2e4ea SF |
584 | unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
585 | int eop = (len_left == 0); | |
ea0d7d91 SF |
586 | unsigned int len; |
587 | dma_addr_t dma_addr; | |
588 | unsigned int offset = 0; | |
589 | skb_frag_t *frag; | |
01f2e4ea SF |
590 | |
591 | /* Preload TCP csum field with IP pseudo hdr calculated | |
592 | * with IP length set to zero. HW will later add in length | |
593 | * to each TCP segment resulting from the TSO. | |
594 | */ | |
595 | ||
09640e63 | 596 | if (skb->protocol == cpu_to_be16(ETH_P_IP)) { |
01f2e4ea SF |
597 | ip_hdr(skb)->check = 0; |
598 | tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, | |
599 | ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | |
09640e63 | 600 | } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) { |
01f2e4ea SF |
601 | tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
602 | &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); | |
603 | } | |
604 | ||
ea0d7d91 SF |
605 | /* Queue WQ_ENET_MAX_DESC_LEN length descriptors |
606 | * for the main skb fragment | |
607 | */ | |
608 | while (frag_len_left) { | |
609 | len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN); | |
610 | dma_addr = pci_map_single(enic->pdev, skb->data + offset, | |
611 | len, PCI_DMA_TODEVICE); | |
612 | enic_queue_wq_desc_tso(wq, skb, | |
613 | dma_addr, | |
614 | len, | |
615 | mss, hdr_len, | |
616 | vlan_tag_insert, vlan_tag, | |
617 | eop && (len == frag_len_left)); | |
618 | frag_len_left -= len; | |
619 | offset += len; | |
620 | } | |
01f2e4ea | 621 | |
ea0d7d91 SF |
622 | if (eop) |
623 | return; | |
624 | ||
625 | /* Queue WQ_ENET_MAX_DESC_LEN length descriptors | |
626 | * for additional data fragments | |
627 | */ | |
628 | for (frag = skb_shinfo(skb)->frags; len_left; frag++) { | |
629 | len_left -= frag->size; | |
630 | frag_len_left = frag->size; | |
631 | offset = frag->page_offset; | |
632 | ||
633 | while (frag_len_left) { | |
634 | len = min(frag_len_left, | |
635 | (unsigned int)WQ_ENET_MAX_DESC_LEN); | |
636 | dma_addr = pci_map_page(enic->pdev, frag->page, | |
637 | offset, len, | |
638 | PCI_DMA_TODEVICE); | |
639 | enic_queue_wq_desc_cont(wq, skb, | |
640 | dma_addr, | |
641 | len, | |
642 | (len_left == 0) && | |
643 | (len == frag_len_left)); /* EOP? */ | |
644 | frag_len_left -= len; | |
645 | offset += len; | |
646 | } | |
647 | } | |
01f2e4ea SF |
648 | } |
649 | ||
650 | static inline void enic_queue_wq_skb(struct enic *enic, | |
651 | struct vnic_wq *wq, struct sk_buff *skb) | |
652 | { | |
653 | unsigned int mss = skb_shinfo(skb)->gso_size; | |
654 | unsigned int vlan_tag = 0; | |
655 | int vlan_tag_insert = 0; | |
656 | ||
657 | if (enic->vlan_group && vlan_tx_tag_present(skb)) { | |
658 | /* VLAN tag from trunking driver */ | |
659 | vlan_tag_insert = 1; | |
660 | vlan_tag = vlan_tx_tag_get(skb); | |
661 | } | |
662 | ||
663 | if (mss) | |
664 | enic_queue_wq_skb_tso(enic, wq, skb, mss, | |
665 | vlan_tag_insert, vlan_tag); | |
666 | else if (skb->ip_summed == CHECKSUM_PARTIAL) | |
667 | enic_queue_wq_skb_csum_l4(enic, wq, skb, | |
668 | vlan_tag_insert, vlan_tag); | |
669 | else | |
670 | enic_queue_wq_skb_vlan(enic, wq, skb, | |
671 | vlan_tag_insert, vlan_tag); | |
672 | } | |
673 | ||
ed8af6b2 | 674 | /* netif_tx_lock held, process context with BHs disabled, or BH */ |
61357325 SH |
675 | static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb, |
676 | struct net_device *netdev) | |
01f2e4ea SF |
677 | { |
678 | struct enic *enic = netdev_priv(netdev); | |
679 | struct vnic_wq *wq = &enic->wq[0]; | |
680 | unsigned long flags; | |
681 | ||
682 | if (skb->len <= 0) { | |
683 | dev_kfree_skb(skb); | |
684 | return NETDEV_TX_OK; | |
685 | } | |
686 | ||
687 | /* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs, | |
688 | * which is very likely. In the off chance it's going to take | |
689 | * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb. | |
690 | */ | |
691 | ||
692 | if (skb_shinfo(skb)->gso_size == 0 && | |
693 | skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && | |
694 | skb_linearize(skb)) { | |
695 | dev_kfree_skb(skb); | |
696 | return NETDEV_TX_OK; | |
697 | } | |
698 | ||
699 | spin_lock_irqsave(&enic->wq_lock[0], flags); | |
700 | ||
ea0d7d91 SF |
701 | if (vnic_wq_desc_avail(wq) < |
702 | skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) { | |
01f2e4ea SF |
703 | netif_stop_queue(netdev); |
704 | /* This is a hard error, log it */ | |
705 | printk(KERN_ERR PFX "%s: BUG! Tx ring full when " | |
706 | "queue awake!\n", netdev->name); | |
707 | spin_unlock_irqrestore(&enic->wq_lock[0], flags); | |
708 | return NETDEV_TX_BUSY; | |
709 | } | |
710 | ||
711 | enic_queue_wq_skb(enic, wq, skb); | |
712 | ||
ea0d7d91 | 713 | if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) |
01f2e4ea SF |
714 | netif_stop_queue(netdev); |
715 | ||
01f2e4ea SF |
716 | spin_unlock_irqrestore(&enic->wq_lock[0], flags); |
717 | ||
718 | return NETDEV_TX_OK; | |
719 | } | |
720 | ||
721 | /* dev_base_lock rwlock held, nominally process context */ | |
722 | static struct net_device_stats *enic_get_stats(struct net_device *netdev) | |
723 | { | |
724 | struct enic *enic = netdev_priv(netdev); | |
25f0a061 | 725 | struct net_device_stats *net_stats = &netdev->stats; |
01f2e4ea SF |
726 | struct vnic_stats *stats; |
727 | ||
728 | spin_lock(&enic->devcmd_lock); | |
729 | vnic_dev_stats_dump(enic->vdev, &stats); | |
730 | spin_unlock(&enic->devcmd_lock); | |
731 | ||
25f0a061 SF |
732 | net_stats->tx_packets = stats->tx.tx_frames_ok; |
733 | net_stats->tx_bytes = stats->tx.tx_bytes_ok; | |
734 | net_stats->tx_errors = stats->tx.tx_errors; | |
735 | net_stats->tx_dropped = stats->tx.tx_drops; | |
01f2e4ea | 736 | |
25f0a061 SF |
737 | net_stats->rx_packets = stats->rx.rx_frames_ok; |
738 | net_stats->rx_bytes = stats->rx.rx_bytes_ok; | |
739 | net_stats->rx_errors = stats->rx.rx_errors; | |
740 | net_stats->multicast = stats->rx.rx_multicast_frames_ok; | |
350991e1 | 741 | net_stats->rx_over_errors = enic->rq_truncated_pkts; |
bd9fb1a4 | 742 | net_stats->rx_crc_errors = enic->rq_bad_fcs; |
350991e1 | 743 | net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop; |
01f2e4ea | 744 | |
25f0a061 | 745 | return net_stats; |
01f2e4ea SF |
746 | } |
747 | ||
748 | static void enic_reset_mcaddrs(struct enic *enic) | |
749 | { | |
750 | enic->mc_count = 0; | |
751 | } | |
752 | ||
753 | static int enic_set_mac_addr(struct net_device *netdev, char *addr) | |
754 | { | |
755 | if (!is_valid_ether_addr(addr)) | |
756 | return -EADDRNOTAVAIL; | |
757 | ||
758 | memcpy(netdev->dev_addr, addr, netdev->addr_len); | |
759 | ||
760 | return 0; | |
761 | } | |
762 | ||
763 | /* netif_tx_lock held, BHs disabled */ | |
764 | static void enic_set_multicast_list(struct net_device *netdev) | |
765 | { | |
766 | struct enic *enic = netdev_priv(netdev); | |
767 | struct dev_mc_list *list = netdev->mc_list; | |
768 | int directed = 1; | |
769 | int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; | |
770 | int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; | |
771 | int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; | |
772 | int allmulti = (netdev->flags & IFF_ALLMULTI) || | |
773 | (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS); | |
774 | u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; | |
775 | unsigned int mc_count = netdev->mc_count; | |
776 | unsigned int i, j; | |
777 | ||
778 | if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) | |
779 | mc_count = ENIC_MULTICAST_PERFECT_FILTERS; | |
780 | ||
781 | spin_lock(&enic->devcmd_lock); | |
782 | ||
783 | vnic_dev_packet_filter(enic->vdev, directed, | |
784 | multicast, broadcast, promisc, allmulti); | |
785 | ||
786 | /* Is there an easier way? Trying to minimize to | |
787 | * calls to add/del multicast addrs. We keep the | |
788 | * addrs from the last call in enic->mc_addr and | |
789 | * look for changes to add/del. | |
790 | */ | |
791 | ||
792 | for (i = 0; list && i < mc_count; i++) { | |
793 | memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN); | |
794 | list = list->next; | |
795 | } | |
796 | ||
797 | for (i = 0; i < enic->mc_count; i++) { | |
798 | for (j = 0; j < mc_count; j++) | |
799 | if (compare_ether_addr(enic->mc_addr[i], | |
800 | mc_addr[j]) == 0) | |
801 | break; | |
802 | if (j == mc_count) | |
803 | enic_del_multicast_addr(enic, enic->mc_addr[i]); | |
804 | } | |
805 | ||
806 | for (i = 0; i < mc_count; i++) { | |
807 | for (j = 0; j < enic->mc_count; j++) | |
808 | if (compare_ether_addr(mc_addr[i], | |
809 | enic->mc_addr[j]) == 0) | |
810 | break; | |
811 | if (j == enic->mc_count) | |
812 | enic_add_multicast_addr(enic, mc_addr[i]); | |
813 | } | |
814 | ||
815 | /* Save the list to compare against next time | |
816 | */ | |
817 | ||
818 | for (i = 0; i < mc_count; i++) | |
819 | memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN); | |
820 | ||
821 | enic->mc_count = mc_count; | |
822 | ||
823 | spin_unlock(&enic->devcmd_lock); | |
824 | } | |
825 | ||
826 | /* rtnl lock is held */ | |
827 | static void enic_vlan_rx_register(struct net_device *netdev, | |
828 | struct vlan_group *vlan_group) | |
829 | { | |
830 | struct enic *enic = netdev_priv(netdev); | |
831 | enic->vlan_group = vlan_group; | |
832 | } | |
833 | ||
834 | /* rtnl lock is held */ | |
835 | static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid) | |
836 | { | |
837 | struct enic *enic = netdev_priv(netdev); | |
838 | ||
839 | spin_lock(&enic->devcmd_lock); | |
840 | enic_add_vlan(enic, vid); | |
841 | spin_unlock(&enic->devcmd_lock); | |
842 | } | |
843 | ||
844 | /* rtnl lock is held */ | |
845 | static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) | |
846 | { | |
847 | struct enic *enic = netdev_priv(netdev); | |
848 | ||
849 | spin_lock(&enic->devcmd_lock); | |
850 | enic_del_vlan(enic, vid); | |
851 | spin_unlock(&enic->devcmd_lock); | |
852 | } | |
853 | ||
854 | /* netif_tx_lock held, BHs disabled */ | |
855 | static void enic_tx_timeout(struct net_device *netdev) | |
856 | { | |
857 | struct enic *enic = netdev_priv(netdev); | |
858 | schedule_work(&enic->reset); | |
859 | } | |
860 | ||
861 | static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) | |
862 | { | |
863 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
864 | ||
865 | if (!buf->os_buf) | |
866 | return; | |
867 | ||
868 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
869 | buf->len, PCI_DMA_FROMDEVICE); | |
870 | dev_kfree_skb_any(buf->os_buf); | |
871 | } | |
872 | ||
d19e22dc SF |
873 | static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev, |
874 | unsigned int size) | |
01f2e4ea SF |
875 | { |
876 | struct sk_buff *skb; | |
877 | ||
d19e22dc | 878 | skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN); |
01f2e4ea SF |
879 | |
880 | if (skb) | |
881 | skb_reserve(skb, NET_IP_ALIGN); | |
882 | ||
883 | return skb; | |
884 | } | |
885 | ||
886 | static int enic_rq_alloc_buf(struct vnic_rq *rq) | |
887 | { | |
888 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
d19e22dc | 889 | struct net_device *netdev = enic->netdev; |
01f2e4ea | 890 | struct sk_buff *skb; |
d19e22dc | 891 | unsigned int len = netdev->mtu + ETH_HLEN; |
01f2e4ea SF |
892 | unsigned int os_buf_index = 0; |
893 | dma_addr_t dma_addr; | |
894 | ||
d19e22dc | 895 | skb = enic_rq_alloc_skb(netdev, len); |
01f2e4ea SF |
896 | if (!skb) |
897 | return -ENOMEM; | |
898 | ||
899 | dma_addr = pci_map_single(enic->pdev, skb->data, | |
900 | len, PCI_DMA_FROMDEVICE); | |
901 | ||
902 | enic_queue_rq_desc(rq, skb, os_buf_index, | |
903 | dma_addr, len); | |
904 | ||
905 | return 0; | |
906 | } | |
907 | ||
4badc385 SF |
908 | static int enic_rq_alloc_buf_a1(struct vnic_rq *rq) |
909 | { | |
910 | struct rq_enet_desc *desc = vnic_rq_next_desc(rq); | |
911 | ||
912 | if (vnic_rq_posting_soon(rq)) { | |
913 | ||
914 | /* SW workaround for A0 HW erratum: if we're just about | |
915 | * to write posted_index, insert a dummy desc | |
916 | * of type resvd | |
917 | */ | |
918 | ||
919 | rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0); | |
920 | vnic_rq_post(rq, 0, 0, 0, 0); | |
921 | } else { | |
922 | return enic_rq_alloc_buf(rq); | |
923 | } | |
924 | ||
925 | return 0; | |
926 | } | |
927 | ||
928 | static int enic_set_rq_alloc_buf(struct enic *enic) | |
929 | { | |
930 | enum vnic_dev_hw_version hw_ver; | |
931 | int err; | |
932 | ||
933 | err = vnic_dev_hw_version(enic->vdev, &hw_ver); | |
934 | if (err) | |
935 | return err; | |
936 | ||
937 | switch (hw_ver) { | |
938 | case VNIC_DEV_HW_VER_A1: | |
939 | enic->rq_alloc_buf = enic_rq_alloc_buf_a1; | |
940 | break; | |
941 | case VNIC_DEV_HW_VER_A2: | |
942 | case VNIC_DEV_HW_VER_UNKNOWN: | |
943 | enic->rq_alloc_buf = enic_rq_alloc_buf; | |
944 | break; | |
945 | default: | |
946 | return -ENODEV; | |
947 | } | |
948 | ||
949 | return 0; | |
950 | } | |
951 | ||
01f2e4ea SF |
952 | static int enic_get_skb_header(struct sk_buff *skb, void **iphdr, |
953 | void **tcph, u64 *hdr_flags, void *priv) | |
954 | { | |
955 | struct cq_enet_rq_desc *cq_desc = priv; | |
956 | unsigned int ip_len; | |
957 | struct iphdr *iph; | |
958 | ||
959 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | |
960 | u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; | |
961 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | |
962 | u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; | |
963 | u8 packet_error; | |
964 | u16 q_number, completed_index, bytes_written, vlan, checksum; | |
965 | u32 rss_hash; | |
966 | ||
967 | cq_enet_rq_desc_dec(cq_desc, | |
968 | &type, &color, &q_number, &completed_index, | |
969 | &ingress_port, &fcoe, &eop, &sop, &rss_type, | |
970 | &csum_not_calc, &rss_hash, &bytes_written, | |
971 | &packet_error, &vlan_stripped, &vlan, &checksum, | |
972 | &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, | |
973 | &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, | |
974 | &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, | |
975 | &fcs_ok); | |
976 | ||
977 | if (!(ipv4 && tcp && !ipv4_fragment)) | |
978 | return -1; | |
979 | ||
980 | skb_reset_network_header(skb); | |
981 | iph = ip_hdr(skb); | |
982 | ||
983 | ip_len = ip_hdrlen(skb); | |
984 | skb_set_transport_header(skb, ip_len); | |
985 | ||
986 | /* check if ip header and tcp header are complete */ | |
987 | if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) | |
988 | return -1; | |
989 | ||
990 | *hdr_flags = LRO_IPV4 | LRO_TCP; | |
991 | *tcph = tcp_hdr(skb); | |
992 | *iphdr = iph; | |
993 | ||
994 | return 0; | |
995 | } | |
996 | ||
997 | static void enic_rq_indicate_buf(struct vnic_rq *rq, | |
998 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | |
999 | int skipped, void *opaque) | |
1000 | { | |
1001 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
86ca9db7 | 1002 | struct net_device *netdev = enic->netdev; |
01f2e4ea SF |
1003 | struct sk_buff *skb; |
1004 | ||
1005 | u8 type, color, eop, sop, ingress_port, vlan_stripped; | |
1006 | u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; | |
1007 | u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; | |
1008 | u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; | |
1009 | u8 packet_error; | |
1010 | u16 q_number, completed_index, bytes_written, vlan, checksum; | |
1011 | u32 rss_hash; | |
1012 | ||
1013 | if (skipped) | |
1014 | return; | |
1015 | ||
1016 | skb = buf->os_buf; | |
1017 | prefetch(skb->data - NET_IP_ALIGN); | |
1018 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
1019 | buf->len, PCI_DMA_FROMDEVICE); | |
1020 | ||
1021 | cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, | |
1022 | &type, &color, &q_number, &completed_index, | |
1023 | &ingress_port, &fcoe, &eop, &sop, &rss_type, | |
1024 | &csum_not_calc, &rss_hash, &bytes_written, | |
1025 | &packet_error, &vlan_stripped, &vlan, &checksum, | |
1026 | &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, | |
1027 | &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, | |
1028 | &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, | |
1029 | &fcs_ok); | |
1030 | ||
1031 | if (packet_error) { | |
1032 | ||
350991e1 SF |
1033 | if (!fcs_ok) { |
1034 | if (bytes_written > 0) | |
1035 | enic->rq_bad_fcs++; | |
1036 | else if (bytes_written == 0) | |
1037 | enic->rq_truncated_pkts++; | |
1038 | } | |
01f2e4ea SF |
1039 | |
1040 | dev_kfree_skb_any(skb); | |
1041 | ||
1042 | return; | |
1043 | } | |
1044 | ||
1045 | if (eop && bytes_written > 0) { | |
1046 | ||
1047 | /* Good receive | |
1048 | */ | |
1049 | ||
1050 | skb_put(skb, bytes_written); | |
86ca9db7 | 1051 | skb->protocol = eth_type_trans(skb, netdev); |
01f2e4ea SF |
1052 | |
1053 | if (enic->csum_rx_enabled && !csum_not_calc) { | |
1054 | skb->csum = htons(checksum); | |
1055 | skb->ip_summed = CHECKSUM_COMPLETE; | |
1056 | } | |
1057 | ||
86ca9db7 | 1058 | skb->dev = netdev; |
01f2e4ea SF |
1059 | |
1060 | if (enic->vlan_group && vlan_stripped) { | |
1061 | ||
86ca9db7 | 1062 | if ((netdev->features & NETIF_F_LRO) && ipv4) |
01f2e4ea SF |
1063 | lro_vlan_hwaccel_receive_skb(&enic->lro_mgr, |
1064 | skb, enic->vlan_group, | |
1065 | vlan, cq_desc); | |
1066 | else | |
1067 | vlan_hwaccel_receive_skb(skb, | |
1068 | enic->vlan_group, vlan); | |
1069 | ||
1070 | } else { | |
1071 | ||
86ca9db7 | 1072 | if ((netdev->features & NETIF_F_LRO) && ipv4) |
01f2e4ea SF |
1073 | lro_receive_skb(&enic->lro_mgr, skb, cq_desc); |
1074 | else | |
1075 | netif_receive_skb(skb); | |
1076 | ||
1077 | } | |
1078 | ||
1079 | } else { | |
1080 | ||
1081 | /* Buffer overflow | |
1082 | */ | |
1083 | ||
1084 | dev_kfree_skb_any(skb); | |
1085 | } | |
1086 | } | |
1087 | ||
1088 | static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |
1089 | u8 type, u16 q_number, u16 completed_index, void *opaque) | |
1090 | { | |
1091 | struct enic *enic = vnic_dev_priv(vdev); | |
1092 | ||
1093 | vnic_rq_service(&enic->rq[q_number], cq_desc, | |
1094 | completed_index, VNIC_RQ_RETURN_DESC, | |
1095 | enic_rq_indicate_buf, opaque); | |
1096 | ||
1097 | return 0; | |
1098 | } | |
1099 | ||
1100 | static void enic_rq_drop_buf(struct vnic_rq *rq, | |
1101 | struct cq_desc *cq_desc, struct vnic_rq_buf *buf, | |
1102 | int skipped, void *opaque) | |
1103 | { | |
1104 | struct enic *enic = vnic_dev_priv(rq->vdev); | |
1105 | struct sk_buff *skb = buf->os_buf; | |
1106 | ||
1107 | if (skipped) | |
1108 | return; | |
1109 | ||
1110 | pci_unmap_single(enic->pdev, buf->dma_addr, | |
1111 | buf->len, PCI_DMA_FROMDEVICE); | |
1112 | ||
1113 | dev_kfree_skb_any(skb); | |
1114 | } | |
1115 | ||
1116 | static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc, | |
1117 | u8 type, u16 q_number, u16 completed_index, void *opaque) | |
1118 | { | |
1119 | struct enic *enic = vnic_dev_priv(vdev); | |
1120 | ||
1121 | vnic_rq_service(&enic->rq[q_number], cq_desc, | |
1122 | completed_index, VNIC_RQ_RETURN_DESC, | |
1123 | enic_rq_drop_buf, opaque); | |
1124 | ||
1125 | return 0; | |
1126 | } | |
1127 | ||
1128 | static int enic_poll(struct napi_struct *napi, int budget) | |
1129 | { | |
1130 | struct enic *enic = container_of(napi, struct enic, napi); | |
1131 | struct net_device *netdev = enic->netdev; | |
1132 | unsigned int rq_work_to_do = budget; | |
1133 | unsigned int wq_work_to_do = -1; /* no limit */ | |
1134 | unsigned int work_done, rq_work_done, wq_work_done; | |
1135 | ||
1136 | /* Service RQ (first) and WQ | |
1137 | */ | |
1138 | ||
1139 | rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | |
1140 | rq_work_to_do, enic_rq_service, NULL); | |
1141 | ||
1142 | wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | |
1143 | wq_work_to_do, enic_wq_service, NULL); | |
1144 | ||
1145 | /* Accumulate intr event credits for this polling | |
1146 | * cycle. An intr event is the completion of a | |
1147 | * a WQ or RQ packet. | |
1148 | */ | |
1149 | ||
1150 | work_done = rq_work_done + wq_work_done; | |
1151 | ||
1152 | if (work_done > 0) | |
1153 | vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ], | |
1154 | work_done, | |
1155 | 0 /* don't unmask intr */, | |
1156 | 0 /* don't reset intr timer */); | |
1157 | ||
1158 | if (rq_work_done > 0) { | |
1159 | ||
1160 | /* Replenish RQ | |
1161 | */ | |
1162 | ||
4badc385 | 1163 | vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); |
01f2e4ea SF |
1164 | |
1165 | } else { | |
1166 | ||
1167 | /* If no work done, flush all LROs and exit polling | |
1168 | */ | |
1169 | ||
86ca9db7 | 1170 | if (netdev->features & NETIF_F_LRO) |
01f2e4ea SF |
1171 | lro_flush_all(&enic->lro_mgr); |
1172 | ||
288379f0 | 1173 | napi_complete(napi); |
ed8af6b2 | 1174 | vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]); |
01f2e4ea SF |
1175 | } |
1176 | ||
1177 | return rq_work_done; | |
1178 | } | |
1179 | ||
1180 | static int enic_poll_msix(struct napi_struct *napi, int budget) | |
1181 | { | |
1182 | struct enic *enic = container_of(napi, struct enic, napi); | |
1183 | struct net_device *netdev = enic->netdev; | |
1184 | unsigned int work_to_do = budget; | |
1185 | unsigned int work_done; | |
1186 | ||
1187 | /* Service RQ | |
1188 | */ | |
1189 | ||
1190 | work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ], | |
1191 | work_to_do, enic_rq_service, NULL); | |
1192 | ||
1193 | if (work_done > 0) { | |
1194 | ||
1195 | /* Replenish RQ | |
1196 | */ | |
1197 | ||
4badc385 | 1198 | vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf); |
01f2e4ea | 1199 | |
ed8af6b2 | 1200 | /* Return intr event credits for this polling |
01f2e4ea | 1201 | * cycle. An intr event is the completion of a |
ed8af6b2 | 1202 | * RQ packet. |
01f2e4ea SF |
1203 | */ |
1204 | ||
1205 | vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ], | |
1206 | work_done, | |
1207 | 0 /* don't unmask intr */, | |
1208 | 0 /* don't reset intr timer */); | |
1209 | } else { | |
1210 | ||
1211 | /* If no work done, flush all LROs and exit polling | |
1212 | */ | |
1213 | ||
86ca9db7 | 1214 | if (netdev->features & NETIF_F_LRO) |
01f2e4ea SF |
1215 | lro_flush_all(&enic->lro_mgr); |
1216 | ||
288379f0 | 1217 | napi_complete(napi); |
01f2e4ea SF |
1218 | vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]); |
1219 | } | |
1220 | ||
1221 | return work_done; | |
1222 | } | |
1223 | ||
1224 | static void enic_notify_timer(unsigned long data) | |
1225 | { | |
1226 | struct enic *enic = (struct enic *)data; | |
1227 | ||
1228 | enic_notify_check(enic); | |
1229 | ||
25f0a061 SF |
1230 | mod_timer(&enic->notify_timer, |
1231 | round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD)); | |
01f2e4ea SF |
1232 | } |
1233 | ||
1234 | static void enic_free_intr(struct enic *enic) | |
1235 | { | |
1236 | struct net_device *netdev = enic->netdev; | |
1237 | unsigned int i; | |
1238 | ||
1239 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1240 | case VNIC_DEV_INTR_MODE_INTX: | |
01f2e4ea SF |
1241 | free_irq(enic->pdev->irq, netdev); |
1242 | break; | |
8f4d248c SF |
1243 | case VNIC_DEV_INTR_MODE_MSI: |
1244 | free_irq(enic->pdev->irq, enic); | |
1245 | break; | |
01f2e4ea SF |
1246 | case VNIC_DEV_INTR_MODE_MSIX: |
1247 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) | |
1248 | if (enic->msix[i].requested) | |
1249 | free_irq(enic->msix_entry[i].vector, | |
1250 | enic->msix[i].devid); | |
1251 | break; | |
1252 | default: | |
1253 | break; | |
1254 | } | |
1255 | } | |
1256 | ||
1257 | static int enic_request_intr(struct enic *enic) | |
1258 | { | |
1259 | struct net_device *netdev = enic->netdev; | |
1260 | unsigned int i; | |
1261 | int err = 0; | |
1262 | ||
1263 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1264 | ||
1265 | case VNIC_DEV_INTR_MODE_INTX: | |
1266 | ||
1267 | err = request_irq(enic->pdev->irq, enic_isr_legacy, | |
1268 | IRQF_SHARED, netdev->name, netdev); | |
1269 | break; | |
1270 | ||
1271 | case VNIC_DEV_INTR_MODE_MSI: | |
1272 | ||
1273 | err = request_irq(enic->pdev->irq, enic_isr_msi, | |
1274 | 0, netdev->name, enic); | |
1275 | break; | |
1276 | ||
1277 | case VNIC_DEV_INTR_MODE_MSIX: | |
1278 | ||
1279 | sprintf(enic->msix[ENIC_MSIX_RQ].devname, | |
8f4d248c | 1280 | "%.11s-rx-0", netdev->name); |
01f2e4ea SF |
1281 | enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq; |
1282 | enic->msix[ENIC_MSIX_RQ].devid = enic; | |
1283 | ||
1284 | sprintf(enic->msix[ENIC_MSIX_WQ].devname, | |
8f4d248c | 1285 | "%.11s-tx-0", netdev->name); |
01f2e4ea SF |
1286 | enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq; |
1287 | enic->msix[ENIC_MSIX_WQ].devid = enic; | |
1288 | ||
1289 | sprintf(enic->msix[ENIC_MSIX_ERR].devname, | |
1290 | "%.11s-err", netdev->name); | |
1291 | enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err; | |
1292 | enic->msix[ENIC_MSIX_ERR].devid = enic; | |
1293 | ||
1294 | sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname, | |
1295 | "%.11s-notify", netdev->name); | |
1296 | enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify; | |
1297 | enic->msix[ENIC_MSIX_NOTIFY].devid = enic; | |
1298 | ||
1299 | for (i = 0; i < ARRAY_SIZE(enic->msix); i++) { | |
1300 | err = request_irq(enic->msix_entry[i].vector, | |
1301 | enic->msix[i].isr, 0, | |
1302 | enic->msix[i].devname, | |
1303 | enic->msix[i].devid); | |
1304 | if (err) { | |
1305 | enic_free_intr(enic); | |
1306 | break; | |
1307 | } | |
1308 | enic->msix[i].requested = 1; | |
1309 | } | |
1310 | ||
1311 | break; | |
1312 | ||
1313 | default: | |
1314 | break; | |
1315 | } | |
1316 | ||
1317 | return err; | |
1318 | } | |
1319 | ||
1320 | static int enic_notify_set(struct enic *enic) | |
1321 | { | |
1322 | int err; | |
1323 | ||
56ac88b3 | 1324 | spin_lock(&enic->devcmd_lock); |
01f2e4ea SF |
1325 | switch (vnic_dev_get_intr_mode(enic->vdev)) { |
1326 | case VNIC_DEV_INTR_MODE_INTX: | |
1327 | err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY); | |
1328 | break; | |
1329 | case VNIC_DEV_INTR_MODE_MSIX: | |
1330 | err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY); | |
1331 | break; | |
1332 | default: | |
1333 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); | |
1334 | break; | |
1335 | } | |
56ac88b3 | 1336 | spin_unlock(&enic->devcmd_lock); |
01f2e4ea SF |
1337 | |
1338 | return err; | |
1339 | } | |
1340 | ||
1341 | static void enic_notify_timer_start(struct enic *enic) | |
1342 | { | |
1343 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1344 | case VNIC_DEV_INTR_MODE_MSI: | |
1345 | mod_timer(&enic->notify_timer, jiffies); | |
1346 | break; | |
1347 | default: | |
1348 | /* Using intr for notification for INTx/MSI-X */ | |
1349 | break; | |
1350 | }; | |
1351 | } | |
1352 | ||
1353 | /* rtnl lock is held, process context */ | |
1354 | static int enic_open(struct net_device *netdev) | |
1355 | { | |
1356 | struct enic *enic = netdev_priv(netdev); | |
1357 | unsigned int i; | |
1358 | int err; | |
1359 | ||
4b75a442 SF |
1360 | err = enic_request_intr(enic); |
1361 | if (err) { | |
1362 | printk(KERN_ERR PFX "%s: Unable to request irq.\n", | |
1363 | netdev->name); | |
1364 | return err; | |
1365 | } | |
1366 | ||
1367 | err = enic_notify_set(enic); | |
1368 | if (err) { | |
1369 | printk(KERN_ERR PFX | |
1370 | "%s: Failed to alloc notify buffer, aborting.\n", | |
1371 | netdev->name); | |
1372 | goto err_out_free_intr; | |
1373 | } | |
1374 | ||
01f2e4ea | 1375 | for (i = 0; i < enic->rq_count; i++) { |
4badc385 | 1376 | err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf); |
01f2e4ea SF |
1377 | if (err) { |
1378 | printk(KERN_ERR PFX | |
1379 | "%s: Unable to alloc receive buffers.\n", | |
1380 | netdev->name); | |
4b75a442 | 1381 | goto err_out_notify_unset; |
01f2e4ea SF |
1382 | } |
1383 | } | |
1384 | ||
1385 | for (i = 0; i < enic->wq_count; i++) | |
1386 | vnic_wq_enable(&enic->wq[i]); | |
1387 | for (i = 0; i < enic->rq_count; i++) | |
1388 | vnic_rq_enable(&enic->rq[i]); | |
1389 | ||
56ac88b3 | 1390 | spin_lock(&enic->devcmd_lock); |
01f2e4ea | 1391 | enic_add_station_addr(enic); |
56ac88b3 | 1392 | spin_unlock(&enic->devcmd_lock); |
01f2e4ea SF |
1393 | enic_set_multicast_list(netdev); |
1394 | ||
1395 | netif_wake_queue(netdev); | |
1396 | napi_enable(&enic->napi); | |
56ac88b3 | 1397 | spin_lock(&enic->devcmd_lock); |
01f2e4ea | 1398 | vnic_dev_enable(enic->vdev); |
56ac88b3 | 1399 | spin_unlock(&enic->devcmd_lock); |
01f2e4ea SF |
1400 | |
1401 | for (i = 0; i < enic->intr_count; i++) | |
1402 | vnic_intr_unmask(&enic->intr[i]); | |
1403 | ||
1404 | enic_notify_timer_start(enic); | |
1405 | ||
1406 | return 0; | |
4b75a442 SF |
1407 | |
1408 | err_out_notify_unset: | |
56ac88b3 | 1409 | spin_lock(&enic->devcmd_lock); |
4b75a442 | 1410 | vnic_dev_notify_unset(enic->vdev); |
56ac88b3 | 1411 | spin_unlock(&enic->devcmd_lock); |
4b75a442 SF |
1412 | err_out_free_intr: |
1413 | enic_free_intr(enic); | |
1414 | ||
1415 | return err; | |
01f2e4ea SF |
1416 | } |
1417 | ||
1418 | /* rtnl lock is held, process context */ | |
1419 | static int enic_stop(struct net_device *netdev) | |
1420 | { | |
1421 | struct enic *enic = netdev_priv(netdev); | |
1422 | unsigned int i; | |
1423 | int err; | |
1424 | ||
1425 | del_timer_sync(&enic->notify_timer); | |
1426 | ||
56ac88b3 | 1427 | spin_lock(&enic->devcmd_lock); |
01f2e4ea | 1428 | vnic_dev_disable(enic->vdev); |
56ac88b3 | 1429 | spin_unlock(&enic->devcmd_lock); |
01f2e4ea SF |
1430 | napi_disable(&enic->napi); |
1431 | netif_stop_queue(netdev); | |
1432 | ||
1433 | for (i = 0; i < enic->intr_count; i++) | |
1434 | vnic_intr_mask(&enic->intr[i]); | |
1435 | ||
1436 | for (i = 0; i < enic->wq_count; i++) { | |
1437 | err = vnic_wq_disable(&enic->wq[i]); | |
1438 | if (err) | |
1439 | return err; | |
1440 | } | |
1441 | for (i = 0; i < enic->rq_count; i++) { | |
1442 | err = vnic_rq_disable(&enic->rq[i]); | |
1443 | if (err) | |
1444 | return err; | |
1445 | } | |
1446 | ||
56ac88b3 | 1447 | spin_lock(&enic->devcmd_lock); |
4b75a442 | 1448 | vnic_dev_notify_unset(enic->vdev); |
56ac88b3 | 1449 | spin_unlock(&enic->devcmd_lock); |
4b75a442 SF |
1450 | enic_free_intr(enic); |
1451 | ||
01f2e4ea SF |
1452 | (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ], |
1453 | -1, enic_rq_service_drop, NULL); | |
1454 | (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ], | |
1455 | -1, enic_wq_service, NULL); | |
1456 | ||
1457 | for (i = 0; i < enic->wq_count; i++) | |
1458 | vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); | |
1459 | for (i = 0; i < enic->rq_count; i++) | |
1460 | vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); | |
1461 | for (i = 0; i < enic->cq_count; i++) | |
1462 | vnic_cq_clean(&enic->cq[i]); | |
1463 | for (i = 0; i < enic->intr_count; i++) | |
1464 | vnic_intr_clean(&enic->intr[i]); | |
1465 | ||
1466 | return 0; | |
1467 | } | |
1468 | ||
1469 | static int enic_change_mtu(struct net_device *netdev, int new_mtu) | |
1470 | { | |
1471 | struct enic *enic = netdev_priv(netdev); | |
1472 | int running = netif_running(netdev); | |
1473 | ||
25f0a061 SF |
1474 | if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU) |
1475 | return -EINVAL; | |
1476 | ||
01f2e4ea SF |
1477 | if (running) |
1478 | enic_stop(netdev); | |
1479 | ||
01f2e4ea SF |
1480 | netdev->mtu = new_mtu; |
1481 | ||
1482 | if (netdev->mtu > enic->port_mtu) | |
1483 | printk(KERN_WARNING PFX | |
1484 | "%s: interface MTU (%d) set higher " | |
1485 | "than port MTU (%d)\n", | |
1486 | netdev->name, netdev->mtu, enic->port_mtu); | |
1487 | ||
1488 | if (running) | |
1489 | enic_open(netdev); | |
1490 | ||
1491 | return 0; | |
1492 | } | |
1493 | ||
1494 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1495 | static void enic_poll_controller(struct net_device *netdev) | |
1496 | { | |
1497 | struct enic *enic = netdev_priv(netdev); | |
1498 | struct vnic_dev *vdev = enic->vdev; | |
1499 | ||
1500 | switch (vnic_dev_get_intr_mode(vdev)) { | |
1501 | case VNIC_DEV_INTR_MODE_MSIX: | |
1502 | enic_isr_msix_rq(enic->pdev->irq, enic); | |
1503 | enic_isr_msix_wq(enic->pdev->irq, enic); | |
1504 | break; | |
1505 | case VNIC_DEV_INTR_MODE_MSI: | |
1506 | enic_isr_msi(enic->pdev->irq, enic); | |
1507 | break; | |
1508 | case VNIC_DEV_INTR_MODE_INTX: | |
1509 | enic_isr_legacy(enic->pdev->irq, netdev); | |
1510 | break; | |
1511 | default: | |
1512 | break; | |
1513 | } | |
1514 | } | |
1515 | #endif | |
1516 | ||
1517 | static int enic_dev_wait(struct vnic_dev *vdev, | |
1518 | int (*start)(struct vnic_dev *, int), | |
1519 | int (*finished)(struct vnic_dev *, int *), | |
1520 | int arg) | |
1521 | { | |
1522 | unsigned long time; | |
1523 | int done; | |
1524 | int err; | |
1525 | ||
1526 | BUG_ON(in_interrupt()); | |
1527 | ||
1528 | err = start(vdev, arg); | |
1529 | if (err) | |
1530 | return err; | |
1531 | ||
1532 | /* Wait for func to complete...2 seconds max | |
1533 | */ | |
1534 | ||
1535 | time = jiffies + (HZ * 2); | |
1536 | do { | |
1537 | ||
1538 | err = finished(vdev, &done); | |
1539 | if (err) | |
1540 | return err; | |
1541 | ||
1542 | if (done) | |
1543 | return 0; | |
1544 | ||
1545 | schedule_timeout_uninterruptible(HZ / 10); | |
1546 | ||
1547 | } while (time_after(time, jiffies)); | |
1548 | ||
1549 | return -ETIMEDOUT; | |
1550 | } | |
1551 | ||
1552 | static int enic_dev_open(struct enic *enic) | |
1553 | { | |
1554 | int err; | |
1555 | ||
1556 | err = enic_dev_wait(enic->vdev, vnic_dev_open, | |
1557 | vnic_dev_open_done, 0); | |
1558 | if (err) | |
1559 | printk(KERN_ERR PFX | |
1560 | "vNIC device open failed, err %d.\n", err); | |
1561 | ||
1562 | return err; | |
1563 | } | |
1564 | ||
1565 | static int enic_dev_soft_reset(struct enic *enic) | |
1566 | { | |
1567 | int err; | |
1568 | ||
1569 | err = enic_dev_wait(enic->vdev, vnic_dev_soft_reset, | |
1570 | vnic_dev_soft_reset_done, 0); | |
1571 | if (err) | |
1572 | printk(KERN_ERR PFX | |
1573 | "vNIC soft reset failed, err %d.\n", err); | |
1574 | ||
1575 | return err; | |
1576 | } | |
1577 | ||
68f71708 SF |
1578 | static int enic_set_niccfg(struct enic *enic) |
1579 | { | |
1580 | const u8 rss_default_cpu = 0; | |
1581 | const u8 rss_hash_type = 0; | |
1582 | const u8 rss_hash_bits = 0; | |
1583 | const u8 rss_base_cpu = 0; | |
1584 | const u8 rss_enable = 0; | |
1585 | const u8 tso_ipid_split_en = 0; | |
1586 | const u8 ig_vlan_strip_en = 1; | |
1587 | ||
1588 | /* Enable VLAN tag stripping. RSS not enabled (yet). | |
6ba9cdc0 | 1589 | */ |
68f71708 SF |
1590 | |
1591 | return enic_set_nic_cfg(enic, | |
1592 | rss_default_cpu, rss_hash_type, | |
1593 | rss_hash_bits, rss_base_cpu, | |
1594 | rss_enable, tso_ipid_split_en, | |
1595 | ig_vlan_strip_en); | |
1596 | } | |
1597 | ||
01f2e4ea SF |
1598 | static void enic_reset(struct work_struct *work) |
1599 | { | |
1600 | struct enic *enic = container_of(work, struct enic, reset); | |
1601 | ||
1602 | if (!netif_running(enic->netdev)) | |
1603 | return; | |
1604 | ||
1605 | rtnl_lock(); | |
1606 | ||
1607 | spin_lock(&enic->devcmd_lock); | |
1608 | vnic_dev_hang_notify(enic->vdev); | |
1609 | spin_unlock(&enic->devcmd_lock); | |
1610 | ||
1611 | enic_stop(enic->netdev); | |
1612 | enic_dev_soft_reset(enic); | |
68f71708 | 1613 | vnic_dev_init(enic->vdev, 0); |
01f2e4ea SF |
1614 | enic_reset_mcaddrs(enic); |
1615 | enic_init_vnic_resources(enic); | |
68f71708 | 1616 | enic_set_niccfg(enic); |
01f2e4ea SF |
1617 | enic_open(enic->netdev); |
1618 | ||
1619 | rtnl_unlock(); | |
1620 | } | |
1621 | ||
1622 | static int enic_set_intr_mode(struct enic *enic) | |
1623 | { | |
6ba9cdc0 SF |
1624 | unsigned int n = 1; |
1625 | unsigned int m = 1; | |
01f2e4ea SF |
1626 | unsigned int i; |
1627 | ||
1628 | /* Set interrupt mode (INTx, MSI, MSI-X) depending | |
1629 | * system capabilities. | |
1630 | * | |
1631 | * Try MSI-X first | |
1632 | * | |
1633 | * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs | |
1634 | * (the second to last INTR is used for WQ/RQ errors) | |
1635 | * (the last INTR is used for notifications) | |
1636 | */ | |
1637 | ||
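/* With the n = m = 1 used here that works out to 1 RQ, 1 WQ, 2 CQs and
 * 4 MSI-X vectors in total: one for the RQ, one for the WQ, one for
 * WQ/RQ errors, and one for notifications.
 */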
1638 | BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2); | |
1639 | for (i = 0; i < n + m + 2; i++) | |
1640 | enic->msix_entry[i].entry = i; | |
1641 | ||
1642 | if (enic->config.intr_mode < 1 && | |
1643 | enic->rq_count >= n && | |
1644 | enic->wq_count >= m && | |
1645 | enic->cq_count >= n + m && | |
1646 | enic->intr_count >= n + m + 2 && | |
1647 | !pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) { | |
1648 | ||
1649 | enic->rq_count = n; | |
1650 | enic->wq_count = m; | |
1651 | enic->cq_count = n + m; | |
1652 | enic->intr_count = n + m + 2; | |
1653 | ||
1654 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSIX); | |
1655 | ||
1656 | return 0; | |
1657 | } | |
1658 | ||
1659 | /* Next try MSI | |
1660 | * | |
1661 | * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR | |
1662 | */ | |
1663 | ||
1664 | if (enic->config.intr_mode < 2 && | |
1665 | enic->rq_count >= 1 && | |
1666 | enic->wq_count >= 1 && | |
1667 | enic->cq_count >= 2 && | |
1668 | enic->intr_count >= 1 && | |
1669 | !pci_enable_msi(enic->pdev)) { | |
1670 | ||
1671 | enic->rq_count = 1; | |
1672 | enic->wq_count = 1; | |
1673 | enic->cq_count = 2; | |
1674 | enic->intr_count = 1; | |
1675 | ||
1676 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI); | |
1677 | ||
1678 | return 0; | |
1679 | } | |
1680 | ||
1681 | /* Next try INTx | |
1682 | * | |
1683 | * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs | |
1684 | * (the first INTR is used for WQ/RQ) | |
1685 | * (the second INTR is used for WQ/RQ errors) | |
1686 | * (the last INTR is used for notifications) | |
1687 | */ | |
1688 | ||
1689 | if (enic->config.intr_mode < 3 && | |
1690 | enic->rq_count >= 1 && | |
1691 | enic->wq_count >= 1 && | |
1692 | enic->cq_count >= 2 && | |
1693 | enic->intr_count >= 3) { | |
1694 | ||
1695 | enic->rq_count = 1; | |
1696 | enic->wq_count = 1; | |
1697 | enic->cq_count = 2; | |
1698 | enic->intr_count = 3; | |
1699 | ||
1700 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX); | |
1701 | ||
1702 | return 0; | |
1703 | } | |
1704 | ||
1705 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | |
1706 | ||
1707 | return -EINVAL; | |
1708 | } | |
1709 | ||
1710 | static void enic_clear_intr_mode(struct enic *enic) | |
1711 | { | |
1712 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1713 | case VNIC_DEV_INTR_MODE_MSIX: | |
1714 | pci_disable_msix(enic->pdev); | |
1715 | break; | |
1716 | case VNIC_DEV_INTR_MODE_MSI: | |
1717 | pci_disable_msi(enic->pdev); | |
1718 | break; | |
1719 | default: | |
1720 | break; | |
1721 | } | |
1722 | ||
1723 | vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); | |
1724 | } | |
1725 | ||
afe29f7a SH |
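/* Callbacks the core network stack invokes on this interface; attached to
 * the net_device via netdev->netdev_ops in enic_probe() below.
 */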
1726 | static const struct net_device_ops enic_netdev_ops = { |
1727 | .ndo_open = enic_open, | |
1728 | .ndo_stop = enic_stop, | |
00829823 | 1729 | .ndo_start_xmit = enic_hard_start_xmit, |
afe29f7a SH |
1730 | .ndo_get_stats = enic_get_stats, |
1731 | .ndo_validate_addr = eth_validate_addr, | |
fe96aaa1 | 1732 | .ndo_set_mac_address = eth_mac_addr, |
afe29f7a SH |
1733 | .ndo_set_multicast_list = enic_set_multicast_list, |
1734 | .ndo_change_mtu = enic_change_mtu, | |
1735 | .ndo_vlan_rx_register = enic_vlan_rx_register, | |
1736 | .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, | |
1737 | .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid, | |
1738 | .ndo_tx_timeout = enic_tx_timeout, | |
1739 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
1740 | .ndo_poll_controller = enic_poll_controller, | |
1741 | #endif | |
1742 | }; | |
1743 | ||
27e6c7d3 SF |
1744 | static void enic_iounmap(struct enic *enic) |
1745 | { | |
1746 | unsigned int i; | |
1747 | ||
1748 | for (i = 0; i < ARRAY_SIZE(enic->bar); i++) | |
1749 | if (enic->bar[i].vaddr) | |
1750 | iounmap(enic->bar[i].vaddr); | |
1751 | } | |
1752 | ||
01f2e4ea SF |
1753 | static int __devinit enic_probe(struct pci_dev *pdev, |
1754 | const struct pci_device_id *ent) | |
1755 | { | |
1756 | struct net_device *netdev; | |
1757 | struct enic *enic; | |
1758 | int using_dac = 0; | |
1759 | unsigned int i; | |
1760 | int err; | |
1761 | ||
01f2e4ea SF |
1762 | /* Allocate net device structure and initialize. Private |
1763 | * instance data is initialized to zero. | |
1764 | */ | |
1765 | ||
1766 | netdev = alloc_etherdev(sizeof(struct enic)); | |
1767 | if (!netdev) { | |
1768 | printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n"); | |
1769 | return -ENOMEM; | |
1770 | } | |
1771 | ||
01f2e4ea SF |
1772 | pci_set_drvdata(pdev, netdev); |
1773 | ||
1774 | SET_NETDEV_DEV(netdev, &pdev->dev); | |
1775 | ||
1776 | enic = netdev_priv(netdev); | |
1777 | enic->netdev = netdev; | |
1778 | enic->pdev = pdev; | |
1779 | ||
1780 | /* Setup PCI resources | |
1781 | */ | |
1782 | ||
1783 | err = pci_enable_device(pdev); | |
1784 | if (err) { | |
1785 | printk(KERN_ERR PFX | |
4b75a442 | 1786 | "Cannot enable PCI device, aborting.\n"); |
01f2e4ea SF |
1787 | goto err_out_free_netdev; |
1788 | } | |
1789 | ||
1790 | err = pci_request_regions(pdev, DRV_NAME); | |
1791 | if (err) { | |
1792 | printk(KERN_ERR PFX | |
4b75a442 | 1793 | "Cannot request PCI regions, aborting.\n"); |
01f2e4ea SF |
1794 | goto err_out_disable_device; |
1795 | } | |
1796 | ||
1797 | pci_set_master(pdev); | |
1798 | ||
1799 | /* Query PCI controller on system for DMA addressing | |
1800 | * limitation for the device. Try 40-bit first, and | |
1801 | * fall back to 32-bit. | |
1802 | */ | |
1803 | ||
50cf156a | 1804 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); |
01f2e4ea | 1805 | if (err) { |
284901a9 | 1806 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
01f2e4ea SF |
1807 | if (err) { |
1808 | printk(KERN_ERR PFX | |
4b75a442 | 1809 | "No usable DMA configuration, aborting.\n"); |
01f2e4ea SF |
1810 | goto err_out_release_regions; |
1811 | } | |
284901a9 | 1812 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
01f2e4ea SF |
1813 | if (err) { |
1814 | printk(KERN_ERR PFX | |
4b75a442 SF |
1815 | "Unable to obtain 32-bit DMA " |
1816 | "for consistent allocations, aborting.\n"); | |
01f2e4ea SF |
1817 | goto err_out_release_regions; |
1818 | } | |
1819 | } else { | |
50cf156a | 1820 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); |
01f2e4ea SF |
1821 | if (err) { |
1822 | printk(KERN_ERR PFX | |
4b75a442 SF |
1823 | "Unable to obtain 40-bit DMA " |
1824 | "for consistent allocations, aborting.\n"); | |
01f2e4ea SF |
1825 | goto err_out_release_regions; |
1826 | } | |
1827 | using_dac = 1; | |
1828 | } | |
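/* using_dac is remembered so that NETIF_F_HIGHDMA can be advertised later
 * in probe: with the 40-bit mask in place the device can DMA to and from
 * buffers above the 4 GB boundary.
 */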
1829 | ||
27e6c7d3 | 1830 | /* Map vNIC resources from BAR0-5 |
01f2e4ea SF |
1831 | */ |
1832 | ||
27e6c7d3 SF |
1833 | for (i = 0; i < ARRAY_SIZE(enic->bar); i++) { |
1834 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) | |
1835 | continue; | |
1836 | enic->bar[i].len = pci_resource_len(pdev, i); | |
1837 | enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len); | |
1838 | if (!enic->bar[i].vaddr) { | |
1839 | printk(KERN_ERR PFX | |
1840 | "Cannot memory-map BAR %d, aborting.\n", i); | |
1841 | err = -ENODEV; | |
1842 | goto err_out_iounmap; | |
1843 | } | |
1844 | enic->bar[i].bus_addr = pci_resource_start(pdev, i); | |
01f2e4ea SF |
1845 | } |
1846 | ||
1847 | /* Register vNIC device | |
1848 | */ | |
1849 | ||
27e6c7d3 SF |
1850 | enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar, |
1851 | ARRAY_SIZE(enic->bar)); | |
01f2e4ea SF |
1852 | if (!enic->vdev) { |
1853 | printk(KERN_ERR PFX | |
4b75a442 | 1854 | "vNIC registration failed, aborting.\n"); |
01f2e4ea SF |
1855 | err = -ENODEV; |
1856 | goto err_out_iounmap; | |
1857 | } | |
1858 | ||
1859 | /* Issue device open to get device in known state | |
1860 | */ | |
1861 | ||
1862 | err = enic_dev_open(enic); | |
1863 | if (err) { | |
1864 | printk(KERN_ERR PFX | |
4b75a442 | 1865 | "vNIC dev open failed, aborting.\n"); |
01f2e4ea SF |
1866 | goto err_out_vnic_unregister; |
1867 | } | |
1868 | ||
1869 | /* Issue device init to initialize the vnic-to-switch link. | |
1870 | * We'll start with carrier off and wait for link UP | |
1871 | * notification later to turn on carrier. We don't need | |
1872 | * to wait here for the vnic-to-switch link initialization | |
1873 | * to complete; link UP notification is the indication that | |
1874 | * the process is complete. | |
1875 | */ | |
1876 | ||
1877 | netif_carrier_off(netdev); | |
1878 | ||
1879 | err = vnic_dev_init(enic->vdev, 0); | |
1880 | if (err) { | |
1881 | printk(KERN_ERR PFX | |
4b75a442 | 1882 | "vNIC dev init failed, aborting.\n"); |
01f2e4ea SF |
1883 | goto err_out_dev_close; |
1884 | } | |
1885 | ||
1886 | /* Get vNIC configuration | |
1887 | */ | |
1888 | ||
1889 | err = enic_get_vnic_config(enic); | |
1890 | if (err) { | |
1891 | printk(KERN_ERR PFX | |
4b75a442 | 1892 | "Get vNIC configuration failed, aborting.\n"); |
01f2e4ea SF |
1893 | goto err_out_dev_close; |
1894 | } | |
1895 | ||
1896 | /* Get available resource counts | |
86ca9db7 | 1897 | */ |
01f2e4ea SF |
1898 | |
1899 | enic_get_res_counts(enic); | |
1900 | ||
1901 | /* Set interrupt mode based on resource counts and system | |
1902 | * capabilities | |
86ca9db7 | 1903 | */ |
01f2e4ea SF |
1904 | |
1905 | err = enic_set_intr_mode(enic); | |
1906 | if (err) { | |
1907 | printk(KERN_ERR PFX | |
4b75a442 | 1908 | "Failed to set intr mode, aborting.\n"); |
01f2e4ea SF |
1909 | goto err_out_dev_close; |
1910 | } | |
1911 | ||
1912 | /* Allocate and configure vNIC resources | |
1913 | */ | |
1914 | ||
1915 | err = enic_alloc_vnic_resources(enic); | |
1916 | if (err) { | |
1917 | printk(KERN_ERR PFX | |
4b75a442 | 1918 | "Failed to alloc vNIC resources, aborting.\n"); |
01f2e4ea SF |
1919 | goto err_out_free_vnic_resources; |
1920 | } | |
1921 | ||
1922 | enic_init_vnic_resources(enic); | |
1923 | ||
68f71708 | 1924 | err = enic_set_niccfg(enic); |
01f2e4ea SF |
1925 | if (err) { |
1926 | printk(KERN_ERR PFX | |
4b75a442 | 1927 | "Failed to config nic, aborting.\n"); |
01f2e4ea SF |
1928 | goto err_out_free_vnic_resources; |
1929 | } | |
1930 | ||
1931 | /* Setup notification timer, HW reset task, and locks | |
1932 | */ | |
1933 | ||
1934 | init_timer(&enic->notify_timer); | |
1935 | enic->notify_timer.function = enic_notify_timer; | |
1936 | enic->notify_timer.data = (unsigned long)enic; | |
1937 | ||
1938 | INIT_WORK(&enic->reset, enic_reset); | |
1939 | ||
1940 | for (i = 0; i < enic->wq_count; i++) | |
1941 | spin_lock_init(&enic->wq_lock[i]); | |
1942 | ||
1943 | spin_lock_init(&enic->devcmd_lock); | |
1944 | ||
1945 | /* Register net device | |
1946 | */ | |
1947 | ||
1948 | enic->port_mtu = enic->config.mtu; | |
1949 | (void)enic_change_mtu(netdev, enic->port_mtu); | |
1950 | ||
1951 | err = enic_set_mac_addr(netdev, enic->mac_addr); | |
1952 | if (err) { | |
1953 | printk(KERN_ERR PFX | |
4b75a442 SF |
1954 | "Invalid MAC address, aborting.\n"); |
1955 | goto err_out_free_vnic_resources; | |
01f2e4ea SF |
1956 | } |
1957 | ||
afe29f7a | 1958 | netdev->netdev_ops = &enic_netdev_ops; |
01f2e4ea SF |
1959 | netdev->watchdog_timeo = 2 * HZ; |
1960 | netdev->ethtool_ops = &enic_ethtool_ops; | |
01f2e4ea SF |
1961 | |
1962 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | |
1963 | default: | |
1964 | netif_napi_add(netdev, &enic->napi, enic_poll, 64); | |
1965 | break; | |
1966 | case VNIC_DEV_INTR_MODE_MSIX: | |
1967 | netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64); | |
1968 | break; | |
1969 | } | |
1970 | ||
9f63a7c6 SF |
1971 | netdev->features |= NETIF_F_HW_VLAN_TX | |
1972 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; | |
01f2e4ea SF |
1973 | if (ENIC_SETTING(enic, TXCSUM)) |
1974 | netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; | |
1975 | if (ENIC_SETTING(enic, TSO)) | |
1976 | netdev->features |= NETIF_F_TSO | | |
1977 | NETIF_F_TSO6 | NETIF_F_TSO_ECN; | |
86ca9db7 SF |
1978 | if (ENIC_SETTING(enic, LRO)) |
1979 | netdev->features |= NETIF_F_LRO; | |
01f2e4ea SF |
1980 | if (using_dac) |
1981 | netdev->features |= NETIF_F_HIGHDMA; | |
1982 | ||
01f2e4ea SF |
1983 | enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM); |
1984 | ||
86ca9db7 SF |
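/* Configure the inet_lro aggregation context: at most ENIC_LRO_MAX_AGGR
 * frames are merged per flow, ENIC_LRO_MAX_DESC flows are tracked at a
 * time, VLAN IDs are extracted from aggregated frames, and aggregation
 * is driven from NAPI context.
 */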
1985 | enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR; |
1986 | enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC; | |
1987 | enic->lro_mgr.lro_arr = enic->lro_desc; | |
1988 | enic->lro_mgr.get_skb_header = enic_get_skb_header; | |
1989 | enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; | |
1990 | enic->lro_mgr.dev = netdev; | |
1991 | enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE; | |
1992 | enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; | |
1993 | ||
01f2e4ea SF |
1994 | err = register_netdev(netdev); |
1995 | if (err) { | |
1996 | printk(KERN_ERR PFX | |
4b75a442 SF |
1997 | "Cannot register net device, aborting.\n"); |
1998 | goto err_out_free_vnic_resources; | |
01f2e4ea SF |
1999 | } |
2000 | ||
2001 | return 0; | |
2002 | ||
01f2e4ea SF |
2003 | err_out_free_vnic_resources: |
2004 | enic_free_vnic_resources(enic); | |
01f2e4ea SF |
2005 | err_out_dev_close: |
2006 | vnic_dev_close(enic->vdev); | |
2007 | err_out_vnic_unregister: | |
2008 | enic_clear_intr_mode(enic); | |
2009 | vnic_dev_unregister(enic->vdev); | |
2010 | err_out_iounmap: | |
2011 | enic_iounmap(enic); | |
2012 | err_out_release_regions: | |
2013 | pci_release_regions(pdev); | |
2014 | err_out_disable_device: | |
2015 | pci_disable_device(pdev); | |
2016 | err_out_free_netdev: | |
2017 | pci_set_drvdata(pdev, NULL); | |
2018 | free_netdev(netdev); | |
2019 | ||
2020 | return err; | |
2021 | } | |
2022 | ||
2023 | static void __devexit enic_remove(struct pci_dev *pdev) | |
2024 | { | |
2025 | struct net_device *netdev = pci_get_drvdata(pdev); | |
2026 | ||
2027 | if (netdev) { | |
2028 | struct enic *enic = netdev_priv(netdev); | |
2029 | ||
2030 | flush_scheduled_work(); | |
2031 | unregister_netdev(netdev); | |
01f2e4ea | 2032 | enic_free_vnic_resources(enic); |
01f2e4ea SF |
2033 | vnic_dev_close(enic->vdev); |
2034 | enic_clear_intr_mode(enic); | |
2035 | vnic_dev_unregister(enic->vdev); | |
2036 | enic_iounmap(enic); | |
2037 | pci_release_regions(pdev); | |
2038 | pci_disable_device(pdev); | |
2039 | pci_set_drvdata(pdev, NULL); | |
2040 | free_netdev(netdev); | |
2041 | } | |
2042 | } | |
2043 | ||
2044 | static struct pci_driver enic_driver = { | |
2045 | .name = DRV_NAME, | |
2046 | .id_table = enic_id_table, | |
2047 | .probe = enic_probe, | |
2048 | .remove = __devexit_p(enic_remove), | |
2049 | }; | |
2050 | ||
2051 | static int __init enic_init_module(void) | |
2052 | { | |
2053 | printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); | |
2054 | ||
2055 | return pci_register_driver(&enic_driver); | |
2056 | } | |
2057 | ||
2058 | static void __exit enic_cleanup_module(void) | |
2059 | { | |
2060 | pci_unregister_driver(&enic_driver); | |
2061 | } | |
2062 | ||
2063 | module_init(enic_init_module); | |
2064 | module_exit(enic_cleanup_module); |