/*
 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
        | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;          /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
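/*
 * Usage note: the default of -1 is out of range for netif_msg_init(),
 * so each port keeps default_msg; a value N in 0..16 enables the N
 * lowest NETIF_MSG_* classes instead (0 disables all messages).
 */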

static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);

static struct pci_device_id c2_pci_table[] = {
        { PCI_DEVICE(0x18b8, 0xb001) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

static void c2_print_macaddr(struct net_device *netdev)
{
        pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
}

static void c2_set_rxbufsize(struct c2_port *c2_port)
{
        struct net_device *netdev = c2_port->netdev;

        if (netdev->mtu > RX_BUF_SIZE)
                c2_port->rx_buf_size =
                    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
                    NET_IP_ALIGN;
        else
                c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_txp_ring)
{
        struct c2_tx_desc *tx_desc;
        struct c2_txp_desc __iomem *txp_desc;
        struct c2_element *elem;
        int i;

        tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
        if (!tx_ring->start)
                return -ENOMEM;

        elem = tx_ring->start;
        tx_desc = vaddr;
        txp_desc = mmio_txp_ring;
        for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
                tx_desc->len = 0;
                tx_desc->status = 0;

                /* Set TXP_HTXD_UNINIT */
                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                             (void __iomem *) txp_desc + C2_TXP_ADDR);
                __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                             (void __iomem *) txp_desc + C2_TXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = tx_desc;
                elem->hw_desc = txp_desc;

                if (i == tx_ring->count - 1) {
                        elem->next = tx_ring->start;
                        tx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        tx_desc->next_offset =
                            base + (i + 1) * sizeof(*tx_desc);
                }
        }

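        /* to_use is the next element to hand to the adapter and to_clean
         * the next one to reclaim; they coincide on an empty ring. */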
        tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

        return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 */
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
                            dma_addr_t base, void __iomem * mmio_rxp_ring)
{
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_desc __iomem *rxp_desc;
        struct c2_element *elem;
        int i;

        rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
        if (!rx_ring->start)
                return -ENOMEM;

        elem = rx_ring->start;
        rx_desc = vaddr;
        rxp_desc = mmio_rxp_ring;
        for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
                rx_desc->len = 0;
                rx_desc->status = 0;

                /* Set RXP_HRXD_UNINIT */
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
                             (void __iomem *) rxp_desc + C2_RXP_STATUS);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
                __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             (void __iomem *) rxp_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             (void __iomem *) rxp_desc + C2_RXP_FLAGS);

                elem->skb = NULL;
                elem->ht_desc = rx_desc;
                elem->hw_desc = rxp_desc;

                if (i == rx_ring->count - 1) {
                        elem->next = rx_ring->start;
                        rx_desc->next_offset = base;
                } else {
                        elem->next = elem + 1;
                        rx_desc->next_offset =
                            base + (i + 1) * sizeof(*rx_desc);
                }
        }

        rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

        return 0;
}

/* Setup buffer for receiving */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen;
        struct c2_rxp_hdr *rxp_hdr;

        skb = dev_alloc_skb(c2_port->rx_buf_size);
        if (unlikely(!skb)) {
                pr_debug("%s: out of memory for receive\n",
                         c2_port->netdev->name);
                return -ENOMEM;
        }

        /* Zero out the rxp hdr in the sk_buff */
        memset(skb->data, 0, sizeof(*rxp_hdr));

        skb->dev = c2_port->netdev;

        maplen = c2_port->rx_buf_size;
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen,
                           PCI_DMA_FROMDEVICE);
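        /* Note that the mapping covers the leading struct c2_rxp_hdr as
         * well: the adapter writes its completion status there. */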

        /* Set the sk_buff RXP_header to RXP_HRXD_READY */
        rxp_hdr = (struct c2_rxp_hdr *) skb->data;
        rxp_hdr->flags = RXP_HRXD_READY;

        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;
        rx_desc->len = maplen;

        return 0;
}

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        int ret = 0;

        elem = rx_ring->start;
        do {
                if (c2_rx_alloc(c2_port, elem)) {
                        ret = 1;
                        break;
                }
        } while ((elem = elem->next) != rx_ring->start);

        rx_ring->to_clean = rx_ring->start;
        return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;

        elem = rx_ring->start;
        do {
                rx_desc = elem->ht_desc;
                rx_desc->len = 0;

                __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
                __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
                __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
                __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
                             elem->hw_desc + C2_RXP_ADDR);
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
                             elem->hw_desc + C2_RXP_FLAGS);

                if (elem->skb) {
                        pci_unmap_single(c2dev->pcidev, elem->mapaddr,
                                         elem->maplen, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(elem->skb);
                        elem->skb = NULL;
                }
        } while ((elem = elem->next) != rx_ring->start);
}

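/* Unmap and release the skb (if any) attached to a reclaimed TX element. */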
static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
        struct c2_tx_desc *tx_desc = elem->ht_desc;

        tx_desc->len = 0;

        pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
                         PCI_DMA_TODEVICE);

        if (elem->skb) {
                dev_kfree_skb_any(elem->skb);
                elem->skb = NULL;
        }

        return 0;
}

/* Free all buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;
        int retry;
        unsigned long flags;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        elem = tx_ring->start;

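        /*
         * Sweep the ring until no descriptor is left in the READY state.
         * READY descriptors were posted but never consumed by the firmware;
         * force those to DONE and count them as dropped, and return all
         * others to UNINIT to match the adapter's post-reset state.
         */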
        do {
                retry = 0;
                do {
                        txp_htxd.flags =
                            readw(elem->hw_desc + C2_TXP_FLAGS);

                        if (txp_htxd.flags == TXP_HTXD_READY) {
                                retry = 1;
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq(0,
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
                                             elem->hw_desc + C2_TXP_FLAGS);
                                c2_port->netdev->stats.tx_dropped++;
                                break;
                        } else {
                                __raw_writew(0,
                                             elem->hw_desc + C2_TXP_LEN);
                                __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
                                             elem->hw_desc + C2_TXP_ADDR);
                                __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
                                             elem->hw_desc + C2_TXP_FLAGS);
                        }

                        c2_tx_free(c2_port->c2dev, elem);

                } while ((elem = elem->next) != tx_ring->start);
        } while (retry);

        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

        if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(c2_port->netdev);

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
 */
static void c2_tx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        struct c2_txp_desc txp_htxd;

        spin_lock(&c2_port->tx_lock);

        for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
             elem = elem->next) {
                txp_htxd.flags =
                    be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));

                if (txp_htxd.flags != TXP_HTXD_DONE)
                        break;

                if (netif_msg_tx_done(c2_port)) {
                        /* PCI reads are expensive in fast path */
                        txp_htxd.len =
                            be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
                        pr_debug("%s: tx done slot %3Zu status 0x%x len "
                                 "%5u bytes\n",
                                 netdev->name, elem - tx_ring->start,
                                 txp_htxd.flags, txp_htxd.len);
                }

                c2_tx_free(c2dev, elem);
                ++(c2_port->tx_avail);
        }

        tx_ring->to_clean = elem;

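        /* Wake the queue once there is room for a maximally fragmented skb. */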
        if (netif_queue_stopped(netdev)
            && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
                netif_wake_queue(netdev);

        spin_unlock(&c2_port->tx_lock);
}

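/*
 * Recycle a receive buffer whose RXP header failed validation: dump the
 * bad header when debugging, rewind the skb and repost the existing DMA
 * mapping to the adapter.
 */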
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
        struct c2_rx_desc *rx_desc = elem->ht_desc;
        struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

        if (rxp_hdr->status != RXP_HRXD_OK ||
            rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
                pr_debug("BAD RXP_HRXD\n");
                pr_debug("  rx_desc : %p\n", rx_desc);
                pr_debug("    index : %Zu\n",
                         elem - c2_port->rx_ring.start);
                pr_debug("    len   : %u\n", rx_desc->len);
                pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
                         (void *) __pa((unsigned long) rxp_hdr));
                pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
                pr_debug("    status: 0x%x\n", rxp_hdr->status);
                pr_debug("    len   : %u\n", rxp_hdr->len);
                pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
        }

        /* Setup the skb for reuse since we're dropping this pkt */
        elem->skb->data = elem->skb->head;
        skb_reset_tail_pointer(elem->skb);

        /* Zero out the rxp hdr in the sk_buff */
        memset(elem->skb->data, 0, sizeof(*rxp_hdr));

        /* Write the descriptor to the adapter's rx ring */
        __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
        __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
        __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
                     elem->hw_desc + C2_RXP_LEN);
        __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
                     elem->hw_desc + C2_RXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                     elem->hw_desc + C2_RXP_FLAGS);

        pr_debug("packet dropped\n");
        c2_port->netdev->stats.rx_dropped++;
}

static void c2_rx_interrupt(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *rx_ring = &c2_port->rx_ring;
        struct c2_element *elem;
        struct c2_rx_desc *rx_desc;
        struct c2_rxp_hdr *rxp_hdr;
        struct sk_buff *skb;
        dma_addr_t mapaddr;
        u32 maplen, buflen;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->lock, flags);

        /* Begin where we left off */
        rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

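        /* Walk at most one full lap of the ring, stopping as soon as the
         * adapter has not yet marked a descriptor DONE. */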
        for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
             elem = elem->next) {
                rx_desc = elem->ht_desc;
                mapaddr = elem->mapaddr;
                maplen = elem->maplen;
                skb = elem->skb;
                rxp_hdr = (struct c2_rxp_hdr *) skb->data;

                if (rxp_hdr->flags != RXP_HRXD_DONE)
                        break;
                buflen = rxp_hdr->len;

                /* Sanity check the RXP header */
                if (rxp_hdr->status != RXP_HRXD_OK ||
                    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /*
                 * Allocate and map a new skb for replenishing the host
                 * RX desc
                 */
                if (c2_rx_alloc(c2_port, elem)) {
                        c2_rx_error(c2_port, elem);
                        continue;
                }

                /* Unmap the old skb */
                pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
                                 PCI_DMA_FROMDEVICE);

                prefetch(skb->data);

                /*
                 * Skip past the leading 8 bytes comprising the
                 * "struct c2_rxp_hdr", prepended by the adapter
                 * to the usual Ethernet header ("struct ethhdr"),
                 * to the start of the raw Ethernet packet.
                 *
                 * Fix up the various fields in the sk_buff before
                 * passing it up to netif_rx().  The transfer size
                 * (in bytes) specified by the adapter len field of
                 * the "struct rxp_hdr_t" does NOT include the
                 * "sizeof(struct c2_rxp_hdr)".
                 */
                skb->data += sizeof(*rxp_hdr);
                skb_set_tail_pointer(skb, buflen);
                skb->len = buflen;
                skb->protocol = eth_type_trans(skb, netdev);

                netif_rx(skb);

                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += buflen;
        }

        /* Save where we left off */
        rx_ring->to_clean = elem;
        c2dev->cur_rx = elem - rx_ring->start;
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        spin_unlock_irqrestore(&c2dev->lock, flags);
}

/*
 * Handle netisr0 TX & RX interrupts.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
        unsigned int netisr0, dmaisr;
        int handled = 0;
        struct c2_dev *c2dev = (struct c2_dev *) dev_id;

        /* Process CCILNET interrupts */
        netisr0 = readl(c2dev->regs + C2_NISR0);
        if (netisr0) {

                /*
                 * There is an issue with the firmware that always
                 * provides the status of RX for both TX & RX
                 * interrupts.  So process both queues here.
                 */
                c2_rx_interrupt(c2dev->netdev);
                c2_tx_interrupt(c2dev->netdev);

                /* Clear the interrupt */
                writel(netisr0, c2dev->regs + C2_NISR0);
                handled++;
        }

        /* Process RNIC interrupts */
        dmaisr = readl(c2dev->regs + C2_DISR);
        if (dmaisr) {
                writel(dmaisr, c2dev->regs + C2_DISR);
                c2_rnic_interrupt(c2dev);
                handled++;
        }

        if (handled) {
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
        }
}

static int c2_up(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_element *elem;
        struct c2_rxp_hdr *rxp_hdr;
        struct in_device *in_dev;
        size_t rx_size, tx_size;
        int ret, i;
        unsigned int netimr0;

        if (netif_msg_ifup(c2_port))
                pr_debug("%s: enabling interface\n", netdev->name);

        /* Set the Rx buffer size based on MTU */
        c2_set_rxbufsize(c2_port);

        /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
        rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
        tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

        c2_port->mem_size = tx_size + rx_size;
        c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
                                            &c2_port->dma);
        if (c2_port->mem == NULL) {
                pr_debug("Unable to allocate memory for "
                         "host descriptor rings\n");
                return -ENOMEM;
        }

        memset(c2_port->mem, 0, c2_port->mem_size);

        /* Create the Rx host descriptor ring */
        if ((ret =
             c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
                              c2dev->mmio_rxp_ring))) {
                pr_debug("Unable to create RX ring\n");
                goto bail0;
        }

        /* Allocate Rx buffers for the host descriptor ring */
        if (c2_rx_fill(c2_port)) {
                pr_debug("Unable to fill RX ring\n");
                goto bail1;
        }

        /* Create the Tx host descriptor ring */
        if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
                                    c2_port->dma + rx_size,
                                    c2dev->mmio_txp_ring))) {
                pr_debug("Unable to create TX ring\n");
                goto bail1;
        }

        /* Set the TX pointer to where we left off */
        c2_port->tx_avail = c2_port->tx_ring.count - 1;
        c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
            c2_port->tx_ring.start + c2dev->cur_tx;

        /* missing: Initialize MAC */

        BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
        for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
             i++, elem++) {
                rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
                rxp_hdr->flags = 0;
                __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
                             elem->hw_desc + C2_RXP_FLAGS);
        }

        /* Enable network packets */
        netif_start_queue(netdev);

        /* Enable IRQ */
        writel(0, c2dev->regs + C2_IDIS);
        netimr0 = readl(c2dev->regs + C2_NIMR0);
        netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
        writel(netimr0, c2dev->regs + C2_NIMR0);
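        /* NIMR0 appears to be a per-source interrupt mask: clearing the
         * HTX/HRX bits unmasks the TX/RX interrupts that c2_interrupt()
         * services. */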

        /* Tell the stack to ignore arp requests for ipaddrs bound to
         * other interfaces.  This is needed to prevent the host stack
         * from responding to arp requests to the ipaddr bound on the
         * rdma interface.
         */
        in_dev = in_dev_get(netdev);
        IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
        in_dev_put(in_dev);

        return 0;

bail1:
        c2_rx_clean(c2_port);
        kfree(c2_port->rx_ring.start);

bail0:
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return ret;
}

static int c2_down(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;

        if (netif_msg_ifdown(c2_port))
                pr_debug("%s: disabling interface\n",
                         netdev->name);

        /* Wait for all the queued packets to get sent */
        c2_tx_interrupt(netdev);

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Disable IRQs by clearing the interrupt mask */
        writel(1, c2dev->regs + C2_IDIS);
        writel(0, c2dev->regs + C2_NIMR0);

        /* missing: Stop transmitter */

        /* missing: Stop receiver */

        /* Reset the adapter, ensures the driver is in sync with the RXP */
        c2_reset(c2_port);

        /* missing: Turn off LEDs here */

        /* Free all buffers in the host descriptor rings */
        c2_tx_clean(c2_port);
        c2_rx_clean(c2_port);

        /* Free the host descriptor rings */
        kfree(c2_port->rx_ring.start);
        kfree(c2_port->tx_ring.start);
        pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
                            c2_port->dma);

        return 0;
}

static void c2_reset(struct c2_port *c2_port)
{
        struct c2_dev *c2dev = c2_port->c2dev;
        unsigned int cur_rx = c2dev->cur_rx;

        /* Tell the hardware to quiesce */
        C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

        /*
         * The hardware will reset the C2_PCI_HRX_QUI bit once
         * the RXP is quiesced.  Wait 2 seconds for this.
         */
        ssleep(2);

        cur_rx = C2_GET_CUR_RX(c2dev);

        if (cur_rx & C2_PCI_HRX_QUI)
                pr_debug("c2_reset: failed to quiesce the hardware!\n");

        cur_rx &= ~C2_PCI_HRX_QUI;

        c2dev->cur_rx = cur_rx;

        pr_debug("Current RX: %u\n", c2dev->cur_rx);
}

static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);
        struct c2_dev *c2dev = c2_port->c2dev;
        struct c2_ring *tx_ring = &c2_port->tx_ring;
        struct c2_element *elem;
        dma_addr_t mapaddr;
        u32 maplen;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&c2_port->tx_lock, flags);

        if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&c2_port->tx_lock, flags);

                pr_debug("%s: Tx ring full when queue awake!\n",
                         netdev->name);
                return NETDEV_TX_BUSY;
        }

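        /* Map the linear head of the skb first; any page fragments are
         * mapped and queued below. */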
        maplen = skb_headlen(skb);
        mapaddr =
            pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

        elem = tx_ring->to_use;
        elem->skb = skb;
        elem->mapaddr = mapaddr;
        elem->maplen = maplen;

        /* Tell HW to xmit */
        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                     elem->hw_desc + C2_TXP_ADDR);
        __raw_writew((__force u16) cpu_to_be16(maplen),
                     elem->hw_desc + C2_TXP_LEN);
        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                     elem->hw_desc + C2_TXP_FLAGS);

        netdev->stats.tx_packets++;
        netdev->stats.tx_bytes += maplen;

        /* Loop thru additional data fragments and queue them */
        if (skb_shinfo(skb)->nr_frags) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        maplen = frag->size;
                        mapaddr =
                            pci_map_page(c2dev->pcidev, frag->page,
                                         frag->page_offset, maplen,
                                         PCI_DMA_TODEVICE);

                        elem = elem->next;
                        elem->skb = NULL;
                        elem->mapaddr = mapaddr;
                        elem->maplen = maplen;

                        /* Tell HW to xmit */
                        __raw_writeq((__force u64) cpu_to_be64(mapaddr),
                                     elem->hw_desc + C2_TXP_ADDR);
                        __raw_writew((__force u16) cpu_to_be16(maplen),
                                     elem->hw_desc + C2_TXP_LEN);
                        __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
                                     elem->hw_desc + C2_TXP_FLAGS);

                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += maplen;
                }
        }

        tx_ring->to_use = elem->next;
        c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

        if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
                netif_stop_queue(netdev);
                if (netif_msg_tx_queued(c2_port))
                        pr_debug("%s: transmit queue full\n",
                                 netdev->name);
        }

        spin_unlock_irqrestore(&c2_port->tx_lock, flags);

        netdev->trans_start = jiffies;

        return NETDEV_TX_OK;
}

static void c2_tx_timeout(struct net_device *netdev)
{
        struct c2_port *c2_port = netdev_priv(netdev);

        if (netif_msg_timer(c2_port))
                pr_debug("%s: tx timeout\n", netdev->name);

        c2_tx_clean(c2_port);
}

static int c2_change_mtu(struct net_device *netdev, int new_mtu)
{
        int ret = 0;

        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
                return -EINVAL;

        netdev->mtu = new_mtu;

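        /* Bounce a running interface so c2_up() resizes the receive
         * buffers for the new MTU. */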
        if (netif_running(netdev)) {
                c2_down(netdev);

                c2_up(netdev);
        }

        return ret;
}

static const struct net_device_ops c2_netdev = {
        .ndo_open               = c2_up,
        .ndo_stop               = c2_down,
        .ndo_start_xmit         = c2_xmit_frame,
        .ndo_tx_timeout         = c2_tx_timeout,
        .ndo_change_mtu         = c2_change_mtu,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
};

/* Initialize network device */
static struct net_device *c2_devinit(struct c2_dev *c2dev,
                                     void __iomem * mmio_addr)
{
        struct c2_port *c2_port = NULL;
        struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));

        if (!netdev) {
                pr_debug("c2_port etherdev alloc failed");
                return NULL;
        }

        SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);

        netdev->netdev_ops = &c2_netdev;
        netdev->watchdog_timeo = C2_TX_TIMEOUT;
        netdev->irq = c2dev->pcidev->irq;

        c2_port = netdev_priv(netdev);
        c2_port->netdev = netdev;
        c2_port->c2dev = c2dev;
        c2_port->msg_enable = netif_msg_init(debug, default_msg);
        c2_port->tx_ring.count = C2_NUM_TX_DESC;
        c2_port->rx_ring.count = C2_NUM_RX_DESC;

        spin_lock_init(&c2_port->tx_lock);

        /* Copy our 48-bit ethernet hardware address */
        memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);

        /* Validate the MAC address */
        if (!is_valid_ether_addr(netdev->dev_addr)) {
                pr_debug("Invalid MAC Address\n");
                c2_print_macaddr(netdev);
                free_netdev(netdev);
                return NULL;
        }

        c2dev->netdev = netdev;

        return netdev;
}

static int __devinit c2_probe(struct pci_dev *pcidev,
                              const struct pci_device_id *ent)
{
        int ret = 0, i;
        unsigned long reg0_start, reg0_flags, reg0_len;
        unsigned long reg2_start, reg2_flags, reg2_len;
        unsigned long reg4_start, reg4_flags, reg4_len;
        unsigned kva_map_size;
        struct net_device *netdev = NULL;
        struct c2_dev *c2dev = NULL;
        void __iomem *mmio_regs = NULL;

        printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
               DRV_VERSION);

        /* Enable PCI device */
        ret = pci_enable_device(pcidev);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
                       pci_name(pcidev));
                goto bail0;
        }

        reg0_start = pci_resource_start(pcidev, BAR_0);
        reg0_len = pci_resource_len(pcidev, BAR_0);
        reg0_flags = pci_resource_flags(pcidev, BAR_0);

        reg2_start = pci_resource_start(pcidev, BAR_2);
        reg2_len = pci_resource_len(pcidev, BAR_2);
        reg2_flags = pci_resource_flags(pcidev, BAR_2);

        reg4_start = pci_resource_start(pcidev, BAR_4);
        reg4_len = pci_resource_len(pcidev, BAR_4);
        reg4_flags = pci_resource_flags(pcidev, BAR_4);

        pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
        pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
        pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

        /* Make sure PCI base addrs are MMIO */
        if (!(reg0_flags & IORESOURCE_MEM) ||
            !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
                printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Check for weird/broken PCI region reporting */
        if ((reg0_len < C2_REG0_SIZE) ||
            (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
                printk(KERN_ERR PFX "Invalid PCI region sizes\n");
                ret = -ENODEV;
                goto bail1;
        }

        /* Reserve PCI I/O and memory resources */
        ret = pci_request_regions(pcidev, DRV_NAME);
        if (ret) {
                printk(KERN_ERR PFX "%s: Unable to request regions\n",
                       pci_name(pcidev));
                goto bail1;
        }

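        /* Use 64-bit DMA addressing when the kernel's dma_addr_t can hold
         * it; otherwise fall back to a 32-bit mask. */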
        if ((sizeof(dma_addr_t) > 4)) {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
                if (ret < 0) {
                        printk(KERN_ERR PFX "64b DMA configuration failed\n");
                        goto bail2;
                }
        } else {
                ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
                if (ret < 0) {
                        printk(KERN_ERR PFX "32b DMA configuration failed\n");
                        goto bail2;
                }
        }

        /* Enables bus-mastering on the device */
        pci_set_master(pcidev);

        /* Remap the adapter PCI registers in BAR4 */
        mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                    sizeof(struct c2_adapter_pci_regs));
        if (!mmio_regs) {
                printk(KERN_ERR PFX
                       "Unable to remap adapter PCI registers in BAR4\n");
                ret = -EIO;
                goto bail2;
        }

        /* Validate PCI regs magic */
        for (i = 0; i < sizeof(c2_magic); i++) {
                if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
                        printk(KERN_ERR PFX "Downlevel Firmware boot loader "
                               "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
                               "utility to update your boot loader\n",
                               i + 1, sizeof(c2_magic),
                               readb(mmio_regs + C2_REGS_MAGIC + i),
                               c2_magic[i]);
                        printk(KERN_ERR PFX "Adapter not claimed\n");
                        iounmap(mmio_regs);
                        ret = -EIO;
                        goto bail2;
                }
        }

        /* Validate the adapter version */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
                printk(KERN_ERR PFX "Version mismatch "
                       "[fw=%u, c2=%u], Adapter not claimed\n",
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
                       C2_VERSION);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Validate the adapter IVN */
        if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
                printk(KERN_ERR PFX "Downlevel firmware level. You should be using "
                       "the OpenIB device support kit. "
                       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
                       C2_IVN);
                ret = -EINVAL;
                iounmap(mmio_regs);
                goto bail2;
        }

        /* Allocate hardware structure */
        c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
        if (!c2dev) {
                printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
                       pci_name(pcidev));
                ret = -ENOMEM;
                iounmap(mmio_regs);
                goto bail2;
        }

        memset(c2dev, 0, sizeof(*c2dev));
        spin_lock_init(&c2dev->lock);
        c2dev->pcidev = pcidev;
        c2dev->cur_tx = 0;

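        /*
         * C2_REGS_HRX_CUR seems to report an adapter-side address within
         * the HRXDQ window (based at 0xffffc000); the arithmetic below
         * converts it into a host ring index.
         */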
        /* Get the last RX index */
        c2dev->cur_rx =
            (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
             0xffffc000) / sizeof(struct c2_rxp_desc);

        /* Request an interrupt line for the driver */
        ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
        if (ret) {
                printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
                       pci_name(pcidev), pcidev->irq);
                iounmap(mmio_regs);
                goto bail3;
        }

        /* Set driver specific data */
        pci_set_drvdata(pcidev, c2dev);

        /* Initialize network device */
        if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
                iounmap(mmio_regs);
                goto bail4;
        }

        /* Save off the actual size prior to unmapping mmio_regs */
        kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

        /* Unmap the adapter PCI registers in BAR4 */
        iounmap(mmio_regs);

        /* Register network device */
        ret = register_netdev(netdev);
        if (ret) {
                printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
                       ret);
                goto bail5;
        }

        /* Disable network packets */
        netif_stop_queue(netdev);

        /* Remap the adapter HRXDQ PA space to kernel VA space */
        c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
                                               C2_RXP_HRXDQ_SIZE);
        if (!c2dev->mmio_rxp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
                ret = -EIO;
                goto bail6;
        }

        /* Remap the adapter HTXDQ PA space to kernel VA space */
        c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
                                               C2_TXP_HTXDQ_SIZE);
        if (!c2dev->mmio_txp_ring) {
                printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
                ret = -EIO;
                goto bail7;
        }

        /* Save off the current RX index in the last 4 bytes of the TXP Ring */
        C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

        /* Remap the PCI registers in adapter BAR0 to kernel VA space */
        c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
        if (!c2dev->regs) {
                printk(KERN_ERR PFX "Unable to remap BAR0\n");
                ret = -EIO;
                goto bail8;
        }

        /* Remap the PCI registers in adapter BAR4 to kernel VA space */
        c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
        c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
                                     kva_map_size);
        if (!c2dev->kva) {
                printk(KERN_ERR PFX "Unable to remap BAR4\n");
                ret = -EIO;
                goto bail9;
        }

        /* Print out the MAC address */
        c2_print_macaddr(netdev);

        ret = c2_rnic_init(c2dev);
        if (ret) {
                printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
                goto bail10;
        }

        if (c2_register_device(c2dev))
                goto bail10;

        return 0;

bail10:
        iounmap(c2dev->kva);

bail9:
        iounmap(c2dev->regs);

bail8:
        iounmap(c2dev->mmio_txp_ring);

bail7:
        iounmap(c2dev->mmio_rxp_ring);

bail6:
        unregister_netdev(netdev);

bail5:
        free_netdev(netdev);

bail4:
        free_irq(pcidev->irq, c2dev);

bail3:
        ib_dealloc_device(&c2dev->ibdev);

bail2:
        pci_release_regions(pcidev);

bail1:
        pci_disable_device(pcidev);

bail0:
        return ret;
}

static void __devexit c2_remove(struct pci_dev *pcidev)
{
        struct c2_dev *c2dev = pci_get_drvdata(pcidev);
        struct net_device *netdev = c2dev->netdev;

        /* Unregister with OpenIB */
        c2_unregister_device(c2dev);

        /* Clean up the RNIC resources */
        c2_rnic_term(c2dev);

        /* Remove network device from the kernel */
        unregister_netdev(netdev);

        /* Free network device */
        free_netdev(netdev);

        /* Free the interrupt line */
        free_irq(pcidev->irq, c2dev);

        /* missing: Turn LEDs off here */

        /* Unmap adapter PA space */
        iounmap(c2dev->kva);
        iounmap(c2dev->regs);
        iounmap(c2dev->mmio_txp_ring);
        iounmap(c2dev->mmio_rxp_ring);

        /* Free the hardware structure */
        ib_dealloc_device(&c2dev->ibdev);

        /* Release reserved PCI I/O and memory resources */
        pci_release_regions(pcidev);

        /* Disable PCI device */
        pci_disable_device(pcidev);

        /* Clear driver specific data */
        pci_set_drvdata(pcidev, NULL);
}

static struct pci_driver c2_pci_driver = {
        .name = DRV_NAME,
        .id_table = c2_pci_table,
        .probe = c2_probe,
        .remove = __devexit_p(c2_remove),
};

static int __init c2_init_module(void)
{
        return pci_register_driver(&c2_pci_driver);
}

static void __exit c2_exit_module(void)
{
        pci_unregister_driver(&c2_pci_driver);
}

module_init(c2_init_module);
module_exit(c2_exit_module);