bna: Enable Multi Buffer RX
[linux-2.6-block.git] drivers/net/ethernet/brocade/bna/bnad.c
1/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
f859d7cb 18#include <linux/bitops.h>
19#include <linux/netdevice.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/in.h>
23#include <linux/ethtool.h>
24#include <linux/if_vlan.h>
25#include <linux/if_ether.h>
26#include <linux/ip.h>
70c71606 27#include <linux/prefetch.h>
9d9779e7 28#include <linux/module.h>
29
30#include "bnad.h"
31#include "bna.h"
32#include "cna.h"
33
b7ee31c5 34static DEFINE_MUTEX(bnad_fwimg_mutex);
35
36/*
37 * Module params
38 */
39static uint bnad_msix_disable;
40module_param(bnad_msix_disable, uint, 0444);
41MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
42
43static uint bnad_ioc_auto_recover = 1;
44module_param(bnad_ioc_auto_recover, uint, 0444);
45MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
46
47static uint bna_debugfs_enable = 1;
48module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
49MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
50 " Range[false:0|true:1]");
51
52/*
53 * Global variables
54 */
482da0fa 55static u32 bnad_rxqs_per_cq = 2;
e1e0918f 56static u32 bna_id;
57static struct mutex bnad_list_mutex;
58static LIST_HEAD(bnad_list);
b7ee31c5 59static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
60
61/*
62 * Local MACROS
63 */
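/*
 * BNAD_GET_MBOX_IRQ - returns the IRQ used for mailbox events: the
 * dedicated mailbox MSI-X vector when MSI-X is enabled, otherwise the
 * function's legacy PCI interrupt line.
 */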
64#define BNAD_GET_MBOX_IRQ(_bnad) \
65 (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
8811e267 66 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
67 ((_bnad)->pcidev->irq))
68
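/*
 * BNAD_FILL_UNMAPQ_MEM_REQ - describes a kernel-virtual (KVA) memory
 * request of _num blocks of _size bytes each; used to size the per-queue
 * unmap arrays handed to the BNA layer.
 */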
5216562a 69#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
70do { \
71 (_res_info)->res_type = BNA_RES_T_MEM; \
72 (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
73 (_res_info)->res_u.mem_info.num = (_num); \
5216562a 74 (_res_info)->res_u.mem_info.len = (_size); \
75} while (0)
76
77static void
78bnad_add_to_list(struct bnad *bnad)
79{
80 mutex_lock(&bnad_list_mutex);
81 list_add_tail(&bnad->list_entry, &bnad_list);
82 bnad->id = bna_id++;
83 mutex_unlock(&bnad_list_mutex);
84}
85
86static void
87bnad_remove_from_list(struct bnad *bnad)
88{
89 mutex_lock(&bnad_list_mutex);
90 list_del(&bnad->list_entry);
91 mutex_unlock(&bnad_list_mutex);
92}
93
94/*
95 * Reinitialize completions in CQ, once Rx is taken down
96 */
97static void
b3cc6e88 98bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
8b230ed8 99{
5216562a 100 struct bna_cq_entry *cmpl;
101 int i;
102
8b230ed8 103 for (i = 0; i < ccb->q_depth; i++) {
5216562a 104 cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
8b230ed8 105 cmpl->valid = 0;
106 }
107}
108
109/* Tx Datapath functions */
110
111
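/*
 * bnad_tx_buff_unmap() DMA-unmaps the skb header and every fragment of the
 * Tx entry at unmap_q[index], clears the unmap slots it walked, and returns
 * the queue index following the last vector it consumed.
 */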
112/* Caller should ensure that the entry at unmap_q[index] is valid */
271e8b79 113static u32
114bnad_tx_buff_unmap(struct bnad *bnad,
115 struct bnad_tx_unmap *unmap_q,
116 u32 q_depth, u32 index)
271e8b79 117{
118 struct bnad_tx_unmap *unmap;
119 struct sk_buff *skb;
120 int vector, nvecs;
121
122 unmap = &unmap_q[index];
123 nvecs = unmap->nvecs;
124
125 skb = unmap->skb;
126 unmap->skb = NULL;
127 unmap->nvecs = 0;
128 dma_unmap_single(&bnad->pcidev->dev,
129 dma_unmap_addr(&unmap->vectors[0], dma_addr),
130 skb_headlen(skb), DMA_TO_DEVICE);
131 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
132 nvecs--;
133
134 vector = 0;
135 while (nvecs) {
136 vector++;
137 if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
138 vector = 0;
139 BNA_QE_INDX_INC(index, q_depth);
140 unmap = &unmap_q[index];
141 }
271e8b79 142
143 dma_unmap_page(&bnad->pcidev->dev,
144 dma_unmap_addr(&unmap->vectors[vector], dma_addr),
145 skb_shinfo(skb)->frags[nvecs].size, DMA_TO_DEVICE);
146 dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
147 nvecs--;
148 }
149
150 BNA_QE_INDX_INC(index, q_depth);
151
152 return index;
153}
154
155/*
156 * Frees all pending Tx Bufs
157 * At this point no activity is expected on the Q,
158 * so DMA unmap & freeing is fine.
159 */
160static void
5216562a 161bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
8b230ed8 162{
163 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
164 struct sk_buff *skb;
165 int i;
8b230ed8 166
167 for (i = 0; i < tcb->q_depth; i++) {
168 skb = unmap_q[i].skb;
938fa488 169 if (!skb)
8b230ed8 170 continue;
5216562a 171 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
938fa488 172
173 dev_kfree_skb_any(skb);
174 }
175}
176
8b230ed8 177/*
b3cc6e88 178 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
179 * Can be called in a) Interrupt context
180 * b) Sending context
181 */
182static u32
5216562a 183bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
8b230ed8 184{
185 u32 sent_packets = 0, sent_bytes = 0;
186 u32 wis, unmap_wis, hw_cons, cons, q_depth;
187 struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
188 struct bnad_tx_unmap *unmap;
189 struct sk_buff *skb;
8b230ed8 190
d95d1081 191 /* Just return if TX is stopped */
be7fa326 192 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
193 return 0;
194
195 hw_cons = *(tcb->hw_consumer_index);
196 cons = tcb->consumer_index;
197 q_depth = tcb->q_depth;
8b230ed8 198
5216562a 199 wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
200 BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
201
8b230ed8 202 while (wis) {
203 unmap = &unmap_q[cons];
204
205 skb = unmap->skb;
8b230ed8 206
207 sent_packets++;
208 sent_bytes += skb->len;
8b230ed8 209
210 unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
211 wis -= unmap_wis;
8b230ed8 212
5216562a 213 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
214 dev_kfree_skb_any(skb);
215 }
216
217 /* Update consumer pointers. */
5216562a 218 tcb->consumer_index = hw_cons;
219
220 tcb->txq->tx_packets += sent_packets;
221 tcb->txq->tx_bytes += sent_bytes;
222
223 return sent_packets;
224}
225
8b230ed8 226static u32
b3cc6e88 227bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
228{
229 struct net_device *netdev = bnad->netdev;
be7fa326 230 u32 sent = 0;
231
232 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
233 return 0;
234
b3cc6e88 235 sent = bnad_txcmpl_process(bnad, tcb);
236 if (sent) {
237 if (netif_queue_stopped(netdev) &&
238 netif_carrier_ok(netdev) &&
239 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
240 BNAD_NETIF_WAKE_THRESHOLD) {
241 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
242 netif_wake_queue(netdev);
243 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
244 }
8b230ed8 245 }
246 }
247
248 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
8b230ed8 249 bna_ib_ack(tcb->i_dbell, sent);
250
251 smp_mb__before_clear_bit();
252 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
253
254 return sent;
255}
256
257/* MSIX Tx Completion Handler */
258static irqreturn_t
259bnad_msix_tx(int irq, void *data)
260{
261 struct bna_tcb *tcb = (struct bna_tcb *)data;
262 struct bnad *bnad = tcb->bnad;
263
b3cc6e88 264 bnad_tx_complete(bnad, tcb);
265
266 return IRQ_HANDLED;
267}
268
269static inline void
270bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
271{
272 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
273
274 unmap_q->reuse_pi = -1;
275 unmap_q->alloc_order = -1;
276 unmap_q->map_size = 0;
277 unmap_q->type = BNAD_RXBUF_NONE;
278}
279
280/* Default is page-based allocation. Multi-buffer Rx is handled below per rxq config. */
281static int
282bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
283{
284 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
e29aa339 285 int order;
286
287 bnad_rxq_alloc_uninit(bnad, rcb);
288
289 order = get_order(rcb->rxq->buffer_size);
290
291 unmap_q->type = BNAD_RXBUF_PAGE;
292
293 if (bna_is_small_rxq(rcb->id)) {
294 unmap_q->alloc_order = 0;
295 unmap_q->map_size = rcb->rxq->buffer_size;
296 } else {
297 if (rcb->rxq->multi_buffer) {
298 unmap_q->alloc_order = 0;
299 unmap_q->map_size = rcb->rxq->buffer_size;
300 unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
301 } else {
302 unmap_q->alloc_order = order;
303 unmap_q->map_size =
304 (rcb->rxq->buffer_size > 2048) ?
305 PAGE_SIZE << order : 2048;
306 }
307 }
308
309 BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
310
311 return 0;
312}
313
314static inline void
315bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
316{
317 if (!unmap->page)
318 return;
319
320 dma_unmap_page(&bnad->pcidev->dev,
321 dma_unmap_addr(&unmap->vector, dma_addr),
322 unmap->vector.len, DMA_FROM_DEVICE);
323 put_page(unmap->page);
324 unmap->page = NULL;
325 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
326 unmap->vector.len = 0;
327}
328
329static inline void
330bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
331{
332 if (!unmap->skb)
333 return;
334
335 dma_unmap_single(&bnad->pcidev->dev,
336 dma_unmap_addr(&unmap->vector, dma_addr),
337 unmap->vector.len, DMA_FROM_DEVICE);
338 dev_kfree_skb_any(unmap->skb);
339 unmap->skb = NULL;
340 dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
341 unmap->vector.len = 0;
342}
343
8b230ed8 344static void
b3cc6e88 345bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
8b230ed8 346{
30f9fc94 347 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
348 int i;
349
350 for (i = 0; i < rcb->q_depth; i++) {
30f9fc94 351 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
8b230ed8 352
e29aa339 353 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
30f9fc94 354 bnad_rxq_cleanup_skb(bnad, unmap);
355 else
356 bnad_rxq_cleanup_page(bnad, unmap);
357 }
358 bnad_rxq_alloc_uninit(bnad, rcb);
359}
5216562a 360
361static u32
362bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
363{
364 u32 alloced, prod, q_depth;
365 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
366 struct bnad_rx_unmap *unmap, *prev;
367 struct bna_rxq_entry *rxent;
368 struct page *page;
369 u32 page_offset, alloc_size;
370 dma_addr_t dma_addr;
371
372 prod = rcb->producer_index;
373 q_depth = rcb->q_depth;
374
375 alloc_size = PAGE_SIZE << unmap_q->alloc_order;
376 alloced = 0;
377
378 while (nalloc--) {
379 unmap = &unmap_q->unmap[prod];
380
381 if (unmap_q->reuse_pi < 0) {
382 page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
383 unmap_q->alloc_order);
384 page_offset = 0;
385 } else {
386 prev = &unmap_q->unmap[unmap_q->reuse_pi];
387 page = prev->page;
388 page_offset = prev->page_offset + unmap_q->map_size;
389 get_page(page);
390 }
391
392 if (unlikely(!page)) {
393 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
394 rcb->rxq->rxbuf_alloc_failed++;
395 goto finishing;
396 }
397
398 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
399 unmap_q->map_size, DMA_FROM_DEVICE);
400
401 unmap->page = page;
402 unmap->page_offset = page_offset;
403 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
404 unmap->vector.len = unmap_q->map_size;
405 page_offset += unmap_q->map_size;
406
407 if (page_offset < alloc_size)
408 unmap_q->reuse_pi = prod;
409 else
410 unmap_q->reuse_pi = -1;
411
412 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
413 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
414 BNA_QE_INDX_INC(prod, q_depth);
415 alloced++;
416 }
417
418finishing:
419 if (likely(alloced)) {
420 rcb->producer_index = prod;
421 smp_mb();
422 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
423 bna_rxq_prod_indx_doorbell(rcb);
8b230ed8 424 }
425
426 return alloced;
427}
428
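/*
 * Refill an RxQ with page-based buffers. Pages of (PAGE_SIZE << alloc_order)
 * bytes are carved into map_size chunks; reuse_pi remembers a producer slot
 * whose page still has room, so the next buffer takes a reference on that
 * page instead of allocating a new one.
 */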
429static u32
430bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
8b230ed8 431{
432 u32 alloced, prod, q_depth, buff_sz;
433 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
5216562a 434 struct bnad_rx_unmap *unmap;
435 struct bna_rxq_entry *rxent;
436 struct sk_buff *skb;
437 dma_addr_t dma_addr;
438
5216562a 439 buff_sz = rcb->rxq->buffer_size;
440 prod = rcb->producer_index;
441 q_depth = rcb->q_depth;
8b230ed8 442
443 alloced = 0;
444 while (nalloc--) {
445 unmap = &unmap_q->unmap[prod];
446
447 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
448
449 if (unlikely(!skb)) {
450 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
3caa1e95 451 rcb->rxq->rxbuf_alloc_failed++;
452 goto finishing;
453 }
5ea74318 454 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
5216562a 455 buff_sz, DMA_FROM_DEVICE);
8b230ed8 456
457 unmap->skb = skb;
458 dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
459 unmap->vector.len = buff_sz;
460
461 rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
462 BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
5216562a 463 BNA_QE_INDX_INC(prod, q_depth);
464 alloced++;
465 }
466
467finishing:
468 if (likely(alloced)) {
5216562a 469 rcb->producer_index = prod;
8b230ed8 470 smp_mb();
5bcf6ac0 471 if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
be7fa326 472 bna_rxq_prod_indx_doorbell(rcb);
8b230ed8 473 }
474
475 return alloced;
476}
477
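/*
 * Repost Rx buffers: refill only once the number of free entries crosses
 * the refill threshold, using the skb or the page refill routine depending
 * on the unmap queue type.
 */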
478static inline void
479bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
480{
481 struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
482 u32 to_alloc;
483
484 to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
485 if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
486 return;
487
e29aa339 488 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
30f9fc94 489 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
490 else
491 bnad_rxq_refill_page(bnad, rcb, to_alloc);
492}
493
494#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
495 BNA_CQ_EF_IPV6 | \
496 BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
497 BNA_CQ_EF_L4_CKSUM_OK)
498
499#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
500 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
501#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
502 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
503#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
504 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
505#define flags_udp6 (BNA_CQ_EF_IPV6 | \
506 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
507
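/*
 * Drop an errored frame: unmap and free the nvecs receive buffers starting
 * at completion index sop_ci.
 */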
508static void
509bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
510 u32 sop_ci, u32 nvecs)
30f9fc94 511{
512 struct bnad_rx_unmap_q *unmap_q;
513 struct bnad_rx_unmap *unmap;
514 u32 ci, vec;
30f9fc94 515
516 unmap_q = rcb->unmap_q;
517 for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
518 unmap = &unmap_q->unmap[ci];
519 BNA_QE_INDX_INC(ci, rcb->q_depth);
520
521 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
522 bnad_rxq_cleanup_skb(bnad, unmap);
523 else
524 bnad_rxq_cleanup_page(bnad, unmap);
525 }
526}
527
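/*
 * Attach the buffers of a multi-vector frame to the skb as page fragments;
 * every vector contributes its full mapped length except the last one,
 * which carries last_fraglen bytes.
 */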
528static void
529bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
530 u32 sop_ci, u32 nvecs, u32 last_fraglen)
531{
532 struct bnad *bnad;
533 u32 ci, vec, len, totlen = 0;
534 struct bnad_rx_unmap_q *unmap_q;
535 struct bnad_rx_unmap *unmap;
536
537 unmap_q = rcb->unmap_q;
538 bnad = rcb->bnad;
539 for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
540 unmap = &unmap_q->unmap[ci];
541 BNA_QE_INDX_INC(ci, rcb->q_depth);
542
543 dma_unmap_page(&bnad->pcidev->dev,
544 dma_unmap_addr(&unmap->vector, dma_addr),
545 unmap->vector.len, DMA_FROM_DEVICE);
546
547 len = (vec == nvecs) ?
548 last_fraglen : unmap->vector.len;
549 totlen += len;
550
30f9fc94 551 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
e29aa339 552 unmap->page, unmap->page_offset, len);
553
554 unmap->page = NULL;
555 unmap->vector.len = 0;
556 }
557
558 skb->len += totlen;
559 skb->data_len += totlen;
560 skb->truesize += totlen;
561}
562
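/*
 * Single-buffer receive path: unmap the skb data buffer, set the frame
 * length and protocol, and release the unmap slot.
 */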
563static inline void
564bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
565 struct bnad_rx_unmap *unmap, u32 len)
566{
567 prefetch(skb->data);
568
569 dma_unmap_single(&bnad->pcidev->dev,
570 dma_unmap_addr(&unmap->vector, dma_addr),
571 unmap->vector.len, DMA_FROM_DEVICE);
572
e29aa339 573 skb_put(skb, len);
574 skb->protocol = eth_type_trans(skb, bnad->netdev);
575
576 unmap->skb = NULL;
577 unmap->vector.len = 0;
578}
579
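/*
 * Process up to 'budget' completions on one CQ. Single-buffer RxQs hand the
 * pre-allocated skb to netif_receive_skb(); multi-buffer RxQs collect every
 * completion of a frame (up to BNA_CQ_EF_EOP) and pass it to
 * napi_gro_frags(). Consumed CQ entries are invalidated and the RxQs are
 * reposted before returning.
 */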
8b230ed8 580static u32
b3cc6e88 581bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
8b230ed8 582{
e29aa339 583 struct bna_cq_entry *cq, *cmpl, *next_cmpl;
8b230ed8 584 struct bna_rcb *rcb = NULL;
30f9fc94 585 struct bnad_rx_unmap_q *unmap_q;
586 struct bnad_rx_unmap *unmap = NULL;
587 struct sk_buff *skb = NULL;
8b230ed8 588 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
30f9fc94 589 struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
590 u32 packets = 0, len = 0, totlen = 0;
591 u32 pi, vec, sop_ci = 0, nvecs = 0;
592 u32 flags, masked_flags;
078086f3 593
8b230ed8 594 prefetch(bnad->netdev);
595
596 cq = ccb->sw_q;
597 cmpl = &cq[ccb->producer_index];
598
599 while (cmpl->valid && (packets < budget)) {
600 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
601
078086f3 602 if (bna_is_small_rxq(cmpl->rxq_id))
8b230ed8 603 rcb = ccb->rcb[1];
604 else
605 rcb = ccb->rcb[0];
606
607 unmap_q = rcb->unmap_q;
608
609 /* start of packet ci */
610 sop_ci = rcb->consumer_index;
611
612 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
613 unmap = &unmap_q->unmap[sop_ci];
614 skb = unmap->skb;
615 } else {
616 skb = napi_get_frags(&rx_ctrl->napi);
617 if (unlikely(!skb))
618 break;
619 }
620 prefetch(skb);
621
622 flags = ntohl(cmpl->flags);
623 len = ntohs(cmpl->length);
624 totlen = len;
625 nvecs = 1;
626
627 /* Check all the completions for this frame.
628 * busy-wait doesn't help much, break here.
629 */
630 if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
631 (flags & BNA_CQ_EF_EOP) == 0) {
632 pi = ccb->producer_index;
633 do {
634 BNA_QE_INDX_INC(pi, ccb->q_depth);
635 next_cmpl = &cq[pi];
636
637 if (!next_cmpl->valid)
638 break;
5216562a 639
640 len = ntohs(next_cmpl->length);
641 flags = ntohl(next_cmpl->flags);
642
643 nvecs++;
644 totlen += len;
645 } while ((flags & BNA_CQ_EF_EOP) == 0);
646
647 if (!next_cmpl->valid)
648 break;
649 }
650
651 /* TODO: BNA_CQ_EF_LOCAL ? */
652 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
653 BNA_CQ_EF_FCS_ERROR |
654 BNA_CQ_EF_TOO_LONG))) {
655 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
8b230ed8 656 rcb->rxq->rx_packets_with_error++;
e29aa339 657
658 goto next;
659 }
660
661 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
662 bnad_cq_setup_skb(bnad, skb, unmap, len);
663 else
664 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
30f9fc94 665
666 packets++;
667 rcb->rxq->rx_packets++;
668 rcb->rxq->rx_bytes += totlen;
669 ccb->bytes_per_intr += totlen;
670
671 masked_flags = flags & flags_cksum_prot_mask;
672
8b230ed8 673 if (likely
e5ee20e7 674 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
675 ((masked_flags == flags_tcp4) ||
676 (masked_flags == flags_udp4) ||
677 (masked_flags == flags_tcp6) ||
678 (masked_flags == flags_udp6))))
679 skb->ip_summed = CHECKSUM_UNNECESSARY;
680 else
bc8acf2c 681 skb_checksum_none_assert(skb);
8b230ed8 682
f859d7cb 683 if (flags & BNA_CQ_EF_VLAN)
86a9bad3 684 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
f859d7cb 685
e29aa339 686 if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
f859d7cb 687 netif_receive_skb(skb);
688 else
689 napi_gro_frags(&rx_ctrl->napi);
690
691next:
692 BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
693 for (vec = 0; vec < nvecs; vec++) {
694 cmpl = &cq[ccb->producer_index];
695 cmpl->valid = 0;
696 BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
697 }
30f9fc94 698 cmpl = &cq[ccb->producer_index];
699 }
700
30f9fc94 701 napi_gro_flush(&rx_ctrl->napi, false);
2be67144 702 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
703 bna_ib_ack_disable_irq(ccb->i_dbell, packets);
704
5216562a 705 bnad_rxq_post(bnad, ccb->rcb[0]);
2be67144 706 if (ccb->rcb[1])
5216562a 707 bnad_rxq_post(bnad, ccb->rcb[1]);
078086f3 708
709 return packets;
710}
711
712static void
713bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
714{
715 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
716 struct napi_struct *napi = &rx_ctrl->napi;
717
718 if (likely(napi_schedule_prep(napi))) {
be7fa326 719 __napi_schedule(napi);
271e8b79 720 rx_ctrl->rx_schedule++;
8b230ed8 721 }
722}
723
724/* MSIX Rx Path Handler */
725static irqreturn_t
726bnad_msix_rx(int irq, void *data)
727{
728 struct bna_ccb *ccb = (struct bna_ccb *)data;
8b230ed8 729
730 if (ccb) {
731 ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
2be67144 732 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
271e8b79 733 }
734
735 return IRQ_HANDLED;
736}
737
738/* Interrupt handlers */
739
740/* Mbox Interrupt Handlers */
741static irqreturn_t
742bnad_msix_mbox_handler(int irq, void *data)
743{
744 u32 intr_status;
e2fa6f2e 745 unsigned long flags;
be7fa326 746 struct bnad *bnad = (struct bnad *)data;
8b230ed8 747
8b230ed8 748 spin_lock_irqsave(&bnad->bna_lock, flags);
749 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
750 spin_unlock_irqrestore(&bnad->bna_lock, flags);
751 return IRQ_HANDLED;
752 }
753
754 bna_intr_status_get(&bnad->bna, intr_status);
755
078086f3 756 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
757 bna_mbox_handler(&bnad->bna, intr_status);
758
759 spin_unlock_irqrestore(&bnad->bna_lock, flags);
760
761 return IRQ_HANDLED;
762}
763
764static irqreturn_t
765bnad_isr(int irq, void *data)
766{
767 int i, j;
768 u32 intr_status;
769 unsigned long flags;
be7fa326 770 struct bnad *bnad = (struct bnad *)data;
771 struct bnad_rx_info *rx_info;
772 struct bnad_rx_ctrl *rx_ctrl;
078086f3 773 struct bna_tcb *tcb = NULL;
8b230ed8 774
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
e2fa6f2e 778 return IRQ_NONE;
dfee325a 779 }
780
781 bna_intr_status_get(&bnad->bna, intr_status);
e2fa6f2e 782
783 if (unlikely(!intr_status)) {
784 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 785 return IRQ_NONE;
dfee325a 786 }
8b230ed8 787
078086f3 788 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
8b230ed8 789 bna_mbox_handler(&bnad->bna, intr_status);
be7fa326 790
791 spin_unlock_irqrestore(&bnad->bna_lock, flags);
792
793 if (!BNA_IS_INTX_DATA_INTR(intr_status))
794 return IRQ_HANDLED;
795
8b230ed8 796 /* Process data interrupts */
797 /* Tx processing */
798 for (i = 0; i < bnad->num_tx; i++) {
799 for (j = 0; j < bnad->num_txq_per_tx; j++) {
800 tcb = bnad->tx_info[i].tcb[j];
801 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
b3cc6e88 802 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
078086f3 803 }
804 }
805 /* Rx processing */
806 for (i = 0; i < bnad->num_rx; i++) {
807 rx_info = &bnad->rx_info[i];
808 if (!rx_info->rx)
809 continue;
810 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
811 rx_ctrl = &rx_info->rx_ctrl[j];
812 if (rx_ctrl->ccb)
813 bnad_netif_rx_schedule_poll(bnad,
814 rx_ctrl->ccb);
815 }
816 }
817 return IRQ_HANDLED;
818}
819
820/*
821 * Called in interrupt / callback context
822 * with bna_lock held, so cfg_flags access is OK
823 */
824static void
825bnad_enable_mbox_irq(struct bnad *bnad)
826{
be7fa326 827 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
e2fa6f2e 828
829 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
830}
831
832/*
833 * Called with bnad->bna_lock held because of
834 * bnad->cfg_flags access.
835 */
b7ee31c5 836static void
837bnad_disable_mbox_irq(struct bnad *bnad)
838{
be7fa326 839 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
8b230ed8 840
841 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
842}
8b230ed8 843
844static void
845bnad_set_netdev_perm_addr(struct bnad *bnad)
846{
847 struct net_device *netdev = bnad->netdev;
e2fa6f2e 848
849 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
850 if (is_zero_ether_addr(netdev->dev_addr))
851 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
852}
853
854/* Control Path Handlers */
855
856/* Callbacks */
857void
078086f3 858bnad_cb_mbox_intr_enable(struct bnad *bnad)
859{
860 bnad_enable_mbox_irq(bnad);
861}
862
863void
078086f3 864bnad_cb_mbox_intr_disable(struct bnad *bnad)
865{
866 bnad_disable_mbox_irq(bnad);
867}
868
869void
870bnad_cb_ioceth_ready(struct bnad *bnad)
871{
872 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
873 complete(&bnad->bnad_completions.ioc_comp);
874}
875
876void
877bnad_cb_ioceth_failed(struct bnad *bnad)
8b230ed8 878{
078086f3 879 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
8b230ed8 880 complete(&bnad->bnad_completions.ioc_comp);
881}
882
883void
078086f3 884bnad_cb_ioceth_disabled(struct bnad *bnad)
8b230ed8 885{
078086f3 886 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
8b230ed8 887 complete(&bnad->bnad_completions.ioc_comp);
888}
889
890static void
078086f3 891bnad_cb_enet_disabled(void *arg)
892{
893 struct bnad *bnad = (struct bnad *)arg;
894
8b230ed8 895 netif_carrier_off(bnad->netdev);
078086f3 896 complete(&bnad->bnad_completions.enet_comp);
897}
898
899void
078086f3 900bnad_cb_ethport_link_status(struct bnad *bnad,
901 enum bna_link_status link_status)
902{
3db1cd5c 903 bool link_up = false;
904
905 link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
906
907 if (link_status == BNA_CEE_UP) {
908 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
909 BNAD_UPDATE_CTR(bnad, cee_toggle);
8b230ed8 910 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
911 } else {
912 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
913 BNAD_UPDATE_CTR(bnad, cee_toggle);
8b230ed8 914 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
078086f3 915 }
916
917 if (link_up) {
918 if (!netif_carrier_ok(bnad->netdev)) {
919 uint tx_id, tcb_id;
920 printk(KERN_WARNING "bna: %s link up\n",
921 bnad->netdev->name);
922 netif_carrier_on(bnad->netdev);
923 BNAD_UPDATE_CTR(bnad, link_toggle);
924 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
925 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
926 tcb_id++) {
927 struct bna_tcb *tcb =
928 bnad->tx_info[tx_id].tcb[tcb_id];
929 u32 txq_id;
930 if (!tcb)
931 continue;
932
933 txq_id = tcb->id;
934
935 if (test_bit(BNAD_TXQ_TX_STARTED,
936 &tcb->flags)) {
937 /*
938 * Force an immediate
939 * Transmit Schedule */
940 printk(KERN_INFO "bna: %s %d "
941 "TXQ_STARTED\n",
942 bnad->netdev->name,
943 txq_id);
944 netif_wake_subqueue(
945 bnad->netdev,
946 txq_id);
947 BNAD_UPDATE_CTR(bnad,
948 netif_queue_wakeup);
949 } else {
950 netif_stop_subqueue(
951 bnad->netdev,
952 txq_id);
953 BNAD_UPDATE_CTR(bnad,
954 netif_queue_stop);
955 }
956 }
957 }
958 }
959 } else {
960 if (netif_carrier_ok(bnad->netdev)) {
078086f3 961 printk(KERN_WARNING "bna: %s link down\n",
962 bnad->netdev->name);
963 netif_carrier_off(bnad->netdev);
964 BNAD_UPDATE_CTR(bnad, link_toggle);
965 }
966 }
967}
968
969static void
078086f3 970bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
971{
972 struct bnad *bnad = (struct bnad *)arg;
973
974 complete(&bnad->bnad_completions.tx_comp);
975}
976
977static void
978bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
979{
980 struct bnad_tx_info *tx_info =
981 (struct bnad_tx_info *)tcb->txq->tx->priv;
8b230ed8 982
5216562a 983 tcb->priv = tcb;
8b230ed8 984 tx_info->tcb[tcb->id] = tcb;
985}
986
987static void
988bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
989{
990 struct bnad_tx_info *tx_info =
991 (struct bnad_tx_info *)tcb->txq->tx->priv;
992
993 tx_info->tcb[tcb->id] = NULL;
01b54b14 994 tcb->priv = NULL;
995}
996
997static void
998bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
999{
1000 struct bnad_rx_info *rx_info =
1001 (struct bnad_rx_info *)ccb->cq->rx->priv;
1002
1003 rx_info->rx_ctrl[ccb->id].ccb = ccb;
1004 ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1005}
1006
1007static void
1008bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1009{
1010 struct bnad_rx_info *rx_info =
1011 (struct bnad_rx_info *)ccb->cq->rx->priv;
1012
1013 rx_info->rx_ctrl[ccb->id].ccb = NULL;
1014}
1015
1016static void
078086f3 1017bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1018{
1019 struct bnad_tx_info *tx_info =
1020 (struct bnad_tx_info *)tx->priv;
1021 struct bna_tcb *tcb;
1022 u32 txq_id;
1023 int i;
8b230ed8 1024
1025 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1026 tcb = tx_info->tcb[i];
1027 if (!tcb)
1028 continue;
1029 txq_id = tcb->id;
1030 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1031 netif_stop_subqueue(bnad->netdev, txq_id);
1032 printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
1033 bnad->netdev->name, txq_id);
1034 }
1035}
1036
1037static void
078086f3 1038bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
8b230ed8 1039{
1040 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1041 struct bna_tcb *tcb;
1042 u32 txq_id;
1043 int i;
8b230ed8 1044
1045 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1046 tcb = tx_info->tcb[i];
1047 if (!tcb)
1048 continue;
1049 txq_id = tcb->id;
8b230ed8 1050
01b54b14 1051 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
078086f3 1052 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
01b54b14 1053 BUG_ON(*(tcb->hw_consumer_index) != 0);
1054
1055 if (netif_carrier_ok(bnad->netdev)) {
1056 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
1057 bnad->netdev->name, txq_id);
1058 netif_wake_subqueue(bnad->netdev, txq_id);
1059 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1060 }
1061 }
1062
1063 /*
078086f3 1064 * Workaround for first ioceth enable failure & we
1065 * get a 0 MAC address. We try to get the MAC address
1066 * again here.
1067 */
1068 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
078086f3 1069 bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
1070 bnad_set_netdev_perm_addr(bnad);
1071 }
1072}
1073
1074/*
1075 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1076 */
1077static void
1078bnad_tx_cleanup(struct delayed_work *work)
1079{
1080 struct bnad_tx_info *tx_info =
1081 container_of(work, struct bnad_tx_info, tx_cleanup_work);
1082 struct bnad *bnad = NULL;
1083 struct bna_tcb *tcb;
1084 unsigned long flags;
5216562a 1085 u32 i, pending = 0;
1086
1087 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1088 tcb = tx_info->tcb[i];
1089 if (!tcb)
1090 continue;
1091
1092 bnad = tcb->bnad;
1093
1094 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1095 pending++;
1096 continue;
1097 }
1098
b3cc6e88 1099 bnad_txq_cleanup(bnad, tcb);
01b54b14 1100
1101 smp_mb__before_clear_bit();
1102 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1103 }
1104
1105 if (pending) {
1106 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1107 msecs_to_jiffies(1));
1108 return;
1109 }
1110
1111 spin_lock_irqsave(&bnad->bna_lock, flags);
1112 bna_tx_cleanup_complete(tx_info->tx);
1113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1114}
1115
be7fa326 1116static void
078086f3 1117bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
be7fa326 1118{
1119 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1120 struct bna_tcb *tcb;
1121 int i;
1122
1123 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1124 tcb = tx_info->tcb[i];
1125 if (!tcb)
1126 continue;
1127 }
1128
01b54b14 1129 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1130}
1131
1132static void
1133bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1134{
1135 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1136 struct bna_ccb *ccb;
1137 struct bnad_rx_ctrl *rx_ctrl;
1138 int i;
1139
1140 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1141 rx_ctrl = &rx_info->rx_ctrl[i];
1142 ccb = rx_ctrl->ccb;
1143 if (!ccb)
1144 continue;
1145
1146 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1147
1148 if (ccb->rcb[1])
1149 clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1150 }
1151}
1152
1153/*
1154 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1155 */
1156static void
1157bnad_rx_cleanup(void *work)
1158{
1159 struct bnad_rx_info *rx_info =
1160 container_of(work, struct bnad_rx_info, rx_cleanup_work);
1161 struct bnad_rx_ctrl *rx_ctrl;
1162 struct bnad *bnad = NULL;
1163 unsigned long flags;
5216562a 1164 u32 i;
1165
1166 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1167 rx_ctrl = &rx_info->rx_ctrl[i];
1168
1169 if (!rx_ctrl->ccb)
1170 continue;
1171
1172 bnad = rx_ctrl->ccb->bnad;
1173
1174 /*
1175 * Wait till the poll handler has exited
1176 * and nothing can be scheduled anymore
1177 */
1178 napi_disable(&rx_ctrl->napi);
1179
1180 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1181 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
01b54b14 1182 if (rx_ctrl->ccb->rcb[1])
b3cc6e88 1183 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1184 }
1185
1186 spin_lock_irqsave(&bnad->bna_lock, flags);
1187 bna_rx_cleanup_complete(rx_info->rx);
1188 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1189}
1190
8b230ed8 1191static void
078086f3 1192bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
8b230ed8 1193{
1194 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1195 struct bna_ccb *ccb;
1196 struct bnad_rx_ctrl *rx_ctrl;
1197 int i;
1198
772b5235 1199 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1200 rx_ctrl = &rx_info->rx_ctrl[i];
1201 ccb = rx_ctrl->ccb;
1202 if (!ccb)
1203 continue;
1204
1205 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1206
1207 if (ccb->rcb[1])
1208 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
078086f3 1209 }
be7fa326 1210
01b54b14 1211 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1212}
1213
1214static void
078086f3 1215bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
8b230ed8 1216{
1217 struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1218 struct bna_ccb *ccb;
1219 struct bna_rcb *rcb;
1220 struct bnad_rx_ctrl *rx_ctrl;
30f9fc94 1221 int i, j;
be7fa326 1222
772b5235 1223 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1224 rx_ctrl = &rx_info->rx_ctrl[i];
1225 ccb = rx_ctrl->ccb;
1226 if (!ccb)
1227 continue;
be7fa326 1228
01b54b14 1229 napi_enable(&rx_ctrl->napi);
8b230ed8 1230
1231 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1232 rcb = ccb->rcb[j];
1233 if (!rcb)
1234 continue;
078086f3 1235
30f9fc94 1236 bnad_rxq_alloc_init(bnad, rcb);
078086f3 1237 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
5bcf6ac0 1238 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
5216562a 1239 bnad_rxq_post(bnad, rcb);
078086f3 1240 }
1241 }
1242}
1243
1244static void
078086f3 1245bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1246{
1247 struct bnad *bnad = (struct bnad *)arg;
1248
1249 complete(&bnad->bnad_completions.rx_comp);
1250}
1251
1252static void
078086f3 1253bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
8b230ed8 1254{
078086f3 1255 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1256 complete(&bnad->bnad_completions.mcast_comp);
1257}
1258
1259void
1260bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1261 struct bna_stats *stats)
1262{
1263 if (status == BNA_CB_SUCCESS)
1264 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1265
1266 if (!netif_running(bnad->netdev) ||
1267 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1268 return;
1269
1270 mod_timer(&bnad->stats_timer,
1271 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1272}
1273
1274static void
1275bnad_cb_enet_mtu_set(struct bnad *bnad)
1276{
1277 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1278 complete(&bnad->bnad_completions.mtu_comp);
1279}
1280
1281void
1282bnad_cb_completion(void *arg, enum bfa_status status)
1283{
1284 struct bnad_iocmd_comp *iocmd_comp =
1285 (struct bnad_iocmd_comp *)arg;
1286
1287 iocmd_comp->comp_status = (u32) status;
1288 complete(&iocmd_comp->comp);
1289}
1290
1291/* Resource allocation, free functions */
1292
1293static void
1294bnad_mem_free(struct bnad *bnad,
1295 struct bna_mem_info *mem_info)
1296{
1297 int i;
1298 dma_addr_t dma_pa;
1299
1300 if (mem_info->mdl == NULL)
1301 return;
1302
1303 for (i = 0; i < mem_info->num; i++) {
1304 if (mem_info->mdl[i].kva != NULL) {
1305 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1306 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1307 dma_pa);
1308 dma_free_coherent(&bnad->pcidev->dev,
1309 mem_info->mdl[i].len,
1310 mem_info->mdl[i].kva, dma_pa);
1311 } else
1312 kfree(mem_info->mdl[i].kva);
1313 }
1314 }
1315 kfree(mem_info->mdl);
1316 mem_info->mdl = NULL;
1317}
1318
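/*
 * Allocate the memory described by mem_info: DMA-coherent blocks for
 * BNA_MEM_T_DMA, kzalloc'ed blocks otherwise. On failure everything
 * allocated so far is freed and -ENOMEM is returned.
 */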
1319static int
1320bnad_mem_alloc(struct bnad *bnad,
1321 struct bna_mem_info *mem_info)
1322{
1323 int i;
1324 dma_addr_t dma_pa;
1325
1326 if ((mem_info->num == 0) || (mem_info->len == 0)) {
1327 mem_info->mdl = NULL;
1328 return 0;
1329 }
1330
1331 mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1332 GFP_KERNEL);
1333 if (mem_info->mdl == NULL)
1334 return -ENOMEM;
1335
1336 if (mem_info->mem_type == BNA_MEM_T_DMA) {
1337 for (i = 0; i < mem_info->num; i++) {
1338 mem_info->mdl[i].len = mem_info->len;
1339 mem_info->mdl[i].kva =
5ea74318 1340 dma_alloc_coherent(&bnad->pcidev->dev,
1341 mem_info->len, &dma_pa,
1342 GFP_KERNEL);
1343 if (mem_info->mdl[i].kva == NULL)
1344 goto err_return;
1345
1346 BNA_SET_DMA_ADDR(dma_pa,
1347 &(mem_info->mdl[i].dma));
1348 }
1349 } else {
1350 for (i = 0; i < mem_info->num; i++) {
1351 mem_info->mdl[i].len = mem_info->len;
1352 mem_info->mdl[i].kva = kzalloc(mem_info->len,
1353 GFP_KERNEL);
1354 if (mem_info->mdl[i].kva == NULL)
1355 goto err_return;
1356 }
1357 }
1358
1359 return 0;
1360
1361err_return:
1362 bnad_mem_free(bnad, mem_info);
1363 return -ENOMEM;
1364}
1365
1366/* Free IRQ for Mailbox */
1367static void
078086f3 1368bnad_mbox_irq_free(struct bnad *bnad)
1369{
1370 int irq;
1371 unsigned long flags;
1372
8b230ed8 1373 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 1374 bnad_disable_mbox_irq(bnad);
e2fa6f2e 1375 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1376
1377 irq = BNAD_GET_MBOX_IRQ(bnad);
be7fa326 1378 free_irq(irq, bnad);
1379}
1380
1381/*
1382 * Allocates IRQ for Mailbox, but keeps it disabled
1383 * This will be enabled once we get the mbox enable callback
1384 * from bna
1385 */
1386static int
078086f3 1387bnad_mbox_irq_alloc(struct bnad *bnad)
8b230ed8 1388{
1389 int err = 0;
1390 unsigned long irq_flags, flags;
8b230ed8 1391 u32 irq;
0120b99c 1392 irq_handler_t irq_handler;
8b230ed8 1393
1394 spin_lock_irqsave(&bnad->bna_lock, flags);
1395 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1396 irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
8811e267 1397 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8279171a 1398 irq_flags = 0;
1399 } else {
1400 irq_handler = (irq_handler_t)bnad_isr;
1401 irq = bnad->pcidev->irq;
5f77898d 1402 irq_flags = IRQF_SHARED;
8b230ed8 1403 }
8811e267 1404
8b230ed8 1405 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1406 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1407
1408 /*
1409 * Set the Mbox IRQ disable flag, so that the IRQ handler
1410 * called from request_irq() for SHARED IRQs does not execute
1411 */
1412 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1413
1414 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1415
8279171a 1416 err = request_irq(irq, irq_handler, irq_flags,
be7fa326 1417 bnad->mbox_irq_name, bnad);
e2fa6f2e 1418
be7fa326 1419 return err;
1420}
1421
1422static void
1423bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1424{
1425 kfree(intr_info->idl);
1426 intr_info->idl = NULL;
1427}
1428
1429/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
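/*
 * MSI-X vectors are laid out with the mailbox vector(s) first, followed by
 * one vector per TxQ and then one per RxP; INTx mode instead uses a single
 * descriptor carrying the Tx/Rx IB bitmask.
 */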
1430static int
1431bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
078086f3 1432 u32 txrx_id, struct bna_intr_info *intr_info)
1433{
1434 int i, vector_start = 0;
1435 u32 cfg_flags;
1436 unsigned long flags;
1437
1438 spin_lock_irqsave(&bnad->bna_lock, flags);
1439 cfg_flags = bnad->cfg_flags;
1440 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1441
1442 if (cfg_flags & BNAD_CF_MSIX) {
1443 intr_info->intr_type = BNA_INTR_T_MSIX;
1444 intr_info->idl = kcalloc(intr_info->num,
1445 sizeof(struct bna_intr_descr),
1446 GFP_KERNEL);
1447 if (!intr_info->idl)
1448 return -ENOMEM;
1449
1450 switch (src) {
1451 case BNAD_INTR_TX:
8811e267 1452 vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1453 break;
1454
1455 case BNAD_INTR_RX:
1456 vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1457 (bnad->num_tx * bnad->num_txq_per_tx) +
1458 txrx_id;
1459 break;
1460
1461 default:
1462 BUG();
1463 }
1464
1465 for (i = 0; i < intr_info->num; i++)
1466 intr_info->idl[i].vector = vector_start + i;
1467 } else {
1468 intr_info->intr_type = BNA_INTR_T_INTX;
1469 intr_info->num = 1;
1470 intr_info->idl = kcalloc(intr_info->num,
1471 sizeof(struct bna_intr_descr),
1472 GFP_KERNEL);
1473 if (!intr_info->idl)
1474 return -ENOMEM;
1475
1476 switch (src) {
1477 case BNAD_INTR_TX:
8811e267 1478 intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1479 break;
1480
1481 case BNAD_INTR_RX:
8811e267 1482 intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1483 break;
1484 }
1485 }
1486 return 0;
1487}
1488
1aa8b471 1489/* NOTE: Should be called for MSIX only
1490 * Unregisters Tx MSIX vector(s) from the kernel
1491 */
1492static void
1493bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1494 int num_txqs)
1495{
1496 int i;
1497 int vector_num;
1498
1499 for (i = 0; i < num_txqs; i++) {
1500 if (tx_info->tcb[i] == NULL)
1501 continue;
1502
1503 vector_num = tx_info->tcb[i]->intr_vector;
1504 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1505 }
1506}
1507
1aa8b471 1508/* NOTE: Should be called for MSIX only
1509 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1510 */
1511static int
1512bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
078086f3 1513 u32 tx_id, int num_txqs)
1514{
1515 int i;
1516 int err;
1517 int vector_num;
1518
1519 for (i = 0; i < num_txqs; i++) {
1520 vector_num = tx_info->tcb[i]->intr_vector;
1521 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1522 tx_id + tx_info->tcb[i]->id);
1523 err = request_irq(bnad->msix_table[vector_num].vector,
1524 (irq_handler_t)bnad_msix_tx, 0,
1525 tx_info->tcb[i]->name,
1526 tx_info->tcb[i]);
1527 if (err)
1528 goto err_return;
1529 }
1530
1531 return 0;
1532
1533err_return:
1534 if (i > 0)
1535 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1536 return -1;
1537}
1538
1aa8b471 1539/* NOTE: Should be called for MSIX only
1540 * Unregisters Rx MSIX vector(s) from the kernel
1541 */
1542static void
1543bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1544 int num_rxps)
1545{
1546 int i;
1547 int vector_num;
1548
1549 for (i = 0; i < num_rxps; i++) {
1550 if (rx_info->rx_ctrl[i].ccb == NULL)
1551 continue;
1552
1553 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1554 free_irq(bnad->msix_table[vector_num].vector,
1555 rx_info->rx_ctrl[i].ccb);
1556 }
1557}
1558
1aa8b471 1559/* NOTE: Should be called for MSIX only
1560 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1561 */
1562static int
1563bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
078086f3 1564 u32 rx_id, int num_rxps)
1565{
1566 int i;
1567 int err;
1568 int vector_num;
1569
1570 for (i = 0; i < num_rxps; i++) {
1571 vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1572 sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1573 bnad->netdev->name,
1574 rx_id + rx_info->rx_ctrl[i].ccb->id);
1575 err = request_irq(bnad->msix_table[vector_num].vector,
1576 (irq_handler_t)bnad_msix_rx, 0,
1577 rx_info->rx_ctrl[i].ccb->name,
1578 rx_info->rx_ctrl[i].ccb);
1579 if (err)
1580 goto err_return;
1581 }
1582
1583 return 0;
1584
1585err_return:
1586 if (i > 0)
1587 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1588 return -1;
1589}
1590
1591/* Free Tx object Resources */
1592static void
1593bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1594{
1595 int i;
1596
1597 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1598 if (res_info[i].res_type == BNA_RES_T_MEM)
1599 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1600 else if (res_info[i].res_type == BNA_RES_T_INTR)
1601 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1602 }
1603}
1604
1605/* Allocates memory and interrupt resources for Tx object */
1606static int
1607bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
078086f3 1608 u32 tx_id)
1609{
1610 int i, err = 0;
1611
1612 for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1613 if (res_info[i].res_type == BNA_RES_T_MEM)
1614 err = bnad_mem_alloc(bnad,
1615 &res_info[i].res_u.mem_info);
1616 else if (res_info[i].res_type == BNA_RES_T_INTR)
1617 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1618 &res_info[i].res_u.intr_info);
1619 if (err)
1620 goto err_return;
1621 }
1622 return 0;
1623
1624err_return:
1625 bnad_tx_res_free(bnad, res_info);
1626 return err;
1627}
1628
1629/* Free Rx object Resources */
1630static void
1631bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1632{
1633 int i;
1634
1635 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1636 if (res_info[i].res_type == BNA_RES_T_MEM)
1637 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1638 else if (res_info[i].res_type == BNA_RES_T_INTR)
1639 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1640 }
1641}
1642
1643/* Allocates memory and interrupt resources for Rx object */
1644static int
1645bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1646 uint rx_id)
1647{
1648 int i, err = 0;
1649
1650 /* All memory needs to be allocated before setup_ccbs */
1651 for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1652 if (res_info[i].res_type == BNA_RES_T_MEM)
1653 err = bnad_mem_alloc(bnad,
1654 &res_info[i].res_u.mem_info);
1655 else if (res_info[i].res_type == BNA_RES_T_INTR)
1656 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1657 &res_info[i].res_u.intr_info);
1658 if (err)
1659 goto err_return;
1660 }
1661 return 0;
1662
1663err_return:
1664 bnad_rx_res_free(bnad, res_info);
1665 return err;
1666}
1667
1668/* Timer callbacks */
1669/* a) IOC timer */
1670static void
1671bnad_ioc_timeout(unsigned long data)
1672{
1673 struct bnad *bnad = (struct bnad *)data;
1674 unsigned long flags;
1675
1676 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1677 bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
1678 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1679}
1680
1681static void
1682bnad_ioc_hb_check(unsigned long data)
1683{
1684 struct bnad *bnad = (struct bnad *)data;
1685 unsigned long flags;
1686
1687 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1688 bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
1689 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1690}
1691
1692static void
1d32f769 1693bnad_iocpf_timeout(unsigned long data)
1694{
1695 struct bnad *bnad = (struct bnad *)data;
1696 unsigned long flags;
1697
1698 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1699 bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
1700 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1701}
1702
1703static void
1704bnad_iocpf_sem_timeout(unsigned long data)
1705{
1706 struct bnad *bnad = (struct bnad *)data;
1707 unsigned long flags;
1708
1709 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1710 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
1711 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1712}
1713
1714/*
1715 * All timer routines use bnad->bna_lock to protect against
1716 * the following race, which may occur in case of no locking:
0120b99c 1717 * Time CPU m CPU n
1718 * 0 1 = test_bit
1719 * 1 clear_bit
1720 * 2 del_timer_sync
1721 * 3 mod_timer
1722 */
1723
1724/* b) Dynamic Interrupt Moderation Timer */
1725static void
1726bnad_dim_timeout(unsigned long data)
1727{
1728 struct bnad *bnad = (struct bnad *)data;
1729 struct bnad_rx_info *rx_info;
1730 struct bnad_rx_ctrl *rx_ctrl;
1731 int i, j;
1732 unsigned long flags;
1733
1734 if (!netif_carrier_ok(bnad->netdev))
1735 return;
1736
1737 spin_lock_irqsave(&bnad->bna_lock, flags);
1738 for (i = 0; i < bnad->num_rx; i++) {
1739 rx_info = &bnad->rx_info[i];
1740 if (!rx_info->rx)
1741 continue;
1742 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1743 rx_ctrl = &rx_info->rx_ctrl[j];
1744 if (!rx_ctrl->ccb)
1745 continue;
1746 bna_rx_dim_update(rx_ctrl->ccb);
1747 }
1748 }
1749
1750 /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1751 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1752 mod_timer(&bnad->dim_timer,
1753 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1754 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1755}
1756
1757/* c) Statistics Timer */
1758static void
1759bnad_stats_timeout(unsigned long data)
1760{
1761 struct bnad *bnad = (struct bnad *)data;
1762 unsigned long flags;
1763
1764 if (!netif_running(bnad->netdev) ||
1765 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1766 return;
1767
1768 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 1769 bna_hw_stats_get(&bnad->bna);
1770 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1771}
1772
1773/*
1774 * Set up timer for DIM
1775 * Called with bnad->bna_lock held
1776 */
1777void
1778bnad_dim_timer_start(struct bnad *bnad)
1779{
1780 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1781 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1782 setup_timer(&bnad->dim_timer, bnad_dim_timeout,
1783 (unsigned long)bnad);
1784 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1785 mod_timer(&bnad->dim_timer,
1786 jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1787 }
1788}
1789
1790/*
1791 * Set up timer for statistics
1792 * Called with mutex_lock(&bnad->conf_mutex) held
1793 */
1794static void
1795bnad_stats_timer_start(struct bnad *bnad)
1796{
1797 unsigned long flags;
1798
1799 spin_lock_irqsave(&bnad->bna_lock, flags);
1800 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1801 setup_timer(&bnad->stats_timer, bnad_stats_timeout,
1802 (unsigned long)bnad);
1803 mod_timer(&bnad->stats_timer,
1804 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1805 }
1806 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1807}
1808
1809/*
1810 * Stops the stats timer
1811 * Called with mutex_lock(&bnad->conf_mutex) held
1812 */
1813static void
1814bnad_stats_timer_stop(struct bnad *bnad)
1815{
1816 int to_del = 0;
1817 unsigned long flags;
1818
1819 spin_lock_irqsave(&bnad->bna_lock, flags);
1820 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1821 to_del = 1;
1822 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1823 if (to_del)
1824 del_timer_sync(&bnad->stats_timer);
1825}
1826
1827/* Utilities */
1828
1829static void
1830bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1831{
1832 int i = 1; /* Index 0 has broadcast address */
1833 struct netdev_hw_addr *mc_addr;
1834
1835 netdev_for_each_mc_addr(mc_addr, netdev) {
1836 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
1837 ETH_ALEN);
1838 i++;
1839 }
1840}
1841
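/*
 * NAPI poll handler: process up to 'budget' Rx completions; when fewer are
 * received, complete NAPI and re-enable the Rx interrupt.
 */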
1842static int
1843bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1844{
1845 struct bnad_rx_ctrl *rx_ctrl =
1846 container_of(napi, struct bnad_rx_ctrl, napi);
2be67144 1847 struct bnad *bnad = rx_ctrl->bnad;
1848 int rcvd = 0;
1849
271e8b79 1850 rx_ctrl->rx_poll_ctr++;
1851
1852 if (!netif_carrier_ok(bnad->netdev))
1853 goto poll_exit;
1854
b3cc6e88 1855 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
271e8b79 1856 if (rcvd >= budget)
1857 return rcvd;
1858
1859poll_exit:
19dbff9f 1860 napi_complete(napi);
8b230ed8 1861
271e8b79 1862 rx_ctrl->rx_complete++;
1863
1864 if (rx_ctrl->ccb)
1865 bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1866
1867 return rcvd;
1868}
1869
2be67144 1870#define BNAD_NAPI_POLL_QUOTA 64
8b230ed8 1871static void
01b54b14 1872bnad_napi_add(struct bnad *bnad, u32 rx_id)
8b230ed8 1873{
1874 struct bnad_rx_ctrl *rx_ctrl;
1875 int i;
1876
1877 /* Initialize & enable NAPI */
1878 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1879 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1880 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1881 bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1882 }
1883}
1884
1885static void
01b54b14 1886bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1887{
1888 int i;
1889
1890 /* First disable and then clean up */
01b54b14 1891 for (i = 0; i < bnad->num_rxp_per_rx; i++)
8b230ed8 1892 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1893}
1894
1895/* Should be called with conf_lock held */
1896void
b3cc6e88 1897bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1898{
1899 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1900 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1901 unsigned long flags;
1902
1903 if (!tx_info->tx)
1904 return;
1905
1906 init_completion(&bnad->bnad_completions.tx_comp);
1907 spin_lock_irqsave(&bnad->bna_lock, flags);
1908 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1909 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1910 wait_for_completion(&bnad->bnad_completions.tx_comp);
1911
1912 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1913 bnad_tx_msix_unregister(bnad, tx_info,
1914 bnad->num_txq_per_tx);
1915
1916 spin_lock_irqsave(&bnad->bna_lock, flags);
1917 bna_tx_destroy(tx_info->tx);
1918 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1919
1920 tx_info->tx = NULL;
078086f3 1921 tx_info->tx_id = 0;
8b230ed8 1922
1923 bnad_tx_res_free(bnad, res_info);
1924}
1925
1926/* Should be called with conf_lock held */
1927int
078086f3 1928bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1929{
1930 int err;
1931 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1932 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1933 struct bna_intr_info *intr_info =
1934 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1935 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
d91d25d5 1936 static const struct bna_tx_event_cbfn tx_cbfn = {
1937 .tcb_setup_cbfn = bnad_cb_tcb_setup,
1938 .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1939 .tx_stall_cbfn = bnad_cb_tx_stall,
1940 .tx_resume_cbfn = bnad_cb_tx_resume,
1941 .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1942 };
1943
1944 struct bna_tx *tx;
1945 unsigned long flags;
1946
1947 tx_info->tx_id = tx_id;
1948
1949 /* Initialize the Tx object configuration */
1950 tx_config->num_txq = bnad->num_txq_per_tx;
1951 tx_config->txq_depth = bnad->txq_depth;
1952 tx_config->tx_type = BNA_TX_T_REGULAR;
078086f3 1953 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
8b230ed8 1954
8b230ed8
RM
1955 /* Get BNA's resource requirement for one tx object */
1956 spin_lock_irqsave(&bnad->bna_lock, flags);
1957 bna_tx_res_req(bnad->num_txq_per_tx,
1958 bnad->txq_depth, res_info);
1959 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1960
1961 /* Fill Unmap Q memory requirements */
5216562a
RM
1962 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1963 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1964 bnad->txq_depth));
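	/*
	 * Editorial note: the request above asks for num_txq_per_tx arrays,
	 * each large enough for one struct bnad_tx_unmap per TxQ slot
	 * (txq_depth entries).  For a hypothetical txq_depth of 2048 that is
	 * 2048 unmap entries per TxQ.
	 */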
8b230ed8
RM
1965
1966 /* Allocate resources */
1967 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1968 if (err)
1969 return err;
1970
1971 /* Ask BNA to create one Tx object, supplying required resources */
1972 spin_lock_irqsave(&bnad->bna_lock, flags);
1973 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1974 tx_info);
1975 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1976 if (!tx)
1977 goto err_return;
1978 tx_info->tx = tx;
1979
01b54b14
JH
1980 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1981 (work_func_t)bnad_tx_cleanup);
1982
8b230ed8
RM
1983 /* Register ISR for the Tx object */
1984 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1985 err = bnad_tx_msix_register(bnad, tx_info,
1986 tx_id, bnad->num_txq_per_tx);
1987 if (err)
1988 goto err_return;
1989 }
1990
1991 spin_lock_irqsave(&bnad->bna_lock, flags);
1992 bna_tx_enable(tx);
1993 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1994
1995 return 0;
1996
1997err_return:
1998 bnad_tx_res_free(bnad, res_info);
1999 return err;
2000}
2001
2002/* Setup the rx config for bna_rx_create */
2003/* bnad decides the configuration */
2004static void
2005bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2006{
e29aa339 2007 memset(rx_config, 0, sizeof(*rx_config));
8b230ed8
RM
2008 rx_config->rx_type = BNA_RX_T_REGULAR;
2009 rx_config->num_paths = bnad->num_rxp_per_rx;
078086f3 2010 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
8b230ed8
RM
2011
2012 if (bnad->num_rxp_per_rx > 1) {
2013 rx_config->rss_status = BNA_STATUS_T_ENABLED;
2014 rx_config->rss_config.hash_type =
078086f3
RM
2015 (BFI_ENET_RSS_IPV6 |
2016 BFI_ENET_RSS_IPV6_TCP |
2017 BFI_ENET_RSS_IPV4 |
2018 BFI_ENET_RSS_IPV4_TCP);
8b230ed8
RM
2019 rx_config->rss_config.hash_mask =
2020 bnad->num_rxp_per_rx - 1;
2021 get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
2022 sizeof(rx_config->rss_config.toeplitz_hash_key));
2023 } else {
2024 rx_config->rss_status = BNA_STATUS_T_DISABLED;
2025 memset(&rx_config->rss_config, 0,
2026 sizeof(rx_config->rss_config));
2027 }
e29aa339
RM
2028
2029 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2030 rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2031
2032 /* BNA_RXP_SINGLE - one data-buffer queue
2033 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2034 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2035 */
2036 /* TODO: configurable param for queue type */
8b230ed8 2037 rx_config->rxp_type = BNA_RXP_SLR;
8b230ed8 2038
e29aa339
RM
2039 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2040 rx_config->frame_size > 4096) {
 2041 /* Though size_routing_enable is set in SLR,
 2042 * small packets may get routed to the same RxQ.
 2043 * Set buf_size to 2048 instead of PAGE_SIZE.
2044 */
2045 rx_config->q0_buf_size = 2048;
2046 /* this should be in multiples of 2 */
2047 rx_config->q0_num_vecs = 4;
2048 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2049 rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2050 } else {
2051 rx_config->q0_buf_size = rx_config->frame_size;
2052 rx_config->q0_num_vecs = 1;
2053 rx_config->q0_depth = bnad->rxq_depth;
2054 }
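	/*
	 * Editorial note (worked example, values assumed): with a jumbo MTU
	 * such as 9000 on a CAT2 device, frame_size exceeds 4096, so q0 is
	 * built from 2048-byte buffers with 4 vectors per frame; if rxq_depth
	 * were 2048, q0_depth would become 2048 * 4 = 8192 buffer slots.
	 * At or below 4096 bytes a single full-frame buffer is used instead.
	 */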
2055
2056 /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2057 if (rx_config->rxp_type == BNA_RXP_SLR) {
2058 rx_config->q1_depth = bnad->rxq_depth;
2059 rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2060 }
8b230ed8
RM
2061
2062 rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
2063}
2064
2be67144
RM
2065static void
2066bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2067{
2068 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2069 int i;
2070
2071 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2072 rx_info->rx_ctrl[i].bnad = bnad;
2073}
2074
8b230ed8 2075/* Called with mutex_lock(&bnad->conf_mutex) held */
e29aa339
RM
2076u32
2077bnad_reinit_rx(struct bnad *bnad)
2078{
2079 struct net_device *netdev = bnad->netdev;
2080 u32 err = 0, current_err = 0;
2081 u32 rx_id = 0, count = 0;
2082 unsigned long flags;
2083
2084 /* destroy and create new rx objects */
2085 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2086 if (!bnad->rx_info[rx_id].rx)
2087 continue;
2088 bnad_destroy_rx(bnad, rx_id);
2089 }
2090
2091 spin_lock_irqsave(&bnad->bna_lock, flags);
2092 bna_enet_mtu_set(&bnad->bna.enet,
2093 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2094 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2095
2096 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2097 count++;
2098 current_err = bnad_setup_rx(bnad, rx_id);
2099 if (current_err && !err) {
2100 err = current_err;
2101 pr_err("RXQ:%u setup failed\n", rx_id);
2102 }
2103 }
2104
2105 /* restore rx configuration */
2106 if (bnad->rx_info[0].rx && !err) {
2107 bnad_restore_vlans(bnad, 0);
2108 bnad_enable_default_bcast(bnad);
2109 spin_lock_irqsave(&bnad->bna_lock, flags);
2110 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2111 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2112 bnad_set_rx_mode(netdev);
2113 }
2114
2115 return count;
2116}
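/*
 * Editorial note: bnad_reinit_rx() returns the number of Rx objects it
 * attempted to set up, not an error code.  Individual bnad_setup_rx()
 * failures are logged and remembered in err, which is only used to skip
 * the VLAN/MAC/rx-mode restore at the end.
 */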
2117
2118/* Called with bnad_conf_lock() held */
8b230ed8 2119void
b3cc6e88 2120bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
2121{
2122 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2123 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2124 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2125 unsigned long flags;
271e8b79 2126 int to_del = 0;
8b230ed8
RM
2127
2128 if (!rx_info->rx)
2129 return;
2130
2131 if (0 == rx_id) {
2132 spin_lock_irqsave(&bnad->bna_lock, flags);
271e8b79
RM
2133 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2134 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
8b230ed8 2135 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
271e8b79
RM
2136 to_del = 1;
2137 }
8b230ed8 2138 spin_unlock_irqrestore(&bnad->bna_lock, flags);
271e8b79 2139 if (to_del)
8b230ed8
RM
2140 del_timer_sync(&bnad->dim_timer);
2141 }
2142
8b230ed8
RM
2143 init_completion(&bnad->bnad_completions.rx_comp);
2144 spin_lock_irqsave(&bnad->bna_lock, flags);
2145 bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2146 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2147 wait_for_completion(&bnad->bnad_completions.rx_comp);
2148
2149 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2150 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2151
01b54b14 2152 bnad_napi_delete(bnad, rx_id);
2be67144 2153
8b230ed8
RM
2154 spin_lock_irqsave(&bnad->bna_lock, flags);
2155 bna_rx_destroy(rx_info->rx);
8b230ed8
RM
2156
2157 rx_info->rx = NULL;
3caa1e95 2158 rx_info->rx_id = 0;
b9fa1fbf 2159 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
2160
2161 bnad_rx_res_free(bnad, res_info);
2162}
2163
2164/* Called with mutex_lock(&bnad->conf_mutex) held */
2165int
078086f3 2166bnad_setup_rx(struct bnad *bnad, u32 rx_id)
8b230ed8
RM
2167{
2168 int err;
2169 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2170 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2171 struct bna_intr_info *intr_info =
2172 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2173 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
d91d25d5 2174 static const struct bna_rx_event_cbfn rx_cbfn = {
5216562a 2175 .rcb_setup_cbfn = NULL,
01b54b14 2176 .rcb_destroy_cbfn = NULL,
d91d25d5 2177 .ccb_setup_cbfn = bnad_cb_ccb_setup,
2178 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
5bcf6ac0 2179 .rx_stall_cbfn = bnad_cb_rx_stall,
d91d25d5 2180 .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2181 .rx_post_cbfn = bnad_cb_rx_post,
2182 };
8b230ed8
RM
2183 struct bna_rx *rx;
2184 unsigned long flags;
2185
078086f3
RM
2186 rx_info->rx_id = rx_id;
2187
8b230ed8
RM
2188 /* Initialize the Rx object configuration */
2189 bnad_init_rx_config(bnad, rx_config);
2190
8b230ed8
RM
2191 /* Get BNA's resource requirement for one Rx object */
2192 spin_lock_irqsave(&bnad->bna_lock, flags);
2193 bna_rx_res_req(rx_config, res_info);
2194 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2195
2196 /* Fill Unmap Q memory requirements */
e29aa339
RM
2197 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2198 rx_config->num_paths,
2199 (rx_config->q0_depth *
2200 sizeof(struct bnad_rx_unmap)) +
2201 sizeof(struct bnad_rx_unmap_q));
2202
2203 if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2204 BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2205 rx_config->num_paths,
2206 (rx_config->q1_depth *
2207 sizeof(struct bnad_rx_unmap) +
2208 sizeof(struct bnad_rx_unmap_q)));
2209 }
8b230ed8
RM
2210 /* Allocate resource */
2211 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2212 if (err)
2213 return err;
2214
2be67144
RM
2215 bnad_rx_ctrl_init(bnad, rx_id);
2216
8b230ed8
RM
2217 /* Ask BNA to create one Rx object, supplying required resources */
2218 spin_lock_irqsave(&bnad->bna_lock, flags);
2219 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2220 rx_info);
3caa1e95
RM
2221 if (!rx) {
2222 err = -ENOMEM;
b9fa1fbf 2223 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 2224 goto err_return;
3caa1e95 2225 }
8b230ed8 2226 rx_info->rx = rx;
b9fa1fbf 2227 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 2228
01b54b14
JH
2229 INIT_WORK(&rx_info->rx_cleanup_work,
2230 (work_func_t)(bnad_rx_cleanup));
2231
2be67144
RM
2232 /*
 2233 * Init NAPI, so that state is set to NAPI_STATE_SCHED and
 2234 * the IRQ handler cannot schedule NAPI at this point.
2235 */
01b54b14 2236 bnad_napi_add(bnad, rx_id);
2be67144 2237
8b230ed8
RM
2238 /* Register ISR for the Rx object */
2239 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2240 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2241 rx_config->num_paths);
2242 if (err)
2243 goto err_return;
2244 }
2245
8b230ed8
RM
2246 spin_lock_irqsave(&bnad->bna_lock, flags);
2247 if (0 == rx_id) {
2248 /* Set up Dynamic Interrupt Moderation Vector */
2249 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2250 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2251
2252 /* Enable VLAN filtering only on the default Rx */
2253 bna_rx_vlanfilter_enable(rx);
2254
2255 /* Start the DIM timer */
2256 bnad_dim_timer_start(bnad);
2257 }
2258
2259 bna_rx_enable(rx);
2260 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2261
2262 return 0;
2263
2264err_return:
b3cc6e88 2265 bnad_destroy_rx(bnad, rx_id);
8b230ed8
RM
2266 return err;
2267}
2268
2269/* Called with conf_lock & bnad->bna_lock held */
2270void
2271bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2272{
2273 struct bnad_tx_info *tx_info;
2274
2275 tx_info = &bnad->tx_info[0];
2276 if (!tx_info->tx)
2277 return;
2278
2279 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2280}
2281
2282/* Called with conf_lock & bnad->bna_lock held */
2283void
2284bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2285{
2286 struct bnad_rx_info *rx_info;
0120b99c 2287 int i;
8b230ed8
RM
2288
2289 for (i = 0; i < bnad->num_rx; i++) {
2290 rx_info = &bnad->rx_info[i];
2291 if (!rx_info->rx)
2292 continue;
2293 bna_rx_coalescing_timeo_set(rx_info->rx,
2294 bnad->rx_coalescing_timeo);
2295 }
2296}
2297
2298/*
2299 * Called with bnad->bna_lock held
2300 */
a2122d95 2301int
8b230ed8
RM
2302bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
2303{
2304 int ret;
2305
2306 if (!is_valid_ether_addr(mac_addr))
2307 return -EADDRNOTAVAIL;
2308
2309 /* If datapath is down, pretend everything went through */
2310 if (!bnad->rx_info[0].rx)
2311 return 0;
2312
2313 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
2314 if (ret != BNA_CB_SUCCESS)
2315 return -EADDRNOTAVAIL;
2316
2317 return 0;
2318}
2319
2320/* Should be called with conf_lock held */
a2122d95 2321int
8b230ed8
RM
2322bnad_enable_default_bcast(struct bnad *bnad)
2323{
2324 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2325 int ret;
2326 unsigned long flags;
2327
2328 init_completion(&bnad->bnad_completions.mcast_comp);
2329
2330 spin_lock_irqsave(&bnad->bna_lock, flags);
2331 ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
2332 bnad_cb_rx_mcast_add);
2333 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2334
2335 if (ret == BNA_CB_SUCCESS)
2336 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2337 else
2338 return -ENODEV;
2339
2340 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2341 return -ENODEV;
2342
2343 return 0;
2344}
2345
19dbff9f 2346/* Called with mutex_lock(&bnad->conf_mutex) held */
a2122d95 2347void
aad75b66
RM
2348bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2349{
f859d7cb 2350 u16 vid;
aad75b66
RM
2351 unsigned long flags;
2352
f859d7cb 2353 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
aad75b66 2354 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 2355 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
aad75b66
RM
2356 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2357 }
2358}
2359
8b230ed8
RM
2360/* Statistics utilities */
2361void
250e061e 2362bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2363{
8b230ed8
RM
2364 int i, j;
2365
2366 for (i = 0; i < bnad->num_rx; i++) {
2367 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2368 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
250e061e 2369 stats->rx_packets += bnad->rx_info[i].
8b230ed8 2370 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
250e061e 2371 stats->rx_bytes += bnad->rx_info[i].
8b230ed8
RM
2372 rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2373 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2374 bnad->rx_info[i].rx_ctrl[j].ccb->
2375 rcb[1]->rxq) {
250e061e 2376 stats->rx_packets +=
8b230ed8
RM
2377 bnad->rx_info[i].rx_ctrl[j].
2378 ccb->rcb[1]->rxq->rx_packets;
250e061e 2379 stats->rx_bytes +=
8b230ed8
RM
2380 bnad->rx_info[i].rx_ctrl[j].
2381 ccb->rcb[1]->rxq->rx_bytes;
2382 }
2383 }
2384 }
2385 }
2386 for (i = 0; i < bnad->num_tx; i++) {
2387 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2388 if (bnad->tx_info[i].tcb[j]) {
250e061e 2389 stats->tx_packets +=
8b230ed8 2390 bnad->tx_info[i].tcb[j]->txq->tx_packets;
250e061e 2391 stats->tx_bytes +=
8b230ed8
RM
2392 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2393 }
2394 }
2395 }
2396}
2397
2398/*
2399 * Must be called with the bna_lock held.
2400 */
2401void
250e061e 2402bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
8b230ed8 2403{
078086f3
RM
2404 struct bfi_enet_stats_mac *mac_stats;
2405 u32 bmap;
8b230ed8
RM
2406 int i;
2407
078086f3 2408 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
250e061e 2409 stats->rx_errors =
8b230ed8
RM
2410 mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2411 mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2412 mac_stats->rx_undersize;
250e061e 2413 stats->tx_errors = mac_stats->tx_fcs_error +
8b230ed8 2414 mac_stats->tx_undersize;
250e061e
ED
2415 stats->rx_dropped = mac_stats->rx_drop;
2416 stats->tx_dropped = mac_stats->tx_drop;
2417 stats->multicast = mac_stats->rx_multicast;
2418 stats->collisions = mac_stats->tx_total_collision;
8b230ed8 2419
250e061e 2420 stats->rx_length_errors = mac_stats->rx_frame_length_error;
8b230ed8
RM
2421
2422 /* receive ring buffer overflow ?? */
2423
250e061e
ED
2424 stats->rx_crc_errors = mac_stats->rx_fcs_error;
2425 stats->rx_frame_errors = mac_stats->rx_alignment_error;
8b230ed8 2426 /* receiver FIFO overrun */
078086f3
RM
2427 bmap = bna_rx_rid_mask(&bnad->bna);
2428 for (i = 0; bmap; i++) {
8b230ed8 2429 if (bmap & 1) {
250e061e 2430 stats->rx_fifo_errors +=
8b230ed8 2431 bnad->stats.bna_stats->
078086f3 2432 hw_stats.rxf_stats[i].frame_drops;
8b230ed8
RM
2433 break;
2434 }
2435 bmap >>= 1;
2436 }
2437}
2438
2439static void
2440bnad_mbox_irq_sync(struct bnad *bnad)
2441{
2442 u32 irq;
2443 unsigned long flags;
2444
2445 spin_lock_irqsave(&bnad->bna_lock, flags);
2446 if (bnad->cfg_flags & BNAD_CF_MSIX)
8811e267 2447 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
8b230ed8
RM
2448 else
2449 irq = bnad->pcidev->irq;
2450 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2451
2452 synchronize_irq(irq);
2453}
2454
2455/* Utility used by bnad_start_xmit, for doing TSO */
2456static int
2457bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2458{
2459 int err;
2460
8b230ed8
RM
2461 if (skb_header_cloned(skb)) {
2462 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2463 if (err) {
2464 BNAD_UPDATE_CTR(bnad, tso_err);
2465 return err;
2466 }
2467 }
2468
2469 /*
2470 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2471 * excluding the length field.
2472 */
2473 if (skb->protocol == htons(ETH_P_IP)) {
2474 struct iphdr *iph = ip_hdr(skb);
2475
2476 /* Do we really need these? */
2477 iph->tot_len = 0;
2478 iph->check = 0;
2479
2480 tcp_hdr(skb)->check =
2481 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2482 IPPROTO_TCP, 0);
2483 BNAD_UPDATE_CTR(bnad, tso4);
2484 } else {
2485 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2486
8b230ed8
RM
2487 ipv6h->payload_len = 0;
2488 tcp_hdr(skb)->check =
2489 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0,
2490 IPPROTO_TCP, 0);
2491 BNAD_UPDATE_CTR(bnad, tso6);
2492 }
2493
2494 return 0;
2495}
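/*
 * Editorial note: clearing iph->tot_len/iph->check (or ipv6h->payload_len)
 * and seeding the TCP checksum with the pseudo-header sum is the usual
 * preparation for hardware TSO -- the device is expected to fill in the
 * per-segment lengths and finish the checksum for each segment it emits.
 */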
2496
2497/*
2498 * Initialize Q numbers depending on Rx Paths
2499 * Called with bnad->bna_lock held, because of cfg_flags
2500 * access.
2501 */
2502static void
2503bnad_q_num_init(struct bnad *bnad)
2504{
2505 int rxps;
2506
2507 rxps = min((uint)num_online_cpus(),
772b5235 2508 (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
8b230ed8
RM
2509
2510 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2511 rxps = 1; /* INTx */
2512
2513 bnad->num_rx = 1;
2514 bnad->num_tx = 1;
2515 bnad->num_rxp_per_rx = rxps;
2516 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2517}
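/*
 * Editorial note: in MSI-X mode the Rx path count tracks num_online_cpus(),
 * capped at BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX; without MSI-X it is forced
 * to 1 so that the single INTx interrupt can service the whole datapath.
 */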
2518
2519/*
2520 * Adjusts the Q numbers, given a number of msix vectors
 2521 * Gives preference to RSS over Tx priority queues;
 2522 * in such a case, just one Tx queue is used.
 2523 * Called with bnad->bna_lock held because of cfg_flags access
2524 */
2525static void
078086f3 2526bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
8b230ed8
RM
2527{
2528 bnad->num_txq_per_tx = 1;
2529 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2530 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2531 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2532 bnad->num_rxp_per_rx = msix_vectors -
2533 (bnad->num_tx * bnad->num_txq_per_tx) -
2534 BNAD_MAILBOX_MSIX_VECTORS;
2535 } else
2536 bnad->num_rxp_per_rx = 1;
2537}
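/*
 * Editorial note (illustrative arithmetic, vector counts assumed): with a
 * single TxQ the Rx paths get whatever MSI-X vectors remain, i.e.
 *	num_rxp_per_rx = msix_vectors - (num_tx * num_txq_per_tx)
 *				      - BNAD_MAILBOX_MSIX_VECTORS
 * e.g. 8 granted vectors, 1 TxQ and (hypothetically) 1 mailbox vector
 * leave 6 Rx paths.  If the granted vectors cannot cover even one Rx path,
 * num_rxp_per_rx falls back to 1.
 */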
2538
078086f3
RM
2539/* Enable / disable ioceth */
2540static int
2541bnad_ioceth_disable(struct bnad *bnad)
8b230ed8
RM
2542{
2543 unsigned long flags;
078086f3 2544 int err = 0;
8b230ed8
RM
2545
2546 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2547 init_completion(&bnad->bnad_completions.ioc_comp);
2548 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
8b230ed8
RM
2549 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2550
078086f3
RM
2551 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2552 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2553
2554 err = bnad->bnad_completions.ioc_comp_status;
2555 return err;
8b230ed8
RM
2556}
2557
2558static int
078086f3 2559bnad_ioceth_enable(struct bnad *bnad)
8b230ed8
RM
2560{
2561 int err = 0;
2562 unsigned long flags;
2563
8b230ed8 2564 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2565 init_completion(&bnad->bnad_completions.ioc_comp);
2566 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2567 bna_ioceth_enable(&bnad->bna.ioceth);
8b230ed8
RM
2568 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2569
078086f3
RM
2570 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2571 msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
8b230ed8 2572
078086f3 2573 err = bnad->bnad_completions.ioc_comp_status;
8b230ed8
RM
2574
2575 return err;
2576}
2577
2578/* Free BNA resources */
2579static void
078086f3
RM
2580bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2581 u32 res_val_max)
8b230ed8
RM
2582{
2583 int i;
8b230ed8 2584
078086f3
RM
2585 for (i = 0; i < res_val_max; i++)
2586 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2587}
2588
2589/* Allocates memory and interrupt resources for BNA */
2590static int
078086f3
RM
2591bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2592 u32 res_val_max)
8b230ed8
RM
2593{
2594 int i, err;
8b230ed8 2595
078086f3
RM
2596 for (i = 0; i < res_val_max; i++) {
2597 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
8b230ed8
RM
2598 if (err)
2599 goto err_return;
2600 }
2601 return 0;
2602
2603err_return:
078086f3 2604 bnad_res_free(bnad, res_info, res_val_max);
8b230ed8
RM
2605 return err;
2606}
2607
2608/* Interrupt enable / disable */
2609static void
2610bnad_enable_msix(struct bnad *bnad)
2611{
2612 int i, ret;
8b230ed8
RM
2613 unsigned long flags;
2614
2615 spin_lock_irqsave(&bnad->bna_lock, flags);
2616 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2617 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2618 return;
2619 }
2620 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2621
2622 if (bnad->msix_table)
2623 return;
2624
8b230ed8 2625 bnad->msix_table =
b7ee31c5 2626 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
8b230ed8
RM
2627
2628 if (!bnad->msix_table)
2629 goto intx_mode;
2630
b7ee31c5 2631 for (i = 0; i < bnad->msix_num; i++)
8b230ed8
RM
2632 bnad->msix_table[i].entry = i;
2633
b7ee31c5 2634 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
8b230ed8
RM
2635 if (ret > 0) {
2636 /* Not enough MSI-X vectors. */
19dbff9f
RM
2637 pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
2638 ret, bnad->msix_num);
8b230ed8
RM
2639
2640 spin_lock_irqsave(&bnad->bna_lock, flags);
2641 /* ret = #of vectors that we got */
271e8b79
RM
2642 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2643 (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
8b230ed8
RM
2644 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2645
271e8b79 2646 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
8b230ed8 2647 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8 2648
078086f3
RM
2649 if (bnad->msix_num > ret)
2650 goto intx_mode;
2651
8b230ed8
RM
2652 /* Try once more with adjusted numbers */
2653 /* If this fails, fall back to INTx */
2654 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
b7ee31c5 2655 bnad->msix_num);
8b230ed8
RM
2656 if (ret)
2657 goto intx_mode;
2658
2659 } else if (ret < 0)
2660 goto intx_mode;
078086f3
RM
2661
2662 pci_intx(bnad->pcidev, 0);
2663
8b230ed8
RM
2664 return;
2665
2666intx_mode:
19dbff9f 2667 pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
8b230ed8
RM
2668
2669 kfree(bnad->msix_table);
2670 bnad->msix_table = NULL;
2671 bnad->msix_num = 0;
8b230ed8
RM
2672 spin_lock_irqsave(&bnad->bna_lock, flags);
2673 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2674 bnad_q_num_init(bnad);
2675 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2676}
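/*
 * Editorial note: the fallback order above is MSI-X with the full vector
 * count; if fewer vectors are granted, the queue counts are shrunk and
 * pci_enable_msix() is retried once; any further failure (or failure to
 * allocate the vector table) drops to INTx mode via bnad_q_num_init().
 */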
2677
2678static void
2679bnad_disable_msix(struct bnad *bnad)
2680{
2681 u32 cfg_flags;
2682 unsigned long flags;
2683
2684 spin_lock_irqsave(&bnad->bna_lock, flags);
2685 cfg_flags = bnad->cfg_flags;
2686 if (bnad->cfg_flags & BNAD_CF_MSIX)
2687 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2688 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2689
2690 if (cfg_flags & BNAD_CF_MSIX) {
2691 pci_disable_msix(bnad->pcidev);
2692 kfree(bnad->msix_table);
2693 bnad->msix_table = NULL;
2694 }
2695}
2696
2697/* Netdev entry points */
2698static int
2699bnad_open(struct net_device *netdev)
2700{
2701 int err;
2702 struct bnad *bnad = netdev_priv(netdev);
2703 struct bna_pause_config pause_config;
8b230ed8
RM
2704 unsigned long flags;
2705
2706 mutex_lock(&bnad->conf_mutex);
2707
2708 /* Tx */
2709 err = bnad_setup_tx(bnad, 0);
2710 if (err)
2711 goto err_return;
2712
2713 /* Rx */
2714 err = bnad_setup_rx(bnad, 0);
2715 if (err)
2716 goto cleanup_tx;
2717
2718 /* Port */
2719 pause_config.tx_pause = 0;
2720 pause_config.rx_pause = 0;
2721
8b230ed8 2722 spin_lock_irqsave(&bnad->bna_lock, flags);
e29aa339
RM
2723 bna_enet_mtu_set(&bnad->bna.enet,
2724 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
078086f3
RM
2725 bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
2726 bna_enet_enable(&bnad->bna.enet);
8b230ed8
RM
2727 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2728
2729 /* Enable broadcast */
2730 bnad_enable_default_bcast(bnad);
2731
aad75b66
RM
2732 /* Restore VLANs, if any */
2733 bnad_restore_vlans(bnad, 0);
2734
8b230ed8
RM
2735 /* Set the UCAST address */
2736 spin_lock_irqsave(&bnad->bna_lock, flags);
2737 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2739
2740 /* Start the stats timer */
2741 bnad_stats_timer_start(bnad);
2742
2743 mutex_unlock(&bnad->conf_mutex);
2744
2745 return 0;
2746
2747cleanup_tx:
b3cc6e88 2748 bnad_destroy_tx(bnad, 0);
8b230ed8
RM
2749
2750err_return:
2751 mutex_unlock(&bnad->conf_mutex);
2752 return err;
2753}
2754
2755static int
2756bnad_stop(struct net_device *netdev)
2757{
2758 struct bnad *bnad = netdev_priv(netdev);
2759 unsigned long flags;
2760
2761 mutex_lock(&bnad->conf_mutex);
2762
2763 /* Stop the stats timer */
2764 bnad_stats_timer_stop(bnad);
2765
078086f3 2766 init_completion(&bnad->bnad_completions.enet_comp);
8b230ed8
RM
2767
2768 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
2769 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2770 bnad_cb_enet_disabled);
8b230ed8
RM
2771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2772
078086f3 2773 wait_for_completion(&bnad->bnad_completions.enet_comp);
8b230ed8 2774
b3cc6e88
JH
2775 bnad_destroy_tx(bnad, 0);
2776 bnad_destroy_rx(bnad, 0);
8b230ed8
RM
2777
2778 /* Synchronize mailbox IRQ */
2779 bnad_mbox_irq_sync(bnad);
2780
2781 mutex_unlock(&bnad->conf_mutex);
2782
2783 return 0;
2784}
2785
2786/* TX */
5216562a
RM
2787/* Returns 0 for success */
2788static int
2789bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2790 struct sk_buff *skb, struct bna_txq_entry *txqent)
8b230ed8 2791{
5216562a
RM
2792 u16 flags = 0;
2793 u32 gso_size;
2794 u16 vlan_tag = 0;
8b230ed8 2795
eab6d18d 2796 if (vlan_tx_tag_present(skb)) {
5216562a 2797 vlan_tag = (u16)vlan_tx_tag_get(skb);
8b230ed8
RM
2798 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2799 }
2800 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
5216562a
RM
2801 vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2802 | (vlan_tag & 0x1fff);
8b230ed8
RM
2803 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2804 }
8b230ed8
RM
2805 txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2806
2807 if (skb_is_gso(skb)) {
271e8b79 2808 gso_size = skb_shinfo(skb)->gso_size;
5216562a 2809 if (unlikely(gso_size > bnad->netdev->mtu)) {
271e8b79 2810 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
5216562a 2811 return -EINVAL;
271e8b79
RM
2812 }
2813 if (unlikely((gso_size + skb_transport_offset(skb) +
5216562a 2814 tcp_hdrlen(skb)) >= skb->len)) {
271e8b79
RM
2815 txqent->hdr.wi.opcode =
2816 __constant_htons(BNA_TXQ_WI_SEND);
2817 txqent->hdr.wi.lso_mss = 0;
2818 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2819 } else {
2820 txqent->hdr.wi.opcode =
2821 __constant_htons(BNA_TXQ_WI_SEND_LSO);
2822 txqent->hdr.wi.lso_mss = htons(gso_size);
2823 }
2824
5216562a 2825 if (bnad_tso_prepare(bnad, skb)) {
271e8b79 2826 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
5216562a 2827 return -EINVAL;
8b230ed8 2828 }
5216562a 2829
8b230ed8
RM
2830 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2831 txqent->hdr.wi.l4_hdr_size_n_offset =
5216562a
RM
2832 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2833 tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2834 } else {
271e8b79 2835 txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
8b230ed8
RM
2836 txqent->hdr.wi.lso_mss = 0;
2837
5216562a 2838 if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
271e8b79 2839 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
5216562a 2840 return -EINVAL;
8b230ed8 2841 }
8b230ed8 2842
271e8b79
RM
2843 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2844 u8 proto = 0;
8b230ed8 2845
271e8b79
RM
2846 if (skb->protocol == __constant_htons(ETH_P_IP))
2847 proto = ip_hdr(skb)->protocol;
5216562a 2848#ifdef NETIF_F_IPV6_CSUM
271e8b79
RM
2849 else if (skb->protocol ==
2850 __constant_htons(ETH_P_IPV6)) {
2851 /* nexthdr may not be TCP immediately. */
2852 proto = ipv6_hdr(skb)->nexthdr;
2853 }
5216562a 2854#endif
271e8b79
RM
2855 if (proto == IPPROTO_TCP) {
2856 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2857 txqent->hdr.wi.l4_hdr_size_n_offset =
2858 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2859 (0, skb_transport_offset(skb)));
2860
2861 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2862
2863 if (unlikely(skb_headlen(skb) <
5216562a
RM
2864 skb_transport_offset(skb) +
2865 tcp_hdrlen(skb))) {
271e8b79 2866 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
5216562a 2867 return -EINVAL;
271e8b79 2868 }
271e8b79
RM
2869 } else if (proto == IPPROTO_UDP) {
2870 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2871 txqent->hdr.wi.l4_hdr_size_n_offset =
2872 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2873 (0, skb_transport_offset(skb)));
2874
2875 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2876 if (unlikely(skb_headlen(skb) <
5216562a 2877 skb_transport_offset(skb) +
271e8b79 2878 sizeof(struct udphdr))) {
271e8b79 2879 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
5216562a 2880 return -EINVAL;
271e8b79
RM
2881 }
2882 } else {
5216562a 2883
271e8b79 2884 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
5216562a 2885 return -EINVAL;
8b230ed8 2886 }
5216562a 2887 } else
271e8b79 2888 txqent->hdr.wi.l4_hdr_size_n_offset = 0;
8b230ed8
RM
2889 }
2890
2891 txqent->hdr.wi.flags = htons(flags);
8b230ed8
RM
2892 txqent->hdr.wi.frame_length = htonl(skb->len);
2893
5216562a
RM
2894 return 0;
2895}
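/*
 * Editorial note: bnad_txq_wi_prepare() only fills the header work item
 * (opcode, VLAN tag, LSO MSS and checksum-offload hints); the caller,
 * bnad_start_xmit(), programs the DMA vectors and rings the doorbell.
 * A non-zero return means the skb must be dropped; the relevant error
 * counter has already been bumped here.
 */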
2896
2897/*
2898 * bnad_start_xmit : Netdev entry point for Transmit
2899 * Called under lock held by net_device
2900 */
2901static netdev_tx_t
2902bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2903{
2904 struct bnad *bnad = netdev_priv(netdev);
2905 u32 txq_id = 0;
2906 struct bna_tcb *tcb = NULL;
2907 struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2908 u32 prod, q_depth, vect_id;
2909 u32 wis, vectors, len;
2910 int i;
2911 dma_addr_t dma_addr;
2912 struct bna_txq_entry *txqent;
2913
271e8b79 2914 len = skb_headlen(skb);
8b230ed8 2915
5216562a
RM
2916 /* Sanity checks for the skb */
2917
2918 if (unlikely(skb->len <= ETH_HLEN)) {
2919 dev_kfree_skb(skb);
2920 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2921 return NETDEV_TX_OK;
2922 }
2923 if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2924 dev_kfree_skb(skb);
2925 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2926 return NETDEV_TX_OK;
2927 }
2928 if (unlikely(len == 0)) {
2929 dev_kfree_skb(skb);
2930 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2931 return NETDEV_TX_OK;
2932 }
2933
2934 tcb = bnad->tx_info[0].tcb[txq_id];
2935 q_depth = tcb->q_depth;
2936 prod = tcb->producer_index;
8b230ed8 2937
5216562a 2938 unmap_q = tcb->unmap_q;
271e8b79 2939
5216562a
RM
2940 /*
2941 * Takes care of the Tx that is scheduled between clearing the flag
2942 * and the netif_tx_stop_all_queues() call.
2943 */
2944 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2945 dev_kfree_skb(skb);
2946 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2947 return NETDEV_TX_OK;
2948 }
2949
2950 vectors = 1 + skb_shinfo(skb)->nr_frags;
2951 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
2952
2953 if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2954 dev_kfree_skb(skb);
2955 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2956 return NETDEV_TX_OK;
2957 }
2958
2959 /* Check for available TxQ resources */
2960 if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2961 if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2962 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2963 u32 sent;
2964 sent = bnad_txcmpl_process(bnad, tcb);
2965 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2966 bna_ib_ack(tcb->i_dbell, sent);
2967 smp_mb__before_clear_bit();
2968 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2969 } else {
2970 netif_stop_queue(netdev);
2971 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2972 }
2973
2974 smp_mb();
2975 /*
2976 * Check again to deal with race condition between
2977 * netif_stop_queue here, and netif_wake_queue in
2978 * interrupt handler which is not inside netif tx lock.
2979 */
2980 if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2981 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2982 return NETDEV_TX_BUSY;
2983 } else {
2984 netif_wake_queue(netdev);
2985 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2986 }
2987 }
2988
2989 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2990 head_unmap = &unmap_q[prod];
2991
2992 /* Program the opcode, flags, frame_len, num_vectors in WI */
2993 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
2994 dev_kfree_skb(skb);
2995 return NETDEV_TX_OK;
2996 }
2997 txqent->hdr.wi.reserved = 0;
2998 txqent->hdr.wi.num_vectors = vectors;
2999
3000 head_unmap->skb = skb;
3001 head_unmap->nvecs = 0;
3002
3003 /* Program the vectors */
3004 unmap = head_unmap;
3005 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3006 len, DMA_TO_DEVICE);
3007 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3008 txqent->vector[0].length = htons(len);
3009 dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3010 head_unmap->nvecs++;
3011
3012 for (i = 0, vect_id = 0; i < vectors - 1; i++) {
9e903e08
ED
3013 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
3014 u16 size = skb_frag_size(frag);
8b230ed8 3015
271e8b79 3016 if (unlikely(size == 0)) {
5216562a
RM
3017 /* Undo the changes starting at tcb->producer_index */
3018 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3019 tcb->producer_index);
271e8b79
RM
3020 dev_kfree_skb(skb);
3021 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3022 return NETDEV_TX_OK;
3023 }
3024
3025 len += size;
3026
5216562a
RM
3027 vect_id++;
3028 if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
8b230ed8 3029 vect_id = 0;
5216562a
RM
3030 BNA_QE_INDX_INC(prod, q_depth);
3031 txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
271e8b79
RM
3032 txqent->hdr.wi_ext.opcode =
3033 __constant_htons(BNA_TXQ_WI_EXTENSION);
5216562a 3034 unmap = &unmap_q[prod];
8b230ed8
RM
3035 }
3036
4d5b1a67
IC
3037 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3038 0, size, DMA_TO_DEVICE);
8b230ed8 3039 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
5216562a
RM
3040 txqent->vector[vect_id].length = htons(size);
3041 dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3042 dma_addr);
3043 head_unmap->nvecs++;
8b230ed8
RM
3044 }
3045
271e8b79 3046 if (unlikely(len != skb->len)) {
5216562a
RM
3047 /* Undo the changes starting at tcb->producer_index */
3048 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
271e8b79
RM
3049 dev_kfree_skb(skb);
3050 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3051 return NETDEV_TX_OK;
3052 }
3053
5216562a
RM
3054 BNA_QE_INDX_INC(prod, q_depth);
3055 tcb->producer_index = prod;
8b230ed8
RM
3056
3057 smp_mb();
be7fa326
RM
3058
3059 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3060 return NETDEV_TX_OK;
3061
fee1253e
RM
3062 skb_tx_timestamp(skb);
3063
8b230ed8 3064 bna_txq_prod_indx_doorbell(tcb);
271e8b79 3065 smp_mb();
8b230ed8 3066
8b230ed8
RM
3067 return NETDEV_TX_OK;
3068}
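/*
 * Editorial note: the only path that returns NETDEV_TX_BUSY is the
 * queue-full re-check after netif_stop_queue(); every sanity-check failure
 * frees the skb and returns NETDEV_TX_OK so the stack does not requeue an
 * skb the driver has already dropped.
 */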
3069
3070/*
 3071 * Uses spin_lock to synchronize reading of the stats structures, which
 3072 * are written by BNA under the same lock.
3073 */
250e061e
ED
3074static struct rtnl_link_stats64 *
3075bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
8b230ed8
RM
3076{
3077 struct bnad *bnad = netdev_priv(netdev);
3078 unsigned long flags;
3079
3080 spin_lock_irqsave(&bnad->bna_lock, flags);
3081
250e061e
ED
3082 bnad_netdev_qstats_fill(bnad, stats);
3083 bnad_netdev_hwstats_fill(bnad, stats);
8b230ed8
RM
3084
3085 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3086
250e061e 3087 return stats;
8b230ed8
RM
3088}
3089
fe1624cf
RM
3090static void
3091bnad_set_rx_ucast_fltr(struct bnad *bnad)
3092{
3093 struct net_device *netdev = bnad->netdev;
3094 int uc_count = netdev_uc_count(netdev);
3095 enum bna_cb_status ret;
3096 u8 *mac_list;
3097 struct netdev_hw_addr *ha;
3098 int entry;
3099
3100 if (netdev_uc_empty(bnad->netdev)) {
3101 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3102 return;
3103 }
3104
3105 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3106 goto mode_default;
3107
3108 mac_list = kzalloc(uc_count * ETH_ALEN, GFP_ATOMIC);
3109 if (mac_list == NULL)
3110 goto mode_default;
3111
3112 entry = 0;
3113 netdev_for_each_uc_addr(ha, netdev) {
3114 memcpy(&mac_list[entry * ETH_ALEN],
3115 &ha->addr[0], ETH_ALEN);
3116 entry++;
3117 }
3118
3119 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
3120 mac_list, NULL);
3121 kfree(mac_list);
3122
3123 if (ret != BNA_CB_SUCCESS)
3124 goto mode_default;
3125
3126 return;
3127
3128 /* ucast packets not in UCAM are routed to default function */
3129mode_default:
3130 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3131 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
3132}
3133
3134static void
3135bnad_set_rx_mcast_fltr(struct bnad *bnad)
3136{
3137 struct net_device *netdev = bnad->netdev;
3138 int mc_count = netdev_mc_count(netdev);
3139 enum bna_cb_status ret;
3140 u8 *mac_list;
3141
3142 if (netdev->flags & IFF_ALLMULTI)
3143 goto mode_allmulti;
3144
3145 if (netdev_mc_empty(netdev))
3146 return;
3147
3148 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3149 goto mode_allmulti;
3150
3151 mac_list = kzalloc((mc_count + 1) * ETH_ALEN, GFP_ATOMIC);
3152
3153 if (mac_list == NULL)
3154 goto mode_allmulti;
3155
3156 memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
3157
3158 /* copy rest of the MCAST addresses */
3159 bnad_netdev_mc_list_get(netdev, mac_list);
3160 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
3161 mac_list, NULL);
3162 kfree(mac_list);
3163
3164 if (ret != BNA_CB_SUCCESS)
3165 goto mode_allmulti;
3166
3167 return;
3168
3169mode_allmulti:
3170 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3171 bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
3172}
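/*
 * Editorial note: both filter helpers above degrade gracefully -- if the
 * address list outgrows the CAM, or the temporary buffer cannot be
 * allocated, the unicast path falls back to default-function mode and the
 * multicast path to ALLMULTI, rather than silently dropping addresses.
 */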
3173
a2122d95 3174void
8b230ed8
RM
3175bnad_set_rx_mode(struct net_device *netdev)
3176{
3177 struct bnad *bnad = netdev_priv(netdev);
fe1624cf 3178 enum bna_rxmode new_mode, mode_mask;
8b230ed8
RM
3179 unsigned long flags;
3180
3181 spin_lock_irqsave(&bnad->bna_lock, flags);
3182
fe1624cf
RM
3183 if (bnad->rx_info[0].rx == NULL) {
3184 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3185 return;
8b230ed8
RM
3186 }
3187
fe1624cf
RM
3188 /* clear bnad flags to update it with new settings */
3189 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3190 BNAD_CF_ALLMULTI);
271e8b79 3191
fe1624cf
RM
3192 new_mode = 0;
3193 if (netdev->flags & IFF_PROMISC) {
3194 new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3195 bnad->cfg_flags |= BNAD_CF_PROMISC;
3196 } else {
3197 bnad_set_rx_mcast_fltr(bnad);
8b230ed8 3198
fe1624cf
RM
3199 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3200 new_mode |= BNA_RXMODE_ALLMULTI;
8b230ed8 3201
fe1624cf 3202 bnad_set_rx_ucast_fltr(bnad);
8b230ed8 3203
fe1624cf
RM
3204 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3205 new_mode |= BNA_RXMODE_DEFAULT;
3206 }
8b230ed8 3207
fe1624cf
RM
3208 mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3209 BNA_RXMODE_ALLMULTI;
3210 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
8b230ed8 3211
fe1624cf
RM
3212 if (bnad->cfg_flags & BNAD_CF_PROMISC)
3213 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3214 else
3215 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
8b230ed8 3216
8b230ed8
RM
3217 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3218}
3219
3220/*
3221 * bna_lock is used to sync writes to netdev->addr
3222 * conf_lock cannot be used since this call may be made
3223 * in a non-blocking context.
3224 */
3225static int
3226bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
3227{
3228 int err;
3229 struct bnad *bnad = netdev_priv(netdev);
3230 struct sockaddr *sa = (struct sockaddr *)mac_addr;
3231 unsigned long flags;
3232
3233 spin_lock_irqsave(&bnad->bna_lock, flags);
3234
3235 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3236
3237 if (!err)
3238 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
3239
3240 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3241
3242 return err;
3243}
3244
3245static int
e29aa339 3246bnad_mtu_set(struct bnad *bnad, int frame_size)
8b230ed8 3247{
8b230ed8
RM
3248 unsigned long flags;
3249
078086f3
RM
3250 init_completion(&bnad->bnad_completions.mtu_comp);
3251
3252 spin_lock_irqsave(&bnad->bna_lock, flags);
e29aa339 3253 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
078086f3
RM
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3255
3256 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3257
3258 return bnad->bnad_completions.mtu_comp_status;
3259}
3260
3261static int
3262bnad_change_mtu(struct net_device *netdev, int new_mtu)
3263{
e29aa339 3264 int err, mtu;
8b230ed8 3265 struct bnad *bnad = netdev_priv(netdev);
e29aa339 3266 u32 rx_count = 0, frame, new_frame;
8b230ed8
RM
3267
3268 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
3269 return -EINVAL;
3270
3271 mutex_lock(&bnad->conf_mutex);
3272
e29aa339 3273 mtu = netdev->mtu;
8b230ed8
RM
3274 netdev->mtu = new_mtu;
3275
e29aa339
RM
3276 frame = BNAD_FRAME_SIZE(mtu);
3277 new_frame = BNAD_FRAME_SIZE(new_mtu);
3278
3279 /* check if multi-buffer needs to be enabled */
3280 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3281 netif_running(bnad->netdev)) {
3282 /* only when transition is over 4K */
3283 if ((frame <= 4096 && new_frame > 4096) ||
3284 (frame > 4096 && new_frame <= 4096))
3285 rx_count = bnad_reinit_rx(bnad);
3286 }
3287
3288 /* rx_count > 0 - new rx created
3289 * - Linux set err = 0 and return
3290 */
3291 err = bnad_mtu_set(bnad, new_frame);
078086f3
RM
3292 if (err)
3293 err = -EBUSY;
8b230ed8
RM
3294
3295 mutex_unlock(&bnad->conf_mutex);
3296 return err;
3297}
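/*
 * Editorial note (ties in with multi-buffer Rx): the 4096-byte boundary
 * check above means an MTU change such as 1500 -> 9000 on a CAT2 device
 * destroys and re-creates the Rx objects via bnad_reinit_rx(), so that
 * bnad_init_rx_config() can switch q0 into multi-buffer mode; changes that
 * stay on the same side of 4096 only reprogram the enet MTU.
 */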
3298
8e586137 3299static int
80d5c368 3300bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
8b230ed8
RM
3301{
3302 struct bnad *bnad = netdev_priv(netdev);
3303 unsigned long flags;
3304
3305 if (!bnad->rx_info[0].rx)
8e586137 3306 return 0;
8b230ed8
RM
3307
3308 mutex_lock(&bnad->conf_mutex);
3309
3310 spin_lock_irqsave(&bnad->bna_lock, flags);
3311 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
f859d7cb 3312 set_bit(vid, bnad->active_vlans);
8b230ed8
RM
3313 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3314
3315 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3316
3317 return 0;
8b230ed8
RM
3318}
3319
8e586137 3320static int
80d5c368 3321bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
8b230ed8
RM
3322{
3323 struct bnad *bnad = netdev_priv(netdev);
3324 unsigned long flags;
3325
3326 if (!bnad->rx_info[0].rx)
8e586137 3327 return 0;
8b230ed8
RM
3328
3329 mutex_lock(&bnad->conf_mutex);
3330
3331 spin_lock_irqsave(&bnad->bna_lock, flags);
f859d7cb 3332 clear_bit(vid, bnad->active_vlans);
8b230ed8
RM
3333 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3334 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3335
3336 mutex_unlock(&bnad->conf_mutex);
8e586137
JP
3337
3338 return 0;
8b230ed8
RM
3339}
3340
3341#ifdef CONFIG_NET_POLL_CONTROLLER
3342static void
3343bnad_netpoll(struct net_device *netdev)
3344{
3345 struct bnad *bnad = netdev_priv(netdev);
3346 struct bnad_rx_info *rx_info;
3347 struct bnad_rx_ctrl *rx_ctrl;
3348 u32 curr_mask;
3349 int i, j;
3350
3351 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3352 bna_intx_disable(&bnad->bna, curr_mask);
3353 bnad_isr(bnad->pcidev->irq, netdev);
3354 bna_intx_enable(&bnad->bna, curr_mask);
3355 } else {
19dbff9f
RM
3356 /*
3357 * Tx processing may happen in sending context, so no need
3358 * to explicitly process completions here
3359 */
3360
3361 /* Rx processing */
8b230ed8
RM
3362 for (i = 0; i < bnad->num_rx; i++) {
3363 rx_info = &bnad->rx_info[i];
3364 if (!rx_info->rx)
3365 continue;
3366 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3367 rx_ctrl = &rx_info->rx_ctrl[j];
271e8b79 3368 if (rx_ctrl->ccb)
8b230ed8
RM
3369 bnad_netif_rx_schedule_poll(bnad,
3370 rx_ctrl->ccb);
8b230ed8
RM
3371 }
3372 }
3373 }
3374}
3375#endif
3376
3377static const struct net_device_ops bnad_netdev_ops = {
3378 .ndo_open = bnad_open,
3379 .ndo_stop = bnad_stop,
3380 .ndo_start_xmit = bnad_start_xmit,
250e061e 3381 .ndo_get_stats64 = bnad_get_stats64,
8b230ed8 3382 .ndo_set_rx_mode = bnad_set_rx_mode,
8b230ed8
RM
3383 .ndo_validate_addr = eth_validate_addr,
3384 .ndo_set_mac_address = bnad_set_mac_address,
3385 .ndo_change_mtu = bnad_change_mtu,
8b230ed8
RM
3386 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
3387 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
3388#ifdef CONFIG_NET_POLL_CONTROLLER
3389 .ndo_poll_controller = bnad_netpoll
3390#endif
3391};
3392
3393static void
3394bnad_netdev_init(struct bnad *bnad, bool using_dac)
3395{
3396 struct net_device *netdev = bnad->netdev;
3397
e5ee20e7
MM
3398 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3399 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
f646968f 3400 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
8b230ed8 3401
e5ee20e7
MM
3402 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3403 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3404 NETIF_F_TSO | NETIF_F_TSO6;
8b230ed8 3405
e5ee20e7 3406 netdev->features |= netdev->hw_features |
f646968f 3407 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
8b230ed8
RM
3408
3409 if (using_dac)
3410 netdev->features |= NETIF_F_HIGHDMA;
3411
8b230ed8
RM
3412 netdev->mem_start = bnad->mmio_start;
3413 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3414
3415 netdev->netdev_ops = &bnad_netdev_ops;
3416 bnad_set_ethtool_ops(netdev);
3417}
3418
3419/*
3420 * 1. Initialize the bnad structure
3421 * 2. Setup netdev pointer in pci_dev
d95d1081
JH
3422 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3423 * 4. Initialize work queue.
8b230ed8
RM
3424 */
3425static int
3426bnad_init(struct bnad *bnad,
3427 struct pci_dev *pdev, struct net_device *netdev)
3428{
3429 unsigned long flags;
3430
3431 SET_NETDEV_DEV(netdev, &pdev->dev);
3432 pci_set_drvdata(pdev, netdev);
3433
3434 bnad->netdev = netdev;
3435 bnad->pcidev = pdev;
3436 bnad->mmio_start = pci_resource_start(pdev, 0);
3437 bnad->mmio_len = pci_resource_len(pdev, 0);
3438 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
3439 if (!bnad->bar0) {
3440 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
8b230ed8
RM
3441 return -ENOMEM;
3442 }
3443 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
3444 (unsigned long long) bnad->mmio_len);
3445
3446 spin_lock_irqsave(&bnad->bna_lock, flags);
3447 if (!bnad_msix_disable)
3448 bnad->cfg_flags = BNAD_CF_MSIX;
3449
3450 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3451
3452 bnad_q_num_init(bnad);
3453 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3454
3455 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3456 (bnad->num_rx * bnad->num_rxp_per_rx) +
3457 BNAD_MAILBOX_MSIX_VECTORS;
8b230ed8
RM
3458
3459 bnad->txq_depth = BNAD_TXQ_DEPTH;
3460 bnad->rxq_depth = BNAD_RXQ_DEPTH;
8b230ed8
RM
3461
3462 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3463 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3464
01b54b14
JH
3465 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3466 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
ba21fc69
WY
3467 if (!bnad->work_q) {
3468 iounmap(bnad->bar0);
01b54b14 3469 return -ENOMEM;
ba21fc69 3470 }
01b54b14 3471
8b230ed8
RM
3472 return 0;
3473}
3474
3475/*
3476 * Must be called after bnad_pci_uninit()
3477 * so that iounmap() and pci_set_drvdata(NULL)
3478 * happens only after PCI uninitialization.
3479 */
3480static void
3481bnad_uninit(struct bnad *bnad)
3482{
01b54b14
JH
3483 if (bnad->work_q) {
3484 flush_workqueue(bnad->work_q);
3485 destroy_workqueue(bnad->work_q);
3486 bnad->work_q = NULL;
3487 }
3488
8b230ed8
RM
3489 if (bnad->bar0)
3490 iounmap(bnad->bar0);
8b230ed8
RM
3491}
3492
3493/*
3494 * Initialize locks
078086f3 3495 a) Per ioceth mutex used for serializing configuration
8b230ed8
RM
3496 changes from OS interface
3497 b) spin lock used to protect bna state machine
3498 */
3499static void
3500bnad_lock_init(struct bnad *bnad)
3501{
3502 spin_lock_init(&bnad->bna_lock);
3503 mutex_init(&bnad->conf_mutex);
72a9730b 3504 mutex_init(&bnad_list_mutex);
8b230ed8
RM
3505}
3506
3507static void
3508bnad_lock_uninit(struct bnad *bnad)
3509{
3510 mutex_destroy(&bnad->conf_mutex);
72a9730b 3511 mutex_destroy(&bnad_list_mutex);
8b230ed8
RM
3512}
3513
3514/* PCI Initialization */
3515static int
3516bnad_pci_init(struct bnad *bnad,
3517 struct pci_dev *pdev, bool *using_dac)
3518{
3519 int err;
3520
3521 err = pci_enable_device(pdev);
3522 if (err)
3523 return err;
3524 err = pci_request_regions(pdev, BNAD_NAME);
3525 if (err)
3526 goto disable_device;
3e548079 3527 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3db1cd5c 3528 *using_dac = true;
8b230ed8 3529 } else {
3e548079
RK
3530 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3531 if (err)
3532 goto release_regions;
3db1cd5c 3533 *using_dac = false;
8b230ed8
RM
3534 }
3535 pci_set_master(pdev);
3536 return 0;
3537
3538release_regions:
3539 pci_release_regions(pdev);
3540disable_device:
3541 pci_disable_device(pdev);
3542
3543 return err;
3544}
3545
3546static void
3547bnad_pci_uninit(struct pci_dev *pdev)
3548{
3549 pci_release_regions(pdev);
3550 pci_disable_device(pdev);
3551}
3552
c4eef189 3553static int
8b230ed8
RM
3554bnad_pci_probe(struct pci_dev *pdev,
3555 const struct pci_device_id *pcidev_id)
3556{
3caa1e95 3557 bool using_dac;
0120b99c 3558 int err;
8b230ed8
RM
3559 struct bnad *bnad;
3560 struct bna *bna;
3561 struct net_device *netdev;
3562 struct bfa_pcidev pcidev_info;
3563 unsigned long flags;
3564
3565 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
3566 pdev, pcidev_id, PCI_FUNC(pdev->devfn));
3567
3568 mutex_lock(&bnad_fwimg_mutex);
3569 if (!cna_get_firmware_buf(pdev)) {
3570 mutex_unlock(&bnad_fwimg_mutex);
3571 pr_warn("Failed to load Firmware Image!\n");
3572 return -ENODEV;
3573 }
3574 mutex_unlock(&bnad_fwimg_mutex);
3575
3576 /*
 3577 * Allocates sizeof(struct net_device) + sizeof(struct bnad)
3578 * bnad = netdev->priv
3579 */
3580 netdev = alloc_etherdev(sizeof(struct bnad));
3581 if (!netdev) {
8b230ed8
RM
3582 err = -ENOMEM;
3583 return err;
3584 }
3585 bnad = netdev_priv(netdev);
078086f3 3586 bnad_lock_init(bnad);
72a9730b 3587 bnad_add_to_list(bnad);
078086f3
RM
3588
3589 mutex_lock(&bnad->conf_mutex);
8b230ed8
RM
3590 /*
3591 * PCI initialization
0120b99c 3592 * Output : using_dac = 1 for 64 bit DMA
be7fa326 3593 * = 0 for 32 bit DMA
8b230ed8 3594 */
e905ed57 3595 using_dac = false;
8b230ed8
RM
3596 err = bnad_pci_init(bnad, pdev, &using_dac);
3597 if (err)
44861f44 3598 goto unlock_mutex;
8b230ed8 3599
8b230ed8
RM
3600 /*
3601 * Initialize bnad structure
3602 * Setup relation between pci_dev & netdev
8b230ed8
RM
3603 */
3604 err = bnad_init(bnad, pdev, netdev);
3605 if (err)
3606 goto pci_uninit;
078086f3 3607
8b230ed8
RM
3608 /* Initialize netdev structure, set up ethtool ops */
3609 bnad_netdev_init(bnad, using_dac);
3610
815f41e7
RM
3611 /* Set link to down state */
3612 netif_carrier_off(netdev);
3613
7afc5dbd
KG
3614 /* Setup the debugfs node for this bfad */
3615 if (bna_debugfs_enable)
3616 bnad_debugfs_init(bnad);
3617
8b230ed8 3618 /* Get resource requirement form bna */
078086f3 3619 spin_lock_irqsave(&bnad->bna_lock, flags);
8b230ed8 3620 bna_res_req(&bnad->res_info[0]);
078086f3 3621 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3622
3623 /* Allocate resources from bna */
078086f3 3624 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
8b230ed8 3625 if (err)
078086f3 3626 goto drv_uninit;
8b230ed8
RM
3627
3628 bna = &bnad->bna;
3629
3630 /* Setup pcidev_info for bna_init() */
3631 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3632 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3633 pcidev_info.device_id = bnad->pcidev->device;
3634 pcidev_info.pci_bar_kva = bnad->bar0;
3635
8b230ed8
RM
3636 spin_lock_irqsave(&bnad->bna_lock, flags);
3637 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
8b230ed8
RM
3638 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3639
3640 bnad->stats.bna_stats = &bna->stats;
3641
078086f3
RM
3642 bnad_enable_msix(bnad);
3643 err = bnad_mbox_irq_alloc(bnad);
3644 if (err)
3645 goto res_free;
3646
8b230ed8 3647 /* Set up timers */
078086f3 3648 setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
8b230ed8 3649 ((unsigned long)bnad));
078086f3 3650 setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
8b230ed8 3651 ((unsigned long)bnad));
078086f3 3652 setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
1d32f769 3653 ((unsigned long)bnad));
078086f3 3654 setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
8b230ed8
RM
3655 ((unsigned long)bnad));
3656
3657 /* Now start the timer before calling IOC */
078086f3 3658 mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
8b230ed8
RM
3659 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3660
3661 /*
3662 * Start the chip
078086f3
RM
3663 * If the call back comes with error, we bail out.
3664 * This is a catastrophic error.
8b230ed8 3665 */
078086f3
RM
3666 err = bnad_ioceth_enable(bnad);
3667 if (err) {
3668 pr_err("BNA: Initialization failed err=%d\n",
3669 err);
3670 goto probe_success;
3671 }
3672
3673 spin_lock_irqsave(&bnad->bna_lock, flags);
3674 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3675 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3676 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3677 bna_attr(bna)->num_rxp - 1);
3678 if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3679 bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3680 err = -EIO;
3681 }
3caa1e95
RM
3682 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3683 if (err)
3684 goto disable_ioceth;
3685
3686 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3
RM
3687 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3688 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3689
3690 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
0caa9aae
RM
3691 if (err) {
3692 err = -EIO;
078086f3 3693 goto disable_ioceth;
0caa9aae 3694 }
078086f3
RM
3695
3696 spin_lock_irqsave(&bnad->bna_lock, flags);
3697 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3698 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8
RM
3699
3700 /* Get the burnt-in mac */
3701 spin_lock_irqsave(&bnad->bna_lock, flags);
078086f3 3702 bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
8b230ed8
RM
3703 bnad_set_netdev_perm_addr(bnad);
3704 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3705
0caa9aae
RM
3706 mutex_unlock(&bnad->conf_mutex);
3707
8b230ed8
RM
 3708 /* Finally, register with the net_device layer */
3709 err = register_netdev(netdev);
3710 if (err) {
3711 pr_err("BNA : Registering with netdev failed\n");
078086f3 3712 goto probe_uninit;
8b230ed8 3713 }
078086f3 3714 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
8b230ed8 3715
0caa9aae
RM
3716 return 0;
3717
078086f3
RM
3718probe_success:
3719 mutex_unlock(&bnad->conf_mutex);
8b230ed8
RM
3720 return 0;
3721
078086f3 3722probe_uninit:
3fc72370 3723 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3724 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3725disable_ioceth:
3726 bnad_ioceth_disable(bnad);
3727 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3728 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3729 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3730 spin_lock_irqsave(&bnad->bna_lock, flags);
3731 bna_uninit(bna);
3732 spin_unlock_irqrestore(&bnad->bna_lock, flags);
078086f3 3733 bnad_mbox_irq_free(bnad);
8b230ed8 3734 bnad_disable_msix(bnad);
078086f3
RM
3735res_free:
3736 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3737drv_uninit:
7afc5dbd
KG
3738 /* Remove the debugfs node for this bnad */
3739 kfree(bnad->regdata);
3740 bnad_debugfs_uninit(bnad);
078086f3 3741 bnad_uninit(bnad);
8b230ed8
RM
3742pci_uninit:
3743 bnad_pci_uninit(pdev);
44861f44 3744unlock_mutex:
078086f3 3745 mutex_unlock(&bnad->conf_mutex);
72a9730b 3746 bnad_remove_from_list(bnad);
8b230ed8 3747 bnad_lock_uninit(bnad);
8b230ed8
RM
3748 free_netdev(netdev);
3749 return err;
3750}
3751
c4eef189 3752static void
8b230ed8
RM
3753bnad_pci_remove(struct pci_dev *pdev)
3754{
3755 struct net_device *netdev = pci_get_drvdata(pdev);
3756 struct bnad *bnad;
3757 struct bna *bna;
3758 unsigned long flags;
3759
3760 if (!netdev)
3761 return;
3762
3763 pr_info("%s bnad_pci_remove\n", netdev->name);
3764 bnad = netdev_priv(netdev);
3765 bna = &bnad->bna;
3766
078086f3
RM
3767 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3768 unregister_netdev(netdev);
8b230ed8
RM
3769
3770 mutex_lock(&bnad->conf_mutex);
078086f3
RM
3771 bnad_ioceth_disable(bnad);
3772 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3773 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3774 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
8b230ed8
RM
3775 spin_lock_irqsave(&bnad->bna_lock, flags);
3776 bna_uninit(bna);
3777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
8b230ed8 3778
078086f3
RM
3779 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3780 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3781 bnad_mbox_irq_free(bnad);
8b230ed8
RM
3782 bnad_disable_msix(bnad);
3783 bnad_pci_uninit(pdev);
078086f3 3784 mutex_unlock(&bnad->conf_mutex);
72a9730b 3785 bnad_remove_from_list(bnad);
8b230ed8 3786 bnad_lock_uninit(bnad);
7afc5dbd
KG
3787 /* Remove the debugfs node for this bnad */
3788 kfree(bnad->regdata);
3789 bnad_debugfs_uninit(bnad);
8b230ed8
RM
3790 bnad_uninit(bnad);
3791 free_netdev(netdev);
3792}
3793
0120b99c 3794static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
8b230ed8
RM
3795 {
3796 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3797 PCI_DEVICE_ID_BROCADE_CT),
3798 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3799 .class_mask = 0xffff00
586b2816
RM
3800 },
3801 {
3802 PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3803 BFA_PCI_DEVICE_ID_CT2),
3804 .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3805 .class_mask = 0xffff00
3806 },
3807 {0, },
8b230ed8
RM
3808};
3809
3810MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3811
3812static struct pci_driver bnad_pci_driver = {
3813 .name = BNAD_NAME,
3814 .id_table = bnad_pci_id_table,
3815 .probe = bnad_pci_probe,
c4eef189 3816 .remove = bnad_pci_remove,
8b230ed8
RM
3817};
3818
3819static int __init
3820bnad_module_init(void)
3821{
3822 int err;
3823
5aad0011
RM
3824 pr_info("Brocade 10G Ethernet driver - version: %s\n",
3825 BNAD_VERSION);
8b230ed8 3826
8a891429 3827 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
8b230ed8
RM
3828
3829 err = pci_register_driver(&bnad_pci_driver);
3830 if (err < 0) {
3831 pr_err("bna : PCI registration failed in module init "
3832 "(%d)\n", err);
3833 return err;
3834 }
3835
3836 return 0;
3837}
3838
3839static void __exit
3840bnad_module_exit(void)
3841{
3842 pci_unregister_driver(&bnad_pci_driver);
294ca868 3843 release_firmware(bfi_fw);
8b230ed8
RM
3844}
3845
3846module_init(bnad_module_init);
3847module_exit(bnad_module_exit);
3848
3849MODULE_AUTHOR("Brocade");
3850MODULE_LICENSE("GPL");
3851MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver");
3852MODULE_VERSION(BNAD_VERSION);
3853MODULE_FIRMWARE(CNA_FW_FILE_CT);
1bf9fd70 3854MODULE_FIRMWARE(CNA_FW_FILE_CT2);