/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
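/* Illustrative usage (assumed, not taken from this file): both options are
 * read-only module parameters, e.g. "modprobe be2net rx_frag_size=4096
 * num_vfs=2". rx_frag_size sets the size of each posted RX buffer fragment
 * and feeds the big_page_size calculation below; treating 2048/4096/8192 as
 * the accepted values is an assumption validated elsewhere in the driver.
 */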
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	/* per-bit descriptions elided */
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	/* per-bit descriptions elided */
};
/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}
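/* Sketch of the expected alloc/free pairing for the two helpers above
 * (illustrative only; the field names follow their use in this file):
 *
 *	struct be_queue_info q;
 *	if (be_queue_alloc(adapter, &q, 256, sizeof(struct be_eth_wrb)))
 *		return -1;	// DMA-coherent ring of 256 entries
 *	...
 *	be_queue_free(adapter, &q);	// returns the coherent buffer
 */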
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	if (adapter->eeh_err)
		return;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
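/* Doorbell format, as implied by the masks/shifts used above: the low bits
 * carry the ring id and the remaining fields (rearm, clear-int, event,
 * num_popped) are OR'd in before a single 32-bit write to the BAR mapped at
 * adapter->db. Worked example (illustrative; exact bit positions live in
 * be_hw.h): arming EQ 5 after popping 3 events reduces to
 *
 *	u32 val = (5 & DB_EQ_RING_ID_MASK) |
 *		  (1 << DB_EQ_REARM_SHIFT) |
 *		  (1 << DB_EQ_EVNT_SHIFT) |
 *		  (3 << DB_EQ_NUM_POPPED_SHIFT);
 *	iowrite32(val, adapter->db + DB_EQ_OFFSET);
 */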
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	u32 pmac_id = adapter->pmac_id;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_mac_addr_query(adapter, current_mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
	if (status)
		goto err;

	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
		if (status)
			goto err;

		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
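/* Worked example of the wrap handling above (illustrative): with
 * *acc == 0x0001FFF0 (upper half = one wrap so far, lower half = 0xFFF0),
 * a new HW reading val == 0x0005 satisfies val < lo(*acc), so the 16-bit
 * counter wrapped; newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005,
 * i.e. two wraps plus the current 16-bit value.
 */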
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;

	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}

	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
	for_all_rx_queues(adapter, rxo, i) {
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
	}
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
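/* The fetch_begin/fetch_retry pairs above are the standard seqcount pattern
 * for 64-bit stats on 32-bit hosts: the snapshot is re-read until no writer
 * (be_tx_stats_update/be_rx_stats_update, under u64_stats_update_begin/end)
 * raced with it. Minimal reader-side sketch (illustrative):
 *
 *	unsigned int start;
 *	u64 pkts;
 *	do {
 *		start = u64_stats_fetch_begin_bh(&stats->sync);
 *		pkts = stats->rx_pkts;
 *	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
 */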
void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
	struct net_device *netdev = adapter->netdev;

	/* when link status changes, link speed must be re-queried from card */
	adapter->link_speed = -1;
	if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
		netif_carrier_on(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
	} else {
		netif_carrier_off(netdev);
		dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
	}
}
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
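/* Worked example (illustrative): an skb with linear data and two page frags
 * needs 1 (head) + 2 (frags) + 1 (hdr wrb) = 4 WRBs; 4 is even, so no dummy.
 * With one frag the count would be 3 and, on BE2/BE3 (not Lancer), a dummy
 * WRB is appended to keep the total even.
 */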
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u16 vlan_tag;
	u8 vlan_prio;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
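/* Queue-stop rationale for be_xmit above: the subqueue is stopped while
 * fewer than BE_MAX_TX_FRAG_COUNT free entries remain, i.e. before a
 * worst-case fragmented skb could overflow the ring; be_poll_tx_mcc wakes
 * it again once the ring drains below half its length. Stopping before
 * ringing the doorbell avoids racing with the completion that would
 * otherwise re-wake the queue too early.
 */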
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

		if (adapter->vlans_added)
			be_vid_config(adapter, false, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
			netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
		goto done;
	}

	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
	return;
}
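/* Filter precedence implied above, most to least permissive: IFF_PROMISC
 * (accept everything) > IFF_ALLMULTI (all multicast, also used when the mc
 * list exceeds BE_MAX_MC) > IFF_MULTICAST (program the exact mc list). Each
 * be_cmd_rx_filter() call goes out over the FW mailbox; treating it as
 * reprogramming the whole RX filter state is an assumption here.
 */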
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
				mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = rx_stats(rxo);
	ulong now = jiffies;
	ulong delta = now - stats->rx_jiffies;
	u64 pkts;
	unsigned int start, eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update once a second */
	if (delta < HZ)
		return;

	do {
		start = u64_stats_fetch_begin_bh(&stats->sync);
		pkts = stats->rx_pkts;
	} while (u64_stats_fetch_retry_bh(&stats->sync, start));

	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
	stats->rx_pkts_prev = pkts;
	stats->rx_jiffies = now;
	eqd = stats->rx_pps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd) {
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
		rx_eq->cur_eqd = eqd;
	}
}
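/* Worked example of the adaptive coalescing above (illustrative): at a
 * measured ~440K pkts/s, eqd = 440000 / 110000 = 4, scaled up to 32, then
 * clamped to [min_eqd, max_eqd] (very small values fall back to 0, i.e. no
 * delay). The new delay is pushed to HW via be_cmd_modify_eqd() only when
 * it actually changes, avoiding a FW command per update cycle.
 */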
static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6);
}
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	netif_receive_skb(skb);
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

	napi_gro_frags(&eq_obj->napi);
}
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
					       vlan_tag, compl);
	}
	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
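/* Completion-ring handshake assumed above: HW writes entries with a nonzero
 * valid dword; the driver consumes an entry, parses it, zeroes the valid bit
 * so the slot reads as empty when the ring wraps, and advances the tail.
 * be_cq_notify() later tells HW how many entries were popped so the slots
 * can be reused.
 */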
static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
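/* Worked example (illustrative, 4K pages with the default 2048-byte
 * rx_frag_size): big_page_size is 4096, so each page yields two fragments
 * at offsets 0 and 2048. The first fragment maps the page for DMA and owns
 * the allocator's page reference; the second takes another via get_page().
 * Whichever fragment is last in the page is flagged last_page_user, so the
 * DMA mapping is torn down exactly once in get_rx_page_info().
 */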
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
		struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj,
			bool rearm)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	if (!num)
		rearm = true;

	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
static void be_tx_compl_clean(struct be_adapter *adapter,
				struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_queue_info *txq = &txo->q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
static int be_num_txqs_want(struct be_adapter *adapter)
{
	if ((num_vfs && adapter->sriov_enabled) ||
		be_is_mc(adapter) ||
		lancer_chip(adapter) || !be_physfn(adapter) ||
		adapter->generation == BE_GEN2)
		return 1;
	else
		return MAX_TX_QS;
}
/* One TX event queue is shared by all TX compl qs */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_tx_obj *txo;
	u8 i;

	adapter->num_tx_qs = be_num_txqs_want(adapter);
	if (adapter->num_tx_qs != MAX_TX_QS)
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;

	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
		sizeof(struct be_eq_entry)))
		return -1;

	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto err;
	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
			goto err;

		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
			goto err;

		q = &txo->q;
		if (be_queue_alloc(adapter, q, TX_Q_LEN,
			sizeof(struct be_eth_wrb)))
			goto err;

		if (be_cmd_txq_create(adapter, q, cq))
			goto err;
	}
	return 0;

err:
	be_tx_queues_destroy(adapter);
	return -1;
}
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		be_queue_free(adapter, &rxo->q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		q = &rxo->rx_eq.q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		be_queue_free(adapter, q);
	}
}
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	     !adapter->sriov_enabled && be_physfn(adapter) &&
	     !be_is_mc(adapter)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q - will be created in be_open() */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}
static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
	if (!eqe->evt)
		return false;
	else
		return true;
}
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq, false);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq, true);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq, false);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq, true);
		}
	}

	return IRQ_HANDLED;
}
static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq, true);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq, false);

	return IRQ_HANDLED;
}
static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rx_stats(rxo)->rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
				!lancer_chip(adapter))) {
			be_rx_compl_discard(adapter, rxo, rxcp);
			goto loop_continue;
		}

		if (do_gro(rxcp))
			be_rx_compl_process_gro(adapter, rxo, rxcp);
		else
			be_rx_compl_process(adapter, rxo, rxcp);
loop_continue:
		be_rx_stats_update(rxo, rxcp);
	}

	be_cq_notify(adapter, rx_cq->id, false, work_done);

	/* Refill the queue */
	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		/* Arm CQ */
		be_cq_notify(adapter, rx_cq->id, true, 0);
	}
	return work_done;
}
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_tx_obj *txo;
	struct be_eth_tx_compl *txcp;
	int tx_compl, mcc_compl, status = 0;
	u8 i;
	u16 num_wrbs;

	for_all_tx_queues(adapter, txo, i) {
		tx_compl = 0;
		num_wrbs = 0;
		while ((txcp = be_tx_compl_get(&txo->cq))) {
			num_wrbs += be_tx_compl_process(adapter, txo,
				AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp));
			tx_compl++;
		}
		if (tx_compl) {
			be_cq_notify(adapter, txo->cq.id, true, tx_compl);

			atomic_sub(num_wrbs, &txo->q.used);

			/* As Tx wrbs have been freed up, wake up netdev queue
			 * if it was stopped due to lack of tx wrbs. */
			if (__netif_subqueue_stopped(adapter->netdev, i) &&
				atomic_read(&txo->q.used) < txo->q.len / 2) {
				netif_wake_subqueue(adapter->netdev, i);
			}

			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
			tx_stats(txo)->tx_compl += tx_compl;
			u64_stats_update_end(&tx_stats(txo)->sync_compl);
		}
	}

	mcc_compl = be_process_mcc(adapter, &status);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	napi_complete(napi);

	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
	adapter->drv_stats.tx_events++;
	return 1;
}
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & (~ue_lo_mask));
		ue_hi = (ue_hi & (~ue_hi_mask));
	}

	if (ue_lo || ue_hi ||
		sliport_status & SLIPORT_STATUS_ERR_MASK) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}

	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		dev_err(&adapter->pdev->dev,
			"sliport status 0x%x\n", sliport_status);
		dev_err(&adapter->pdev->dev,
			"sliport error1 0x%x\n", sliport_err1);
		dev_err(&adapter->pdev->dev,
			"sliport error2 0x%x\n", sliport_err2);
	}
}
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected)
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}
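/* pci_enable_msix() semantics relied on above: 0 means all num_vec vectors
 * were granted; a positive return is the number of vectors actually
 * available, so the driver retries once with that smaller count (as long as
 * it covers BE_MIN_MSIX_VECTORS); any other failure leaves num_msix_vec at 0
 * and be_irq_register() falls back to INTx.
 */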
static int be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
				pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;

		if (adapter->sriov_enabled) {
			adapter->vf_cfg = kcalloc(num_vfs,
						sizeof(struct be_vf_cfg),
						GFP_KERNEL);
			if (!adapter->vf_cfg)
				return -ENOMEM;
		}
	}
#endif
	return 0;
}
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		kfree(adapter->vf_cfg);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}
2277 static void be_rx_queues_clear(struct be_adapter *adapter)
2279 struct be_queue_info *q;
2280 struct be_rx_obj *rxo;
2283 for_all_rx_queues(adapter, rxo, i) {
2286 be_cmd_rxq_destroy(adapter, q);
2287 /* After the rxq is invalidated, wait for a grace time
2288 * of 1ms for all DMA to end and the flush compl to arrive */
2292 be_rx_q_clean(adapter, rxo);
2295 /* Clear any residual events */
2298 be_eq_clean(adapter, &rxo->rx_eq);
2302 static int be_close(struct net_device *netdev)
2304 struct be_adapter *adapter = netdev_priv(netdev);
2305 struct be_rx_obj *rxo;
2306 struct be_tx_obj *txo;
2307 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2310 be_async_mcc_disable(adapter);
2312 if (!lancer_chip(adapter))
2313 be_intr_set(adapter, false);
2315 for_all_rx_queues(adapter, rxo, i)
2316 napi_disable(&rxo->rx_eq.napi);
2318 napi_disable(&tx_eq->napi);
2320 if (lancer_chip(adapter)) {
2321 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2322 for_all_rx_queues(adapter, rxo, i)
2323 be_cq_notify(adapter, rxo->cq.id, false, 0);
2324 for_all_tx_queues(adapter, txo, i)
2325 be_cq_notify(adapter, txo->cq.id, false, 0);
2328 if (msix_enabled(adapter)) {
2329 vec = be_msix_vec_get(adapter, tx_eq);
2330 synchronize_irq(vec);
2332 for_all_rx_queues(adapter, rxo, i) {
2333 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2334 synchronize_irq(vec);
2337 synchronize_irq(netdev->irq);
2339 be_irq_unregister(adapter);
2341 /* Wait for all pending tx completions to arrive so that
2342 * all tx skbs are freed.
2344 for_all_tx_queues(adapter, txo, i)
2345 be_tx_compl_clean(adapter, txo);
2347 be_rx_queues_clear(adapter);
2351 static int be_rx_queues_setup(struct be_adapter *adapter)
2353 struct be_rx_obj *rxo;
2355 u8 rsstable[MAX_RSS_QS];
2357 for_all_rx_queues(adapter, rxo, i) {
2358 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2359 rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2361 (i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
2366 if (be_multi_rxq(adapter)) {
2367 for_all_rss_queues(adapter, rxo, i)
2368 rsstable[i] = rxo->rss_id;
2370 rc = be_cmd_rss_config(adapter, rsstable,
2371 adapter->num_rx_qs - 1);
2376 /* First time posting */
2377 for_all_rx_queues(adapter, rxo, i) {
2378 be_post_rx_frags(rxo, GFP_KERNEL);
2379 napi_enable(&rxo->rx_eq.napi);
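/* A sketch of how an RSS indirection table like the rsstable gathered
 * above is typically filled: repeat the per-queue RSS ids round-robin.
 * The table length and helper name are assumptions; the exact layout
 * consumed by be_cmd_rss_config() is firmware-defined.
 */
static void example_fill_rsstable(u8 *table, int table_len,
				  const u8 *rss_ids, int num_qs)
{
	int i;

	for (i = 0; i < table_len; i++)
		table[i] = rss_ids[i % num_qs];
}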
2384 static int be_open(struct net_device *netdev)
2386 struct be_adapter *adapter = netdev_priv(netdev);
2387 struct be_eq_obj *tx_eq = &adapter->tx_eq;
2388 struct be_rx_obj *rxo;
2391 status = be_rx_queues_setup(adapter);
2395 napi_enable(&tx_eq->napi);
2397 be_irq_register(adapter);
2399 if (!lancer_chip(adapter))
2400 be_intr_set(adapter, true);
2402 /* The evt queues are created in unarmed state; arm them */
2403 for_all_rx_queues(adapter, rxo, i) {
2404 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2405 be_cq_notify(adapter, rxo->cq.id, true, 0);
2407 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2409 /* Now that interrupts are on we can process async mcc */
2410 be_async_mcc_enable(adapter);
2414 be_close(adapter->netdev);
2418 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2420 struct be_dma_mem cmd;
2424 memset(mac, 0, ETH_ALEN);
2426 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2427 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2431 memset(cmd.va, 0, cmd.size);
2434 status = pci_write_config_dword(adapter->pdev,
2435 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2437 dev_err(&adapter->pdev->dev,
2438 "Could not enable Wake-on-lan\n");
2439 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2443 status = be_cmd_enable_magic_wol(adapter,
2444 adapter->netdev->dev_addr, &cmd);
2445 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2446 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2448 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2449 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2450 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2453 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
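/* The enable/disable symmetry above in sketch form: WOL-on programs
 * the magic-packet MAC and arms wake from both D3hot and D3cold;
 * WOL-off programs a zero MAC and disarms both. Helper name is
 * hypothetical.
 */
static void example_arm_wake(struct pci_dev *pdev, bool on)
{
	pci_enable_wake(pdev, PCI_D3hot, on);
	pci_enable_wake(pdev, PCI_D3cold, on);
}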
2458 * Generate a seed MAC address from the PF MAC Address using jhash.
2459 * MAC addresses for VFs are assigned incrementally starting from the seed.
2460 * These addresses are programmed in the ASIC by the PF and the VF driver
2461 * queries for the MAC address during its probe.
2463 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2469 be_vf_eth_addr_generate(adapter, mac);
2471 for (vf = 0; vf < num_vfs; vf++) {
2472 status = be_cmd_pmac_add(adapter, mac,
2473 adapter->vf_cfg[vf].vf_if_handle,
2474 &adapter->vf_cfg[vf].vf_pmac_id,
2477 dev_err(&adapter->pdev->dev,
2478 "Mac address add failed for VF %d\n", vf);
2480 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
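/* "Assigned incrementally starting from the seed" can be pictured as
 * bumping the NIC-specific low 24 bits of the seed MAC once per VF.
 * Hypothetical helper; overflow into the OUI octets is ignored for
 * brevity.
 */
static void example_vf_mac(u8 *mac /* seed, ETH_ALEN bytes */, int vf)
{
	u32 low = (mac[3] << 16) | (mac[4] << 8) | mac[5];

	low += vf;
	mac[3] = (low >> 16) & 0xff;
	mac[4] = (low >> 8) & 0xff;
	mac[5] = low & 0xff;
}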
2487 static void be_vf_clear(struct be_adapter *adapter)
2491 for (vf = 0; vf < num_vfs; vf++) {
2492 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2493 be_cmd_pmac_del(adapter,
2494 adapter->vf_cfg[vf].vf_if_handle,
2495 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2498 for (vf = 0; vf < num_vfs; vf++)
2499 if (adapter->vf_cfg[vf].vf_if_handle)
2500 be_cmd_if_destroy(adapter,
2501 adapter->vf_cfg[vf].vf_if_handle, vf + 1);
2504 static int be_clear(struct be_adapter *adapter)
2506 if (be_physfn(adapter) && adapter->sriov_enabled)
2507 be_vf_clear(adapter);
2509 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2511 be_mcc_queues_destroy(adapter);
2512 be_rx_queues_destroy(adapter);
2513 be_tx_queues_destroy(adapter);
2514 adapter->eq_next_idx = 0;
2516 adapter->be3_native = false;
2517 adapter->promiscuous = false;
2519 /* tell fw we're done with firing cmds */
2520 be_cmd_fw_clean(adapter);
2524 static int be_vf_setup(struct be_adapter *adapter)
2526 u32 cap_flags, en_flags, vf;
2530 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2531 for (vf = 0; vf < num_vfs; vf++) {
2532 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2533 &adapter->vf_cfg[vf].vf_if_handle,
2537 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
2540 if (!lancer_chip(adapter)) {
2541 status = be_vf_eth_addr_config(adapter);
2546 for (vf = 0; vf < num_vfs; vf++) {
2547 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2551 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2558 static int be_setup(struct be_adapter *adapter)
2560 struct net_device *netdev = adapter->netdev;
2561 u32 cap_flags, en_flags;
2566 /* Allow all priorities by default. A GRP5 evt may modify this */
2567 adapter->vlan_prio_bmap = 0xff;
2568 adapter->link_speed = -1;
2570 be_cmd_req_native_mode(adapter);
2572 status = be_tx_queues_create(adapter);
2576 status = be_rx_queues_create(adapter);
2580 status = be_mcc_queues_create(adapter);
2584 memset(mac, 0, ETH_ALEN);
2585 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2586 true /*permanent */, 0);
2589 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2590 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2592 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2593 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2594 cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2595 BE_IF_FLAGS_PROMISCUOUS;
2596 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2597 cap_flags |= BE_IF_FLAGS_RSS;
2598 en_flags |= BE_IF_FLAGS_RSS;
2600 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2601 netdev->dev_addr, &adapter->if_handle,
2602 &adapter->pmac_id, 0);
2606 /* For BEx, the VF's permanent mac queried from card is incorrect.
2607 * Query the mac configured by the PF using if_handle
2609 if (!be_physfn(adapter) && !lancer_chip(adapter)) {
2610 status = be_cmd_mac_addr_query(adapter, mac,
2611 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2613 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2614 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2618 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2620 status = be_vid_config(adapter, false, 0);
2624 be_set_rx_mode(adapter->netdev);
2626 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2629 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2630 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2636 pcie_set_readrq(adapter->pdev, 4096);
2638 if (be_physfn(adapter) && adapter->sriov_enabled) {
2639 status = be_vf_setup(adapter);
2650 #define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2651 static bool be_flash_redboot(struct be_adapter *adapter,
2652 const u8 *p, u32 img_start, int image_size,
2659 crc_offset = hdr_size + img_start + image_size - 4;
2663 status = be_cmd_get_flash_crc(adapter, flashed_crc,
2666 dev_err(&adapter->pdev->dev,
2667 "could not get crc from flash, not flashing redboot\n");
2671 /* update redboot only if crc does not match */
2672 if (!memcmp(flashed_crc, p, 4))
2678 static bool phy_flashing_required(struct be_adapter *adapter)
2681 struct be_phy_info phy_info;
2683 status = be_cmd_get_phy_info(adapter, &phy_info);
2686 if ((phy_info.phy_type == TN_8022) &&
2687 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2693 static int be_flash_data(struct be_adapter *adapter,
2694 const struct firmware *fw,
2695 struct be_dma_mem *flash_cmd, int num_of_images)
2698 int status = 0, i, filehdr_size = 0;
2699 u32 total_bytes = 0, flash_op;
2701 const u8 *p = fw->data;
2702 struct be_cmd_write_flashrom *req = flash_cmd->va;
2703 const struct flash_comp *pflashcomp;
2706 static const struct flash_comp gen3_flash_types[10] = {
2707 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2708 FLASH_IMAGE_MAX_SIZE_g3},
2709 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2710 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2711 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2712 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2713 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2714 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2715 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2716 FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2717 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2718 FLASH_IMAGE_MAX_SIZE_g3},
2719 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2720 FLASH_IMAGE_MAX_SIZE_g3},
2721 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2722 FLASH_IMAGE_MAX_SIZE_g3},
2723 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2724 FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2725 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2726 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2728 static const struct flash_comp gen2_flash_types[8] = {
2729 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2730 FLASH_IMAGE_MAX_SIZE_g2},
2731 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2732 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2733 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2734 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2735 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2736 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2737 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2738 FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2739 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2740 FLASH_IMAGE_MAX_SIZE_g2},
2741 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2742 FLASH_IMAGE_MAX_SIZE_g2},
2743 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2744 FLASH_IMAGE_MAX_SIZE_g2}
2747 if (adapter->generation == BE_GEN3) {
2748 pflashcomp = gen3_flash_types;
2749 filehdr_size = sizeof(struct flash_file_hdr_g3);
2750 num_comp = ARRAY_SIZE(gen3_flash_types);
2752 pflashcomp = gen2_flash_types;
2753 filehdr_size = sizeof(struct flash_file_hdr_g2);
2754 num_comp = ARRAY_SIZE(gen2_flash_types);
2756 for (i = 0; i < num_comp; i++) {
2757 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2758 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2760 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2761 if (!phy_flashing_required(adapter))
2764 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2765 (!be_flash_redboot(adapter, fw->data,
2766 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2767 (num_of_images * sizeof(struct image_hdr)))))
2770 p += filehdr_size + pflashcomp[i].offset
2771 + (num_of_images * sizeof(struct image_hdr));
2772 if (p + pflashcomp[i].size > fw->data + fw->size)
2774 total_bytes = pflashcomp[i].size;
2775 while (total_bytes) {
2776 if (total_bytes > 32*1024)
2777 num_bytes = 32*1024;
2779 num_bytes = total_bytes;
2780 total_bytes -= num_bytes;
2782 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2783 flash_op = FLASHROM_OPER_PHY_FLASH;
2785 flash_op = FLASHROM_OPER_FLASH;
2787 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2788 flash_op = FLASHROM_OPER_PHY_SAVE;
2790 flash_op = FLASHROM_OPER_SAVE;
2792 memcpy(req->params.data_buf, p, num_bytes);
2794 status = be_cmd_write_flashrom(adapter, flash_cmd,
2795 pflashcomp[i].optype, flash_op, num_bytes);
2797 if ((status == ILLEGAL_IOCTL_REQ) &&
2798 (pflashcomp[i].optype ==
2801 dev_err(&adapter->pdev->dev,
2802 "cmd to write to flash rom failed.\n");
2810 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2814 if (fhdr->build[0] == '3')
2816 else if (fhdr->build[0] == '2')
2822 static int lancer_fw_download(struct be_adapter *adapter,
2823 const struct firmware *fw)
2825 #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
2826 #define LANCER_FW_DOWNLOAD_LOCATION "/prg"
2827 struct be_dma_mem flash_cmd;
2828 const u8 *data_ptr = NULL;
2829 u8 *dest_image_ptr = NULL;
2830 size_t image_size = 0;
2832 u32 data_written = 0;
2837 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2838 dev_err(&adapter->pdev->dev,
2839 "FW Image not properly aligned. "
2840 "Length must be 4 byte aligned.\n");
2842 goto lancer_fw_exit;
2845 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2846 + LANCER_FW_DOWNLOAD_CHUNK;
2847 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2848 &flash_cmd.dma, GFP_KERNEL);
2849 if (!flash_cmd.va) {
2851 dev_err(&adapter->pdev->dev,
2852 "Memory allocation failure while flashing\n");
2853 goto lancer_fw_exit;
2856 dest_image_ptr = flash_cmd.va +
2857 sizeof(struct lancer_cmd_req_write_object);
2858 image_size = fw->size;
2859 data_ptr = fw->data;
2861 while (image_size) {
2862 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2864 /* Copy the image chunk content. */
2865 memcpy(dest_image_ptr, data_ptr, chunk_size);
2867 status = lancer_cmd_write_object(adapter, &flash_cmd,
2868 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2869 &data_written, &add_status);
2874 offset += data_written;
2875 data_ptr += data_written;
2876 image_size -= data_written;
2880 /* Commit the FW written so far */
2881 status = lancer_cmd_write_object(adapter, &flash_cmd,
2882 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2883 &data_written, &add_status);
2886 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2889 dev_err(&adapter->pdev->dev,
2890 "Firmware load error. "
2891 "Status code: 0x%x Additional Status: 0x%x\n",
2892 status, add_status);
2893 goto lancer_fw_exit;
2896 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
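/* Sketch of Lancer's commit step: the download protocol ends with a
 * zero-length write at the final offset, which acts as the commit
 * record for everything streamed before it. The helper name is
 * hypothetical; the write-object call and arguments are those used
 * above.
 */
static int example_lancer_commit(struct be_adapter *adapter,
				 struct be_dma_mem *cmd, u32 total_written)
{
	u32 written;
	u8 add_status;

	return lancer_cmd_write_object(adapter, cmd, 0 /* commit */,
				       total_written,
				       LANCER_FW_DOWNLOAD_LOCATION,
				       &written, &add_status);
}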
2901 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
2903 struct flash_file_hdr_g2 *fhdr;
2904 struct flash_file_hdr_g3 *fhdr3;
2905 struct image_hdr *img_hdr_ptr = NULL;
2906 struct be_dma_mem flash_cmd;
2908 int status = 0, i = 0, num_imgs = 0;
2911 fhdr = (struct flash_file_hdr_g2 *) p;
2913 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2914 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2915 &flash_cmd.dma, GFP_KERNEL);
2916 if (!flash_cmd.va) {
2918 dev_err(&adapter->pdev->dev,
2919 "Memory allocation failure while flashing\n");
2923 if ((adapter->generation == BE_GEN3) &&
2924 (get_ufigen_type(fhdr) == BE_GEN3)) {
2925 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
2926 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2927 for (i = 0; i < num_imgs; i++) {
2928 img_hdr_ptr = (struct image_hdr *) (fw->data +
2929 (sizeof(struct flash_file_hdr_g3) +
2930 i * sizeof(struct image_hdr)));
2931 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2932 status = be_flash_data(adapter, fw, &flash_cmd,
2935 } else if ((adapter->generation == BE_GEN2) &&
2936 (get_ufigen_type(fhdr) == BE_GEN2)) {
2937 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2939 dev_err(&adapter->pdev->dev,
2940 "UFI and Interface are not compatible for flashing\n");
2944 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2947 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2951 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2957 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2959 const struct firmware *fw;
2962 if (!netif_running(adapter->netdev)) {
2963 dev_err(&adapter->pdev->dev,
2964 "Firmware load not allowed (interface is down)\n");
2968 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2972 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2974 if (lancer_chip(adapter))
2975 status = lancer_fw_download(adapter, fw);
2977 status = be_fw_download(adapter, fw);
2980 release_firmware(fw);
2984 static const struct net_device_ops be_netdev_ops = {
2985 .ndo_open = be_open,
2986 .ndo_stop = be_close,
2987 .ndo_start_xmit = be_xmit,
2988 .ndo_set_rx_mode = be_set_rx_mode,
2989 .ndo_set_mac_address = be_mac_addr_set,
2990 .ndo_change_mtu = be_change_mtu,
2991 .ndo_get_stats64 = be_get_stats64,
2992 .ndo_validate_addr = eth_validate_addr,
2993 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2994 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2995 .ndo_set_vf_mac = be_set_vf_mac,
2996 .ndo_set_vf_vlan = be_set_vf_vlan,
2997 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
2998 .ndo_get_vf_config = be_get_vf_config
3001 static void be_netdev_init(struct net_device *netdev)
3003 struct be_adapter *adapter = netdev_priv(netdev);
3004 struct be_rx_obj *rxo;
3007 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3008 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3010 if (be_multi_rxq(adapter))
3011 netdev->hw_features |= NETIF_F_RXHASH;
3013 netdev->features |= netdev->hw_features |
3014 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3016 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3017 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3019 netdev->flags |= IFF_MULTICAST;
3021 netif_set_gso_max_size(netdev, 65535);
3023 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3025 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3027 for_all_rx_queues(adapter, rxo, i)
3028 netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3031 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3035 static void be_unmap_pci_bars(struct be_adapter *adapter)
3038 iounmap(adapter->csr);
3040 iounmap(adapter->db);
3043 static int be_map_pci_bars(struct be_adapter *adapter)
3048 if (lancer_chip(adapter)) {
3049 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3050 pci_resource_len(adapter->pdev, 0));
3057 if (be_physfn(adapter)) {
3058 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3059 pci_resource_len(adapter->pdev, 2));
3062 adapter->csr = addr;
3065 if (adapter->generation == BE_GEN2) {
3068 if (be_physfn(adapter))
3073 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3074 pci_resource_len(adapter->pdev, db_reg));
3081 be_unmap_pci_bars(adapter);
3086 static void be_ctrl_cleanup(struct be_adapter *adapter)
3088 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3090 be_unmap_pci_bars(adapter);
3093 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3096 mem = &adapter->rx_filter;
3098 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3102 static int be_ctrl_init(struct be_adapter *adapter)
3104 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3105 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3106 struct be_dma_mem *rx_filter = &adapter->rx_filter;
3109 status = be_map_pci_bars(adapter);
3113 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3114 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3115 mbox_mem_alloc->size,
3116 &mbox_mem_alloc->dma,
3118 if (!mbox_mem_alloc->va) {
3120 goto unmap_pci_bars;
3122 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3123 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3124 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3125 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3127 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3128 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3129 &rx_filter->dma, GFP_KERNEL);
3130 if (rx_filter->va == NULL) {
3134 memset(rx_filter->va, 0, rx_filter->size);
3136 mutex_init(&adapter->mbox_lock);
3137 spin_lock_init(&adapter->mcc_lock);
3138 spin_lock_init(&adapter->mcc_cq_lock);
3140 init_completion(&adapter->flash_compl);
3141 pci_save_state(adapter->pdev);
3145 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3146 mbox_mem_alloc->va, mbox_mem_alloc->dma);
3149 be_unmap_pci_bars(adapter);
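/* The mailbox setup above uses the classic over-allocate-and-align
 * trick: grab size + 16 bytes and round both the CPU and bus addresses
 * up to a 16-byte boundary with PTR_ALIGN. A minimal sketch with a
 * hypothetical helper name:
 */
static void *example_alloc_aligned16(struct device *dev, size_t size,
				     struct be_dma_mem *raw,
				     dma_addr_t *dma_aligned)
{
	raw->size = size + 16;
	raw->va = dma_alloc_coherent(dev, raw->size, &raw->dma, GFP_KERNEL);
	if (!raw->va)
		return NULL;
	*dma_aligned = PTR_ALIGN(raw->dma, 16);
	return PTR_ALIGN(raw->va, 16);
}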
3155 static void be_stats_cleanup(struct be_adapter *adapter)
3157 struct be_dma_mem *cmd = &adapter->stats_cmd;
3160 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3164 static int be_stats_init(struct be_adapter *adapter)
3166 struct be_dma_mem *cmd = &adapter->stats_cmd;
3168 if (adapter->generation == BE_GEN2) {
3169 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3171 if (lancer_chip(adapter))
3172 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3174 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3176 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3178 if (cmd->va == NULL)
3180 memset(cmd->va, 0, cmd->size);
3184 static void __devexit be_remove(struct pci_dev *pdev)
3186 struct be_adapter *adapter = pci_get_drvdata(pdev);
3191 cancel_delayed_work_sync(&adapter->work);
3193 unregister_netdev(adapter->netdev);
3197 be_stats_cleanup(adapter);
3199 be_ctrl_cleanup(adapter);
3201 be_sriov_disable(adapter);
3203 be_msix_disable(adapter);
3205 pci_set_drvdata(pdev, NULL);
3206 pci_release_regions(pdev);
3207 pci_disable_device(pdev);
3209 free_netdev(adapter->netdev);
3212 static int be_get_config(struct be_adapter *adapter)
3216 status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3217 &adapter->function_mode, &adapter->function_caps);
3221 if (adapter->function_mode & FLEX10_MODE)
3222 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3224 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3226 status = be_cmd_get_cntl_attributes(adapter);
3233 static int be_dev_family_check(struct be_adapter *adapter)
3235 struct pci_dev *pdev = adapter->pdev;
3236 u32 sli_intf = 0, if_type;
3238 switch (pdev->device) {
3241 adapter->generation = BE_GEN2;
3245 adapter->generation = BE_GEN3;
3249 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3250 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3251 SLI_INTF_IF_TYPE_SHIFT;
3253 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3255 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3258 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3259 SLI_INTF_FAMILY_SHIFT);
3260 adapter->generation = BE_GEN3;
3263 adapter->generation = 0;
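/* The SLI_INTF decoding above is plain mask-and-shift field
 * extraction; a generic sketch:
 */
static inline u32 example_get_field(u32 reg, u32 mask, int shift)
{
	return (reg & mask) >> shift;
}
/* so if_type == example_get_field(sli_intf, SLI_INTF_IF_TYPE_MASK,
 * SLI_INTF_IF_TYPE_SHIFT). */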
3268 static int lancer_wait_ready(struct be_adapter *adapter)
3270 #define SLIPORT_READY_TIMEOUT 500
3274 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3275 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3276 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3282 if (i == SLIPORT_READY_TIMEOUT)
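/* The ready-wait above is the usual poll-with-timeout idiom: read a
 * status register, return early on the ready bit, sleep between polls,
 * and time out otherwise. Sketch only; the 1ms poll interval and the
 * helper name are assumptions.
 */
static int example_poll_ready(void __iomem *reg, u32 ready_mask, int polls)
{
	int i;

	for (i = 0; i < polls; i++) {
		if (ioread32(reg) & ready_mask)
			return 0;
		msleep(1);
	}
	return -ETIMEDOUT;
}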
3288 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3291 u32 sliport_status, err, reset_needed;
3292 status = lancer_wait_ready(adapter);
3294 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3295 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3296 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3297 if (err && reset_needed) {
3298 iowrite32(SLI_PORT_CONTROL_IP_MASK,
3299 adapter->db + SLIPORT_CONTROL_OFFSET);
3301 /* check if adapter has corrected the error */
3302 status = lancer_wait_ready(adapter);
3303 sliport_status = ioread32(adapter->db +
3304 SLIPORT_STATUS_OFFSET);
3305 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3306 SLIPORT_STATUS_RN_MASK);
3307 if (status || sliport_status)
3309 } else if (err || reset_needed) {
3316 static int __devinit be_probe(struct pci_dev *pdev,
3317 const struct pci_device_id *pdev_id)
3320 struct be_adapter *adapter;
3321 struct net_device *netdev;
3323 status = pci_enable_device(pdev);
3327 status = pci_request_regions(pdev, DRV_NAME);
3330 pci_set_master(pdev);
3332 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3333 if (netdev == NULL) {
3337 adapter = netdev_priv(netdev);
3338 adapter->pdev = pdev;
3339 pci_set_drvdata(pdev, adapter);
3341 status = be_dev_family_check(adapter);
3345 adapter->netdev = netdev;
3346 SET_NETDEV_DEV(netdev, &pdev->dev);
3348 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3350 netdev->features |= NETIF_F_HIGHDMA;
3352 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3354 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3359 status = be_sriov_enable(adapter);
3363 status = be_ctrl_init(adapter);
3367 if (lancer_chip(adapter)) {
3368 status = lancer_test_and_set_rdy_state(adapter);
3370 dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3375 /* sync up with fw's ready state */
3376 if (be_physfn(adapter)) {
3377 status = be_cmd_POST(adapter);
3382 /* tell fw we're ready to fire cmds */
3383 status = be_cmd_fw_init(adapter);
3387 status = be_cmd_reset_function(adapter);
3391 status = be_stats_init(adapter);
3395 status = be_get_config(adapter);
3399 /* The INTR bit may be set in the card when probed by a kdump kernel
3402 if (!lancer_chip(adapter))
3403 be_intr_set(adapter, false);
3405 be_msix_enable(adapter);
3407 INIT_DELAYED_WORK(&adapter->work, be_worker);
3408 adapter->rx_fc = adapter->tx_fc = true;
3410 status = be_setup(adapter);
3414 be_netdev_init(netdev);
3415 status = register_netdev(netdev);
3419 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3421 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3427 be_msix_disable(adapter);
3429 be_stats_cleanup(adapter);
3431 be_ctrl_cleanup(adapter);
3433 be_sriov_disable(adapter);
3435 free_netdev(netdev);
3436 pci_set_drvdata(pdev, NULL);
3438 pci_release_regions(pdev);
3440 pci_disable_device(pdev);
3442 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3446 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3448 struct be_adapter *adapter = pci_get_drvdata(pdev);
3449 struct net_device *netdev = adapter->netdev;
3451 cancel_delayed_work_sync(&adapter->work);
3453 be_setup_wol(adapter, true);
3455 netif_device_detach(netdev);
3456 if (netif_running(netdev)) {
3463 be_msix_disable(adapter);
3464 pci_save_state(pdev);
3465 pci_disable_device(pdev);
3466 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3470 static int be_resume(struct pci_dev *pdev)
3473 struct be_adapter *adapter = pci_get_drvdata(pdev);
3474 struct net_device *netdev = adapter->netdev;
3476 netif_device_detach(netdev);
3478 status = pci_enable_device(pdev);
3482 pci_set_power_state(pdev, 0);
3483 pci_restore_state(pdev);
3485 be_msix_enable(adapter);
3486 /* tell fw we're ready to fire cmds */
3487 status = be_cmd_fw_init(adapter);
3492 if (netif_running(netdev)) {
3497 netif_device_attach(netdev);
3500 be_setup_wol(adapter, false);
3502 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3507 * An FLR will stop BE from DMAing any data.
3509 static void be_shutdown(struct pci_dev *pdev)
3511 struct be_adapter *adapter = pci_get_drvdata(pdev);
3516 cancel_delayed_work_sync(&adapter->work);
3518 netif_device_detach(adapter->netdev);
3521 be_setup_wol(adapter, true);
3523 be_cmd_reset_function(adapter);
3525 pci_disable_device(pdev);
3528 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3529 pci_channel_state_t state)
3531 struct be_adapter *adapter = pci_get_drvdata(pdev);
3532 struct net_device *netdev = adapter->netdev;
3534 dev_err(&adapter->pdev->dev, "EEH error detected\n");
3536 adapter->eeh_err = true;
3538 netif_device_detach(netdev);
3540 if (netif_running(netdev)) {
3547 if (state == pci_channel_io_perm_failure)
3548 return PCI_ERS_RESULT_DISCONNECT;
3550 pci_disable_device(pdev);
3552 return PCI_ERS_RESULT_NEED_RESET;
3555 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3557 struct be_adapter *adapter = pci_get_drvdata(pdev);
3560 dev_info(&adapter->pdev->dev, "EEH reset\n");
3561 adapter->eeh_err = false;
3563 status = pci_enable_device(pdev);
3565 return PCI_ERS_RESULT_DISCONNECT;
3567 pci_set_master(pdev);
3568 pci_set_power_state(pdev, 0);
3569 pci_restore_state(pdev);
3571 /* Check if card is ok and fw is ready */
3572 status = be_cmd_POST(adapter);
3574 return PCI_ERS_RESULT_DISCONNECT;
3576 return PCI_ERS_RESULT_RECOVERED;
3579 static void be_eeh_resume(struct pci_dev *pdev)
3582 struct be_adapter *adapter = pci_get_drvdata(pdev);
3583 struct net_device *netdev = adapter->netdev;
3585 dev_info(&adapter->pdev->dev, "EEH resume\n");
3587 pci_save_state(pdev);
3589 /* tell fw we're ready to fire cmds */
3590 status = be_cmd_fw_init(adapter);
3594 status = be_setup(adapter);
3598 if (netif_running(netdev)) {
3599 status = be_open(netdev);
3603 netif_device_attach(netdev);
3606 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3609 static struct pci_error_handlers be_eeh_handlers = {
3610 .error_detected = be_eeh_err_detected,
3611 .slot_reset = be_eeh_reset,
3612 .resume = be_eeh_resume,
3615 static struct pci_driver be_driver = {
3617 .id_table = be_dev_ids,
3619 .remove = be_remove,
3620 .suspend = be_suspend,
3621 .resume = be_resume,
3622 .shutdown = be_shutdown,
3623 .err_handler = &be_eeh_handlers
3626 static int __init be_init_module(void)
3628 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3629 rx_frag_size != 2048) {
3630 printk(KERN_WARNING DRV_NAME
3631 " : Module param rx_frag_size must be 2048/4096/8192."
3633 rx_frag_size = 2048;
3636 return pci_register_driver(&be_driver);
3638 module_init(be_init_module);
3640 static void __exit be_exit_module(void)
3642 pci_unregister_driver(&be_driver);
3644 module_exit(be_exit_module);