/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/net_tstamp.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>
#define DRV_NAME	"mlx4_en"
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb 2014"

#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)

#define MLX4_EN_PAGE_SHIFT	12
#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
#define DEF_RX_RINGS		16
#define MAX_RX_RINGS		128
#define MIN_RX_RINGS		4
#define HEADROOM		(2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE		64
#define STAMP_DWORDS		(STAMP_STRIDE / 4)
#define STAMP_SHIFT		31
#define STAMP_VAL		0x7fffffff
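/* The STAMP_* constants above are presumably used when recycling completed TX
 * descriptors: freed TXBBs appear to be "stamped" every STAMP_STRIDE bytes
 * with STAMP_VAL plus the ownership bit (bit STAMP_SHIFT) so that stale
 * entries are never mistaken for fresh hardware-owned work.
 */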
#define STATS_DELAY		(HZ / 4)
#define SERVICE_TASK_DELAY	(HZ / 4)
#define MAX_NUM_OF_FS_RULES	256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE		512
#define MAX_DESC_TXBBS		(MAX_DESC_SIZE / TXBB_SIZE)
/*
 * OS related constants and tunables
 */
#define MLX4_EN_PRIV_FLAGS_BLUEFLAME	1

#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)

/* Use the maximum between 16384 and a single page */
#define MLX4_EN_ALLOC_SIZE	PAGE_ALIGN(16384)

#define MLX4_EN_ALLOC_PREFER_ORDER	PAGE_ALLOC_COSTLY_ORDER
/* Receive fragment sizes; we use at most 3 fragments (for 9600 byte MTU
 * and 4K allocations) */
	FRAG_SZ0 = 1536 - NET_IP_ALIGN,
	FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
#define MLX4_EN_MAX_RX_FRAGS	4

/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE	8192
#define MLX4_EN_MAX_RX_SIZE	8192

/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
#define MLX4_EN_SMALL_PKT_SIZE		64
#define MLX4_EN_MIN_TX_RING_P_UP	1
#define MLX4_EN_MAX_TX_RING_P_UP	32
#define MLX4_EN_NUM_UP			8
#define MLX4_EN_DEF_TX_RING_SIZE	512
#define MLX4_EN_DEF_RX_RING_SIZE	1024
#define MAX_TX_RINGS			(MLX4_EN_MAX_TX_RING_P_UP * \
					 MLX4_EN_NUM_UP)

#define MLX4_EN_DEFAULT_TX_WORK		256
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET	44
#define MLX4_EN_RX_COAL_TIME	0x10

#define MLX4_EN_TX_COAL_PKTS	16
#define MLX4_EN_TX_COAL_TIME	0x10

#define MLX4_EN_RX_RATE_LOW		400000
#define MLX4_EN_RX_COAL_TIME_LOW	0
#define MLX4_EN_RX_RATE_HIGH		450000
#define MLX4_EN_RX_COAL_TIME_HIGH	128
#define MLX4_EN_RX_SIZE_THRESH		1024
#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL		0
#define MLX4_EN_AVG_PKT_SMALL		256
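/*
 * Illustrative sketch (not part of the driver sources): adaptive RX
 * coalescing typically interpolates the moderation time between the LOW and
 * HIGH settings above based on the measured packet rate.  The helper below is
 * a hypothetical example of such an interpolation, not the driver's exact
 * algorithm.
 *
 *	static inline u16 rx_coal_time_for_rate(unsigned long rate)
 *	{
 *		if (rate < MLX4_EN_RX_RATE_LOW)
 *			return MLX4_EN_RX_COAL_TIME_LOW;
 *		if (rate > MLX4_EN_RX_RATE_HIGH)
 *			return MLX4_EN_RX_COAL_TIME_HIGH;
 *		return (rate - MLX4_EN_RX_RATE_LOW) *
 *		       (MLX4_EN_RX_COAL_TIME_HIGH - MLX4_EN_RX_COAL_TIME_LOW) /
 *		       (MLX4_EN_RX_RATE_HIGH - MLX4_EN_RX_RATE_LOW) +
 *		       MLX4_EN_RX_COAL_TIME_LOW;
 *	}
 */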
#define MLX4_EN_AUTO_CONF	0xffff

#define MLX4_EN_DEF_RX_PAUSE	1
#define MLX4_EN_DEF_TX_PAUSE	1
/* Interval between successive polls in the Tx routine when polling is used
 * instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER	16
#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)
#define SMALL_PACKET_SIZE	(256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE	(128 - NET_IP_ALIGN)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)

#define MLX4_EN_MIN_MTU		46
#define ETH_BCAST		0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES	5
#define MLX4_EN_LOOPBACK_TIMEOUT	100
#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_FACTOR		1024
#define NUM_PERF_STATS		NUM_PERF_COUNTERS

#define INC_PERF_COUNTER(cnt)		(++(cnt))
#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt)		(cnt)
#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)

#else

#define NUM_PERF_STATS			0
#define INC_PERF_COUNTER(cnt)		do {} while (0)
#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
#define GET_PERF_COUNTER(cnt)		(0)
#define GET_AVG_PERF_COUNTER(cnt)	(0)
#endif /* MLX4_EN_PERF_STAT */
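/*
 * Illustrative usage (not part of the driver sources): callers keep a counter
 * and feed it samples; the macros compile away entirely when
 * MLX4_EN_PERF_STAT is not defined.  "tx_poll_cycles" below is a hypothetical
 * counter name.
 *
 *	AVG_PERF_COUNTER(priv->pstats.tx_poll_cycles, cycles_spent);
 *	pr_debug("avg tx poll: %lu\n",
 *		 GET_AVG_PERF_COUNTER(priv->pstats.tx_poll_cycles));
 */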
/* Constants for TX flow */
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
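/* The two helpers below are generic utilities: ROUNDUP_LOG2(x) is the log2 of
 * x rounded up to the next power of two, and XNOR(x, y) is true when x and y
 * are either both zero or both non-zero.
 */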
#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
#define XNOR(x, y)		(!(x) == !(y))
struct mlx4_en_tx_info {
} ____cacheline_aligned_in_smp;
#define MLX4_EN_BIT_DESC_OWN	0x80000000
#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD	0x100
#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	struct mlx4_wqe_data_seg data; /* at least one data segment */
	struct mlx4_wqe_lso_seg lso;
	struct mlx4_wqe_inline_seg inl;
#define MLX4_EN_USE_SRQ		0x01000000

#define MLX4_EN_CX3_LOW_ID	0x1000
#define MLX4_EN_CX3_HIGH_ID	0x1005
struct mlx4_en_rx_alloc {
struct mlx4_en_tx_ring {
	/* cache line used and dirtied in tx completion
	 * (mlx4_en_free_tx_buf())
	 */
	unsigned long		wake_queue;

	/* cache line used and dirtied in mlx4_en_xmit() */
	u32			prod ____cacheline_aligned_in_smp;
	unsigned long		packets;
	unsigned long		tx_csum;
	unsigned long		tso_packets;
	unsigned long		xmit_more;
	unsigned long		queue_stopped;

	/* Following part should be mostly read */
	cpumask_t		affinity_mask;
	struct mlx4_hwq_resources wqres;
	u32			size; /* number of TXBBs */
	u16			cqn; /* index of port CQ associated with this ring */
	struct mlx4_en_tx_info	*tx_info;
	struct mlx4_qp_context	context;
	enum mlx4_qp_state	qp_state;
	struct netdev_queue	*tx_queue;
	int			hwtstamp_tx_type;
} ____cacheline_aligned_in_smp;
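/* Note: the TX ring fields above are deliberately grouped by the code path
 * that dirties them (completion path, transmit path, and a mostly-read tail),
 * so the ____cacheline_aligned_in_smp annotations keep the hot paths from
 * bouncing cache lines between CPUs.
 */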
struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};
struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
	u32 size;	/* number of Rx descs */
	u16 cqn;	/* index of port CQ associated with this ring */
	unsigned long packets;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	unsigned long csum_complete;
	int hwtstamp_rx_filter;
	cpumask_var_t affinity_mask;
	struct mlx4_hwq_resources wqres;
	struct net_device	*dev;
	struct napi_struct	napi;
	struct mlx4_cqe		*buf;
#define MLX4_EN_OPCODE_ERROR	0x1e
#ifdef CONFIG_NET_RX_BUSY_POLL
#define MLX4_EN_CQ_STATE_IDLE		0
#define MLX4_EN_CQ_STATE_NAPI		1	/* NAPI owns this CQ */
#define MLX4_EN_CQ_STATE_POLL		2	/* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATE_NAPI | MLX4_EN_CQ_STATE_POLL)
#define MLX4_EN_CQ_STATE_NAPI_YIELD	4	/* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATE_POLL_YIELD	8	/* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
	spinlock_t	poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
	struct irq_desc *irq_desc;
struct mlx4_en_port_profile {

struct mlx4_en_profile {
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
	struct mlx4_dev		*dev;
	struct pci_dev		*pdev;
	struct mutex		state_lock;
	struct net_device	*pndev[MLX4_MAX_PORTS + 1];
	struct mlx4_en_profile	profile;
	struct workqueue_struct	*workqueue;
	struct device		*dma_device;
	void __iomem		*uar_map;
	struct mlx4_uar		priv_uar;
	u8			mac_removed[MLX4_MAX_PORTS + 1];
	struct cyclecounter	cycles;
	struct timecounter	clock;
	unsigned long		last_overflow_check;
	unsigned long		overflow_period;
	struct ptp_clock	*ptp_clock;
	struct ptp_clock_info	ptp_clock_info;
struct mlx4_en_rss_map {
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
enum mlx4_en_port_flag {
	MLX4_EN_PORT_ANC	= 1 << 0, /* Auto-negotiation complete */
	MLX4_EN_PORT_ANE	= 1 << 1, /* Auto-negotiation enabled */
};

struct mlx4_en_port_state {
struct mlx4_en_pkt_stats {
	unsigned long broadcast;
	unsigned long rx_prio[8];
	unsigned long tx_prio[8];
#define NUM_PKT_STATS		17
};
struct mlx4_en_port_stats {
	unsigned long tso_packets;
	unsigned long xmit_more;
	unsigned long queue_stopped;
	unsigned long wake_queue;
	unsigned long tx_timeout;
	unsigned long rx_alloc_failed;
	unsigned long rx_chksum_good;
	unsigned long rx_chksum_none;
	unsigned long rx_chksum_complete;
	unsigned long tx_chksum_offload;
#define NUM_PORT_STATS		9
};
struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS	6
enum mlx4_en_mclist_act {

struct mlx4_en_mc_list {
	struct list_head	list;
	enum mlx4_en_mclist_act	action;
struct mlx4_en_frag_info {
	u16 frag_prefix_size;
#ifdef CONFIG_MLX4_EN_DCB
/* Minimal TC BW - setting to 0 will block traffic */
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */

#define MLX4_EN_TC_ETS 7
#endif
struct ethtool_flow_id {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in the Tx WQE
	 */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that the hardware looped back */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4),
	MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP	= (1 << 5),
};
#define MLX4_EN_MAC_HASH_SIZE	(1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX	5
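/* The unicast MAC hash table appears to be indexed by a single byte of the
 * address (byte MLX4_EN_MAC_HASH_IDX), which is why it has
 * 1 << BITS_PER_BYTE buckets.
 */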
struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;
	struct mlx4_en_port_profile *prof;
	struct net_device *dev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device_stats stats;
	struct net_device_stats ret_stats;
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 adaptive_rx_coal;
	u32 validate_loopback;
	struct mlx4_hwq_resources res;
	unsigned char current_mac[ETH_ALEN + 2];
	struct mlx4_en_rss_map rss_map;
	u8 num_tx_rings_p_up;
	struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
#ifdef CONFIG_MLX4_EN_VXLAN
	struct work_struct vxlan_add_task;
	struct work_struct vxlan_del_task;
#endif
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_port_stats port_stats;
	struct list_head mc_list;
	struct list_head curr_list;
	struct mlx4_en_stat_out_mbox hw_stats;
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
	struct hwtstamp_config hwtstamp_config;
#ifdef CONFIG_MLX4_EN_DCB
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz)
{
	return buf + idx * cqe_sz;
}
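/* cqe_sz is the CQE stride in bytes; ConnectX adapters support both 32- and
 * 64-byte CQEs, so the CQ buffer is indexed by stride rather than by
 * sizeof(struct mlx4_cqe).
 */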
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
	spin_lock_init(&cq->poll_lock);
	cq->state = MLX4_EN_CQ_STATE_IDLE;
}
/* called from the device poll routine to get ownership of a cq */
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	bool rc = true;

	spin_lock(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
		cq->state |= MLX4_EN_CQ_STATE_NAPI_YIELD;
		rc = false;
	} else {
		/* we don't care if someone yielded */
		cq->state = MLX4_EN_CQ_STATE_NAPI;
	}
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while napi had it */
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	bool rc = false;

	spin_lock(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATE_POLL |
			     MLX4_EN_CQ_STATE_NAPI_YIELD));
	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock(&cq->poll_lock);
	return rc;
}
/* called from mlx4_en_low_latency_poll() */
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	bool rc = true;

	spin_lock_bh(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		struct net_device *dev = cq->dev;
		struct mlx4_en_priv *priv = netdev_priv(dev);
		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
		rc = false;
		rx_ring->yields++;
	} else {
		/* preserve yield marks */
		cq->state |= MLX4_EN_CQ_STATE_POLL;
	}
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* returns true if someone tried to get the cq while it was locked */
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	bool rc = false;

	spin_lock_bh(&cq->poll_lock);
	WARN_ON(cq->state & MLX4_EN_CQ_STATE_NAPI);
	if (cq->state & MLX4_EN_CQ_STATE_POLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATE_IDLE;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}
/* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq)
{
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}
#else /* !CONFIG_NET_RX_BUSY_POLL */
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) {}
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq) { return true; }
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq) { return false; }
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq) { return false; }
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq) { return false; }
static inline bool mlx4_en_cq_busy_polling(struct mlx4_en_cq *cq) { return false; }
#endif /* CONFIG_NET_RX_BUSY_POLL */
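/*
 * Illustrative sketch (not part of the driver sources) of how a NAPI poll
 * handler is expected to bracket its work with the helpers above when busy
 * polling is compiled in; "budget" and "done" are hypothetical locals and the
 * exact arguments are indicative only.
 *
 *	if (!mlx4_en_cq_lock_napi(cq))
 *		return budget;		// a socket poller owns the CQ, back off
 *	done = mlx4_en_process_rx_cq(dev, cq, budget);
 *	mlx4_en_cq_unlock_napi(cq);
 *	if (done < budget)
 *		napi_complete(napi);	// then re-arm via mlx4_en_arm_cq()
 */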
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)

void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features);
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof);

int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev, int detach);

void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);

int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);

void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
			 void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   int qpn, u32 size, u16 stride,
			   int node, int queue_index);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq,
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn, int user_prio,
			     struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);

void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);

int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
#ifdef CONFIG_MLX4_EN_DCB
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif

int mlx4_en_setup_tc(struct net_device *dev, u8 up);
#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#endif
#define MLX4_EN_NUM_SELF_TEST	5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);

#define DEV_FEATURE_CHANGED(dev, new_features, feature) \
	((dev->features & feature) ^ (new_features & feature))
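/* Example (illustrative, not from the driver sources):
 * DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) is non-zero
 * when the VLAN RX-offload bit differs between the device's current feature
 * set and the requested one.
 */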
int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t new_features);
/*
 * Functions for time stamping
 */
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev);
void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev);
extern const struct ethtool_ops mlx4_en_ethtool_ops;

/*
 * printk / logging functions
 */
void en_print(const char *level, const struct mlx4_en_priv *priv,
	      const char *format, ...);

#define en_dbg(mlevel, priv, format, ...)				\
do {									\
	if (NETIF_MSG_##mlevel & (priv)->msg_enable)			\
		en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__);	\
} while (0)
#define en_warn(priv, format, ...) \
	en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
#define en_err(priv, format, ...) \
	en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
#define en_info(priv, format, ...) \
	en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
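/* Illustrative usage (not part of the driver sources): en_dbg() only emits
 * when the corresponding NETIF_MSG_* bit is set in priv->msg_enable, while
 * en_err()/en_warn()/en_info() always print.  "ring" and "done" below are
 * hypothetical locals.
 *
 *	en_dbg(RX_STATUS, priv, "rx ring %d: %d packets\n", ring, done);
 *	en_err(priv, "Failed to allocate rx ring %d\n", ring);
 */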
#define mlx4_err(mdev, format, ...)				\
	pr_err(DRV_NAME " %s: " format,				\
	       dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...)				\
	pr_info(DRV_NAME " %s: " format,			\
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...)				\
	pr_warn(DRV_NAME " %s: " format,			\
		dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)