/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/busy_poll.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	if (likely(done))
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return -EPROTONOSUPPORT;
	}
}
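
/* Worker that (re)attaches the HW flow steering rule for a cached RFS
 * filter. The rule is built from three stacked specs - outer Ethernet,
 * IPv4 and a TCP/UDP 4-tuple with full masks - and steers matching
 * packets to the RSS QP of the requested RX ring. It runs from the mdev
 * workqueue, so it may sleep in the firmware command path.
 */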
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id < 0) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
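
/* Pick the hash bucket for a 4-tuple: the ports and the XOR of the two
 * IP addresses are folded into one word and dispersed with hash_long().
 * Collisions are resolved by the exact-match walk in
 * mlx4_en_filter_find().
 */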
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
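
/* .ndo_rx_flow_steer callback, invoked by the RFS core in softirq
 * context. It parses the IPv4 TCP/UDP headers out of the skb, then
 * either retargets an existing filter to the new RX queue or allocates
 * a fresh one, deferring the actual HW rule attach to
 * mlx4_en_filter_work(). The returned driver filter id is what the
 * stack later passes back via rps_may_expire_flow().
 */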
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
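
/* Expire up to MLX4_EN_FILTER_EXPIRY_QUOTA filters per invocation.
 * Filters the RFS core no longer cares about are unlinked under
 * filters_lock and freed outside of it; the list is then rotated past
 * the last survivor so the next scan resumes where this one stopped.
 */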
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif	/* CONFIG_RFS_ACCEL */
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
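
/* Expand a MAC address held in the low 6 bytes of a u64 (as reported by
 * the firmware) into a byte array in network order, most significant
 * byte first. The two trailing pad bytes are zeroed.
 */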
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;
	struct mlx4_spec_list spec_eth_outer = { {NULL} };
	struct mlx4_spec_list spec_vxlan     = { {NULL} };
	struct mlx4_spec_list spec_eth_inner = { {NULL} };

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return 0; /* do nothing */

	rule.port = priv->port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);

	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;    /* any vxlan header */
	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;  /* any inner eth header */

	list_add_tail(&spec_eth_outer.list, &rule.list);
	list_add_tail(&spec_vxlan.list,     &rule.list);
	list_add_tail(&spec_eth_inner.list, &rule.list);

	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
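
/* Register the primary port MAC and obtain the base RX QP. In A0
 * steering the QP is implied by the MAC table index; in B0 and
 * device-managed modes a QP range is reserved, a unicast steering rule
 * is attached, and the MAC is tracked in mac_hash for later
 * replacement and teardown. Error paths unwind in reverse order.
 */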
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);

				mlx4_unregister_mac(dev, priv->port, mac);
				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
			}
		}

		if (priv->tunnel_reg_id) {
			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
			priv->tunnel_reg_id = 0;
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);

	mutex_lock(&mdev->state_lock);
	err = mlx4_en_do_set_mac(priv);
	mutex_unlock(&mdev->state_lock);

	return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}
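
/* Diff the freshly cached multicast list (src) against the one already
 * programmed in HW (dst): entries missing from src are marked
 * MCLIST_REM, new ones are duplicated into dst and marked MCLIST_ADD.
 * This is an O(n*m) scan, which is acceptable for typical multicast
 * list sizes.
 */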
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}
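
/* Synchronize the HW unicast MAC table with the netdev uc list: release
 * and unregister entries that disappeared, then register and steer the
 * new ones. On any failure (steering table full, out of memory) the
 * port falls back to forced promiscuous mode until a later pass
 * succeeds.
 */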
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif
static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
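
/* Adaptive RX interrupt moderation, sampled per ring once per
 * sample_interval: below pkt_rate_low the moderation time is pinned to
 * rx_usecs_low, above pkt_rate_high to rx_usecs_high, and in between it
 * is interpolated linearly:
 *
 *   moder_time = rx_usecs_low + (rate - pkt_rate_low) *
 *                (rx_usecs_high - rx_usecs_low) /
 *                (pkt_rate_high - pkt_rate_low)
 */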
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}
/* mlx4_en_service_task - Run service tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
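
/* Bring the port up: activate RX rings and CQs, obtain the base QP and
 * RSS steering, create the drop QP, activate TX CQs and rings, program
 * port/MTU/pause settings, INIT_PORT, then attach the broadcast address
 * and kick the rx_mode task. Error paths unwind in strict reverse order
 * of the setup steps. Called with mdev->state_lock held.
 */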
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto tx_err;
		}
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}
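
/* Tear the port down in roughly the reverse order of
 * mlx4_en_start_port(): quiesce TX, drop promiscuous and multicast
 * state, detach steering rules, then free TX followed by RX resources.
 * Callers hold mdev->state_lock.
 */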
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		local_bh_disable();
		while (!mlx4_en_cq_lock_napi(cq)) {
			pr_info("CQ %d locked\n", i);
			mdelay(1);
		}
		local_bh_enable();

		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
			msleep(1);
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}
}
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;
	int node;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE,
					   node, i))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		node = cpu_to_node(i % num_online_cpus());
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride,
					   node))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	if (priv->mdev->dev->caps.comp_pool) {
		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
		if (!priv->dev->rx_cpu_rmap)
			goto err;
	}
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	return -ENOMEM;
}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}
static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* device doesn't support time stamping */
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
		return -EINVAL;

	/* TX HW timestamp */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	/* RX HW timestamp */
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
		config.tx_type = HWTSTAMP_TX_OFF;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
			    sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
}

static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx4_en_hwtstamp_set(dev, ifr);
	case SIOCGHWTSTAMP:
		return mlx4_en_hwtstamp_get(dev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}
static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;
	u64 mac_u64 = mlx4_en_mac_to_u64(mac);

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
}

static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
}

static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
}

static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
}

static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_port_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}
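
/* Two ndo tables follow: the _master variant adds the SR-IOV VF
 * management hooks and is installed when this function owns the PF
 * (mlx4_is_master()); the plain variant is used otherwise.
 */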
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= mlx4_en_low_latency_recv,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
};

static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	u64 mac_u64;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
			mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
	}

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}