/* drivers/net/ethernet/mellanox/mlx4/en_netdev.c
 * (as of commit "netdev: pass the stuck queue to the timeout handler")
 */
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>
#include <net/vxlan.h>
#include <net/devlink.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

#define MLX4_EN_MAX_XDP_MTU ((int)(PAGE_SIZE - ETH_HLEN - (2 * VLAN_HLEN) - \
				   XDP_PACKET_HEADROOM))

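/* mlx4_en_setup_tc() - map user priorities (UPs) to contiguous ranges of Tx
 * queues. With @up == 0 the mapping is removed; otherwise @up must equal
 * MLX4_EN_NUM_UP_HIGH and each UP gets num_tx_rings_p_up consecutive queues.
 * Illustrative only: with num_tx_rings_p_up == 8, UP 0 covers queues 0-7,
 * UP 1 covers queues 8-15, and so on.
 */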
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	netdev_set_num_tc(dev, up);
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (up) {
			if (priv->dcbx_cap)
				priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		} else {
			priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
			priv->cee_config.pfc_state = false;
		}
	}
#endif /* CONFIG_MLX4_EN_DCB */

	return 0;
}

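/* mlx4_en_alloc_tx_queue_per_tc() - switch the port between the single-UP
 * and multi-UP Tx ring layouts. The new profile is staged in a temporary
 * priv, validated against MAX_TX_RINGS (XDP rings included), and swapped in
 * with the port stopped; the port is restarted afterwards if it was up.
 */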
int mlx4_en_alloc_tx_queue_per_tc(struct net_device *dev, u8 tc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int total_count;
	int port_up = 0;
	int err = 0;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	new_prof.num_up = (tc == 0) ? MLX4_EN_NUM_UP_LOW :
				      MLX4_EN_NUM_UP_HIGH;
	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
				   new_prof.num_up;
	total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
	if (total_count > MAX_TX_RINGS) {
		err = -EINVAL;
		en_err(priv,
		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
		       total_count, MAX_TX_RINGS);
		goto out;
	}
	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err) {
			en_err(priv, "Failed starting port for setup TC\n");
			goto out;
		}
	}

	err = mlx4_en_setup_tc(dev, tc);
out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	return err;
}

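/* ndo_setup_tc() entry point: only mqprio hardware offload is supported, and
 * only for 0 traffic classes (reset) or exactly MLX4_EN_NUM_UP_HIGH of them.
 * Illustrative only, a user typically lands here via something like:
 *
 *	tc qdisc add dev <ifname> root mqprio num_tc <MLX4_EN_NUM_UP_HIGH>
 */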
static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc);
}

#ifdef CONFIG_RFS_ACCEL

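/* Accelerated RFS (aRFS): each mlx4_en_filter describes one TCP/UDP 4-tuple
 * that the stack asked us to steer to a specific Rx queue. Filters live on
 * both a list (for expiry scanning) and a hash table keyed by the 4-tuple
 * (for lookup); the actual HW rule is attached from a workqueue.
 */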
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;	/* RFS infrastructure id */
	int id;		/* mlx4_en driver id */
	u64 reg_id;	/* Flow steering API id */
	u8 activated;	/* Used to prevent expiry before filter
			 * is attached
			 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}

/* Must not acquire state_lock, as its corresponding work_sync
 * is done under it.
 */
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

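/* Reduce the 4-tuple to a single word and hash it with hash_long() into one
 * of 1 << MLX4_EN_FILTER_HASH_SHIFT buckets of priv->filter_hash.
 */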
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

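/* ndo_rx_flow_steer() callback. Called from the Rx path (atomic context,
 * hence GFP_ATOMIC in mlx4_en_filter_alloc()) when RFS wants an IPv4 TCP/UDP
 * flow moved to @rxq_index. Returns the driver filter id that will later be
 * handed back to rps_may_expire_flow(), or a negative errno.
 */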
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

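/* VLAN filtering: the active VLAN set is mirrored into the port VLAN filter
 * with mlx4_SET_VLAN_FLTR() while the port is up, and each VID is also
 * registered with (or unregistered from) the device so it can be mapped to
 * a VLAN index.
 */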
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err) {
			en_err(priv, "Failed configuring VLAN filter\n");
			goto out;
		}
	}
	err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
	if (err)
		en_dbg(HW, priv, "Failed adding vlan %d\n", vid);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return err;
}

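/* Expand a MAC address packed into the low 48 bits of a u64 (as used by the
 * firmware API) into a 6-byte array plus 2 bytes of zero padding. Worked
 * example, illustrative only: src_mac 0x0002c9000001 yields dst_mac
 * 00:02:c9:00:00:01.
 */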
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}

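/* Attach a unicast steering rule for @mac pointing at QP *qpn. The mechanics
 * depend on the device steering mode: B0 uses a GID-based unicast attach,
 * while device-managed flow steering builds an mlx4_net_trans_rule matching
 * the destination MAC and returns a rule handle in *reg_id.
 */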
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int index = 0;
	int err = 0;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	en_info(priv, "Steering Mode %d\n", dev->caps.steering_mode);

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP,
				    MLX4_RES_USAGE_DRIVER);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		mlx4_unregister_mac(dev, priv->port, mac);
		return err;
	}

	return 0;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

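/* Swap prev_mac for new_mac on the same QP without bringing the port down:
 * the old steering rule and MAC registration are torn down, the mac_hash
 * entry is rehashed under RCU, and new registrations (including the VXLAN
 * tunnel rule, if any) are installed in their place.
 */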
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

static void mlx4_en_update_user_mac(struct mlx4_en_priv *priv,
				    unsigned char new_mac[ETH_ALEN + 2])
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_USER_MAC_EN))
		return;

	err = mlx4_SET_PORT_user_mac(mdev->dev, priv->port, new_mac);
	if (err)
		en_err(priv, "Failed to pass user MAC(%pM) to Firmware for port %d, with error %d\n",
		       new_mac, priv->port, err);
}

static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (err)
		goto out;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mlx4_en_update_user_mac(priv, new_mac);
out:
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * These are the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * mark them as need to add
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (ether_addr_equal(dst_tmp->addr, src_tmp->addr)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

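/* ndo_set_rx_mode() callback. Called with the address list lock held, in
 * atomic context, so the real work (firmware commands that may sleep) is
 * deferred to rx_mode_task on the driver workqueue; see
 * mlx4_en_do_set_rx_mode() below.
 */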
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				if (mclist->tunnel_reg_id) {
					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
					if (err)
						en_err(priv, "Failed to detach multicast address\n");
				}

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
							       &mclist->tunnel_reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac,
						    priv->current_mac))
				found = true;

			if (!found) {
				mac = mlx4_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 reg_id;
	int err = 0;
	int *qpn = &priv->base_qpn;
	struct mlx4_mac_entry *entry;

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}

	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
	return err;
}

static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
{
	u64 mac;
	unsigned int i;
	int qpn = priv->base_qpn;
	struct hlist_head *bucket;
	struct hlist_node *tmp;
	struct mlx4_mac_entry *entry;

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			mac = mlx4_mac_to_u64(entry->mac);
			en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
			       entry->mac);
			mlx4_en_uc_steer_release(priv, entry->mac,
						 qpn, entry->reg_id);

			mlx4_unregister_mac(priv->mdev->dev, priv->port, mac);
			hlist_del_rcu(&entry->hlist);
			kfree_rcu(entry, rcu);
		}
	}

	if (priv->tunnel_reg_id) {
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
		priv->tunnel_reg_id = 0;
	}
}

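/* ndo_tx_timeout() handler. @txqueue identifies the queue the stack saw
 * stall (added by "netdev: pass the stuck queue to the timeout handler");
 * this version still reports every stopped Tx ring, then schedules the
 * watchdog task, which restarts the port.
 */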
static void mlx4_en_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];

		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
			i, tx_ring->qpn, tx_ring->sp_cqn,
			tx_ring->cons, tx_ring->prod);
	}

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static void
mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	mlx4_en_fold_software_stats(dev);
	netdev_stats_to_stats64(stats, &dev->stats);
	spin_unlock_bh(&priv->stats_lock);
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i, t;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			cq = priv->tx_cq[t][i];
			cq->moder_cnt = priv->tx_frames;
			cq->moder_time = priv->tx_usecs;
		}
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

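/* Adaptive Rx coalescing: once per sample_interval, derive the packet rate
 * of each ring and, between the low and high rate thresholds, linearly
 * interpolate the CQ moderation time:
 *
 *	moder_time = rx_usecs_low +
 *		     (rate - pkt_rate_low) *
 *		     (rx_usecs_high - rx_usecs_low) /
 *		     (pkt_rate_high - pkt_rate_low)
 *
 * Rates at or below pkt_rate_low (or small average packets) pin the time to
 * rx_usecs_low; rates at or above pkt_rate_high pin it to rx_usecs_high.
 */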
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	u32 pkt_rate_high, pkt_rate_low;
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	pkt_rate_low = READ_ONCE(priv->pkt_rate_low);
	pkt_rate_high = READ_ONCE(priv->pkt_rate_high);

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		rx_packets = READ_ONCE(priv->rx_ring[ring]->packets);
		rx_bytes = READ_ONCE(priv->rx_ring[ring]->bytes);

		rx_pkt_diff = rx_packets - priv->last_moder_packets[ring];
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? (rx_bytes -
				priv->last_moder_bytes[ring]) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate <= pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate >= pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(pkt_rate_high - pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		cq = priv->rx_cq[ring];
		if (moder_time != priv->last_moder_time[ring] ||
		    cq->moder_cnt != priv->rx_frames) {
			priv->last_moder_time[ring] = moder_time;
			cq->moder_time = moder_time;
			cq->moder_cnt = priv->rx_frames;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv, priv->current_mac);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

/* mlx4_en_service_task - run service tasks that need to be done
 * periodically
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
			mlx4_en_ptp_overflow_check(mdev);

		mlx4_en_recover_from_oom(priv);
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If the observable port state changed, set the carrier state and
	 * report to the system log
	 */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
	int numa_node = priv->mdev->dev->numa_node;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
			ring->affinity_mask);
	return 0;
}

static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}

static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
				      int tx_ring_idx)
{
	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
	int rr_index = tx_ring_idx;

	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
	tx_ring->recycle_ring = priv->rx_ring[rr_index];
	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
	       TX_XDP, tx_ring_idx, rr_index);
}

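/* Bring the port up: activate Rx rings and CQs, reserve the base QP, program
 * RSS and the drop QP, activate all Tx/XDP rings, push port/MTU/VXLAN
 * configuration to firmware, attach broadcast and unicast steering, and
 * finally mark the port up and wake the Tx queues. Error paths unwind in
 * reverse order via the tx_err/rss_err/mac_err/cq_err labels.
 */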
18cc42a3 1616int mlx4_en_start_port(struct net_device *dev)
c27a02cd
YP
1617{
1618 struct mlx4_en_priv *priv = netdev_priv(dev);
1619 struct mlx4_en_dev *mdev = priv->mdev;
1620 struct mlx4_en_cq *cq;
1621 struct mlx4_en_tx_ring *tx_ring;
c27a02cd 1622 int rx_index = 0;
c27a02cd 1623 int err = 0;
67f8b1dc 1624 int i, t;
c27a02cd 1625 int j;
1679200f 1626 u8 mc_list[16] = {0};
c27a02cd
YP
1627
1628 if (priv->port_up) {
453a6082 1629 en_dbg(DRV, priv, "start port called while port already up\n");
c27a02cd
YP
1630 return 0;
1631 }
1632
6d199937
YP
1633 INIT_LIST_HEAD(&priv->mc_list);
1634 INIT_LIST_HEAD(&priv->curr_list);
0d256c0e
HHZ
1635 INIT_LIST_HEAD(&priv->ethtool_list);
1636 memset(&priv->ethtool_rules[0], 0,
1637 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
6d199937 1638
c27a02cd
YP
1639 /* Calculate Rx buf size */
1640 dev->mtu = min(dev->mtu, priv->max_mtu);
1641 mlx4_en_calc_rx_buf(dev);
453a6082 1642 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
38aab07c 1643
c27a02cd 1644 /* Configure rx cq's and rings */
38aab07c
YP
1645 err = mlx4_en_activate_rx_rings(priv);
1646 if (err) {
453a6082 1647 en_err(priv, "Failed to activate RX rings\n");
38aab07c
YP
1648 return err;
1649 }
c27a02cd 1650 for (i = 0; i < priv->rx_ring_num; i++) {
41d942d5 1651 cq = priv->rx_cq[i];
c27a02cd 1652
9e311e77
YA
1653 err = mlx4_en_init_affinity_hint(priv, i);
1654 if (err) {
1655 en_err(priv, "Failed preparing IRQ affinity hint\n");
1656 goto cq_err;
1657 }
1658
76532d0c 1659 err = mlx4_en_activate_cq(priv, cq, i);
c27a02cd 1660 if (err) {
453a6082 1661 en_err(priv, "Failed activating Rx CQ\n");
9e311e77 1662 mlx4_en_free_affinity_hint(priv, i);
a4233304 1663 goto cq_err;
c27a02cd 1664 }
c3f2511f
IS
1665
1666 for (j = 0; j < cq->size; j++) {
1667 struct mlx4_cqe *cqe = NULL;
1668
1669 cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
1670 priv->cqe_factor;
1671 cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
1672 }
1673
c27a02cd
YP
1674 err = mlx4_en_set_cq_moder(priv, cq);
1675 if (err) {
1a91de28 1676 en_err(priv, "Failed setting cq moderation parameters\n");
c27a02cd 1677 mlx4_en_deactivate_cq(priv, cq);
9e311e77 1678 mlx4_en_free_affinity_hint(priv, i);
c27a02cd
YP
1679 goto cq_err;
1680 }
1681 mlx4_en_arm_cq(priv, cq);
41d942d5 1682 priv->rx_ring[i]->cqn = cq->mcq.cqn;
c27a02cd
YP
1683 ++rx_index;
1684 }
1685
ffe455ad
EE
1686 /* Set qp number */
1687 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
16a10ffd 1688 err = mlx4_en_get_qp(priv);
1679200f 1689 if (err) {
ffe455ad 1690 en_err(priv, "Failed getting eth qp\n");
1679200f
YP
1691 goto cq_err;
1692 }
1693 mdev->mac_removed[priv->port] = 0;
1694
6de5f7f6
EBE
1695 priv->counter_index =
1696 mlx4_get_default_counter_index(mdev->dev, priv->port);
1697
c27a02cd
YP
1698 err = mlx4_en_config_rss_steer(priv);
1699 if (err) {
453a6082 1700 en_err(priv, "Failed configuring rss steering\n");
1679200f 1701 goto mac_err;
c27a02cd
YP
1702 }
1703
cabdc8ee
HHZ
1704 err = mlx4_en_create_drop_qp(priv);
1705 if (err)
1706 goto rss_err;
1707
c27a02cd 1708 /* Configure tx cq's and rings */
67f8b1dc 1709 for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
eb9def61
TT
1710 u8 num_tx_rings_p_up = t == TX ?
1711 priv->num_tx_rings_p_up : priv->tx_ring_num[t];
c27a02cd 1712
67f8b1dc
TT
1713 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1714 /* Configure cq */
1715 cq = priv->tx_cq[t][i];
1716 err = mlx4_en_activate_cq(priv, cq, i);
1717 if (err) {
1718 en_err(priv, "Failed allocating Tx CQ\n");
1719 goto tx_err;
1720 }
1721 err = mlx4_en_set_cq_moder(priv, cq);
1722 if (err) {
1723 en_err(priv, "Failed setting cq moderation parameters\n");
1724 mlx4_en_deactivate_cq(priv, cq);
1725 goto tx_err;
1726 }
1727 en_dbg(DRV, priv,
1728 "Resetting index of collapsed CQ:%d to -1\n", i);
1729 cq->buf->wqe_index = cpu_to_be16(0xffff);
1730
1731 /* Configure ring */
1732 tx_ring = priv->tx_ring[t][i];
1733 err = mlx4_en_activate_tx_ring(priv, tx_ring,
1734 cq->mcq.cqn,
1735 i / num_tx_rings_p_up);
1736 if (err) {
1737 en_err(priv, "Failed allocating Tx ring\n");
1738 mlx4_en_deactivate_cq(priv, cq);
1739 goto tx_err;
1740 }
1741 if (t != TX_XDP) {
1742 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
1743 tx_ring->recycle_ring = NULL;
6c78511b
TT
1744
1745 /* Arm CQ for TX completions */
1746 mlx4_en_arm_cq(priv, cq);
1747
67f8b1dc 1748 } else {
f025fd60 1749 mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring);
67f8b1dc 1750 mlx4_en_init_recycle_ring(priv, i);
6c78511b 1751 /* XDP TX CQ should never be armed */
67f8b1dc 1752 }
9ecc2d86 1753
67f8b1dc
TT
1754 /* Set initial ownership of all Tx TXBBs to SW (1) */
1755 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
1756 *((u32 *)(tx_ring->buf + j)) = 0xffffffff;
1757 }
c27a02cd
YP
1758 }
1759
1760 /* Configure port */
1761 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
1762 priv->rx_skb_size + ETH_FCS_LEN,
d53b93f2
YP
1763 priv->prof->tx_pause,
1764 priv->prof->tx_ppp,
1765 priv->prof->rx_pause,
1766 priv->prof->rx_ppp);
c27a02cd 1767 if (err) {
48e551ff
YB
1768 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
1769 priv->port, err);
c27a02cd
YP
1770 goto tx_err;
1771 }
40fb4fc1
SD
1772
1773 err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
1774 if (err) {
1775 en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
1776 dev->mtu, priv->port, err);
1777 goto tx_err;
1778 }
1779
c27a02cd
YP
1780 /* Set default qp number */
1781 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
1782 if (err) {
453a6082 1783 en_err(priv, "Failed setting default qp numbers\n");
c27a02cd
YP
1784 goto tx_err;
1785 }
c27a02cd 1786
837052d0 1787 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1b136de1 1788 err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
837052d0
OG
1789 if (err) {
1790 en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
1791 err);
1792 goto tx_err;
1793 }
1794 }
1795
c27a02cd 1796 /* Init port */
453a6082 1797 en_dbg(HW, priv, "Initializing port\n");
c27a02cd
YP
1798 err = mlx4_INIT_PORT(mdev->dev, priv->port);
1799 if (err) {
453a6082 1800 en_err(priv, "Failed Initializing port\n");
1679200f 1801 goto tx_err;
c27a02cd
YP
1802 }
1803
ba4b87ae
IS
1804 /* Set Unicast and VXLAN steering rules */
1805 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0 &&
1806 mlx4_en_set_rss_steer_rules(priv))
1807 mlx4_warn(mdev, "Failed setting steering rules\n");
1808
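/* Note: mc_list is a 16-byte GID; the MAC occupies its last six
 * bytes (offset 10) and byte 5 carries the port number required
 * by B0 steering, as the comments below indicate.
 */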
1679200f 1809 /* Attach rx QP to broadcast address */
c7bf7169 1810 eth_broadcast_addr(&mc_list[10]);
0ff1fb65 1811 mc_list[5] = priv->port; /* needed for B0 steering support */
4931c6ef 1812 if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list,
0ff1fb65
HHZ
1813 priv->port, 0, MLX4_PROT_ETH,
1814 &priv->broadcast_id))
1679200f
YP
1815 mlx4_warn(mdev, "Failed Attaching Broadcast\n");
1816
b5845f98
HX
1817 /* Must redo promiscuous mode setup. */
1818 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
1819
c27a02cd 1820 /* Schedule multicast task to populate multicast list */
0eb74fdd 1821 queue_work(mdev->workqueue, &priv->rx_mode_task);
c27a02cd 1822
9737c6ab 1823 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
a831274a
AD
1824 udp_tunnel_get_rx_info(dev);
1825
c27a02cd 1826 priv->port_up = true;
8d59de8f
ES
1827
1828 /* Process any pending completions to prevent
1829 * the queues from freezing if they are full
1830 */
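/* Note: disabling BHs around napi_schedule() ensures the raised
 * NET_RX softirq runs right at local_bh_enable() instead of
 * waiting for the next interrupt.
 */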
8cf699ec
ED
1831 for (i = 0; i < priv->rx_ring_num; i++) {
1832 local_bh_disable();
8d59de8f 1833 napi_schedule(&priv->rx_cq[i]->napi);
8cf699ec
ED
1834 local_bh_enable();
1835 }
8d59de8f 1836
a11faac7 1837 netif_tx_start_all_queues(dev);
3484aac1
AV
1838 netif_device_attach(dev);
1839
c27a02cd
YP
1840 return 0;
1841
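/* Note: the unwind below walks back over whichever TX types/rings were
 * activated: if the setup loop completed, t == MLX4_EN_NUM_TX_TYPES and
 * is stepped back to the last type with its full ring count; otherwise
 * i already indexes the ring that failed within type t.
 */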
c27a02cd 1842tx_err:
67f8b1dc
TT
1843 if (t == MLX4_EN_NUM_TX_TYPES) {
1844 t--;
1845 i = priv->tx_ring_num[t];
1846 }
1847 while (t >= 0) {
1848 while (i--) {
1849 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1850 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1851 }
1852 if (!t--)
1853 break;
1854 i = priv->tx_ring_num[t];
c27a02cd 1855 }
cabdc8ee
HHZ
1856 mlx4_en_destroy_drop_qp(priv);
1857rss_err:
c27a02cd 1858 mlx4_en_release_rss_steer(priv);
1679200f 1859mac_err:
16a10ffd 1860 mlx4_en_put_qp(priv);
c27a02cd 1861cq_err:
9e311e77 1862 while (rx_index--) {
41d942d5 1863 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
f94813f3 1864 mlx4_en_free_affinity_hint(priv, rx_index);
9e311e77 1865 }
38aab07c 1866 for (i = 0; i < priv->rx_ring_num; i++)
41d942d5 1867 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
c27a02cd
YP
1868
1869 return err; /* need to close devices */
1870}
1871
1872
3484aac1 1873void mlx4_en_stop_port(struct net_device *dev, int detach)
c27a02cd
YP
1874{
1875 struct mlx4_en_priv *priv = netdev_priv(dev);
1876 struct mlx4_en_dev *mdev = priv->mdev;
6d199937 1877 struct mlx4_en_mc_list *mclist, *tmp;
0d256c0e 1878 struct ethtool_flow_id *flow, *tmp_flow;
67f8b1dc 1879 int i, t;
1679200f 1880 u8 mc_list[16] = {0};
c27a02cd
YP
1881
1882 if (!priv->port_up) {
453a6082 1883 en_dbg(DRV, priv, "stop port called while port already down\n");
c27a02cd
YP
1884 return;
1885 }
c27a02cd 1886
0cc5c8bf
EE
1887 /* close port */
1888 mlx4_CLOSE_PORT(mdev->dev, priv->port);
1889
c27a02cd
YP
1890 /* Synchronize with tx routine */
1891 netif_tx_lock_bh(dev);
3484aac1
AV
1892 if (detach)
1893 netif_device_detach(dev);
3c05f5ef 1894 netif_tx_stop_all_queues(dev);
c27a02cd
YP
1895 netif_tx_unlock_bh(dev);
1896
3484aac1
AV
1897 netif_tx_disable(dev);
1898
7f7bf160
ED
1899 spin_lock_bh(&priv->stats_lock);
1900 mlx4_en_fold_software_stats(dev);
7c287380 1901 /* Set port as not active */
3c05f5ef 1902 priv->port_up = false;
7f7bf160
ED
1903 spin_unlock_bh(&priv->stats_lock);
1904
6de5f7f6 1905 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
c27a02cd 1906
db0e7cba
AY
1907 /* Promiscuous mode */
1908 if (mdev->dev->caps.steering_mode ==
1909 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1910 priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
1911 MLX4_EN_FLAG_MC_PROMISC);
1912 mlx4_flow_steer_promisc_remove(mdev->dev,
1913 priv->port,
f9162539 1914 MLX4_FS_ALL_DEFAULT);
db0e7cba
AY
1915 mlx4_flow_steer_promisc_remove(mdev->dev,
1916 priv->port,
f9162539 1917 MLX4_FS_MC_DEFAULT);
db0e7cba
AY
1918 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
1919 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
1920
1921 /* Disable promiscuous mode */
1922 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
1923 priv->port);
1924
1925 /* Disable Multicast promisc */
1926 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
1927 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
1928 priv->port);
1929 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
1930 }
1931 }
1932
1679200f 1933 /* Detach All multicasts */
c7bf7169 1934 eth_broadcast_addr(&mc_list[10]);
0ff1fb65 1935 mc_list[5] = priv->port; /* needed for B0 steering support */
4931c6ef 1936 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp, mc_list,
0ff1fb65 1937 MLX4_PROT_ETH, priv->broadcast_id);
6d199937
YP
1938 list_for_each_entry(mclist, &priv->curr_list, list) {
1939 memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
1679200f 1940 mc_list[5] = priv->port;
4931c6ef 1941 mlx4_multicast_detach(mdev->dev, priv->rss_map.indir_qp,
0ff1fb65 1942 mc_list, MLX4_PROT_ETH, mclist->reg_id);
de123268
OG
1943 if (mclist->tunnel_reg_id)
1944 mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
1679200f
YP
1945 }
1946 mlx4_en_clear_list(dev);
6d199937
YP
1947 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
1948 list_del(&mclist->list);
1949 kfree(mclist);
1950 }
1951
1679200f
YP
1952 /* Flush multicast filter */
1953 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
1954
6efb5fac
HHZ
1955 /* Remove flow steering rules for the port */
1956 if (mdev->dev->caps.steering_mode ==
1957 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1958 ASSERT_RTNL();
1959 list_for_each_entry_safe(flow, tmp_flow,
1960 &priv->ethtool_list, list) {
1961 mlx4_flow_detach(mdev->dev, flow->id);
1962 list_del(&flow->list);
1963 }
1964 }
1965
cabdc8ee
HHZ
1966 mlx4_en_destroy_drop_qp(priv);
1967
c27a02cd 1968 /* Free TX Rings */
67f8b1dc
TT
1969 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
1970 for (i = 0; i < priv->tx_ring_num[t]; i++) {
1971 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
1972 mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
1973 }
c27a02cd
YP
1974 }
1975 msleep(10);
1976
67f8b1dc
TT
1977 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
1978 for (i = 0; i < priv->tx_ring_num[t]; i++)
1979 mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
c27a02cd 1980
ba4b87ae
IS
1981 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1982 mlx4_en_delete_rss_steer_rules(priv);
1983
c27a02cd
YP
1984 /* Free RSS qps */
1985 mlx4_en_release_rss_steer(priv);
1986
ffe455ad 1987 /* Unregister Mac address for the port */
16a10ffd 1988 mlx4_en_put_qp(priv);
5930e8d0 1989 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
955154fa 1990 mdev->mac_removed[priv->port] = 1;
ffe455ad 1991
c27a02cd
YP
1992 /* Free RX Rings */
1993 for (i = 0; i < priv->rx_ring_num; i++) {
41d942d5 1994 struct mlx4_en_cq *cq = priv->rx_cq[i];
9e77a2b8 1995
f4a36751 1996 napi_synchronize(&cq->napi);
41d942d5 1997 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
9e77a2b8 1998 mlx4_en_deactivate_cq(priv, cq);
9e311e77
YA
1999
2000 mlx4_en_free_affinity_hint(priv, i);
c27a02cd
YP
2001 }
2002}
2003
2004static void mlx4_en_restart(struct work_struct *work)
2005{
2006 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2007 watchdog_task);
2008 struct mlx4_en_dev *mdev = priv->mdev;
2009 struct net_device *dev = priv->dev;
2010
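/* Note: rtnl_lock is taken before mdev->state_lock, matching the lock
 * ordering of the regular ndo_open/ndo_stop paths under which the port
 * is normally started and stopped.
 */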
453a6082 2011 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
1e338db5 2012
0c5c3252 2013 rtnl_lock();
1e338db5
YP
2014 mutex_lock(&mdev->state_lock);
2015 if (priv->port_up) {
3484aac1 2016 mlx4_en_stop_port(dev, 1);
1e338db5 2017 if (mlx4_en_start_port(dev))
453a6082 2018 en_err(priv, "Failed restarting port %d\n", priv->port);
1e338db5
YP
2019 }
2020 mutex_unlock(&mdev->state_lock);
0c5c3252 2021 rtnl_unlock();
c27a02cd
YP
2022}
2023
b477ba62 2024static void mlx4_en_clear_stats(struct net_device *dev)
c27a02cd
YP
2025{
2026 struct mlx4_en_priv *priv = netdev_priv(dev);
2027 struct mlx4_en_dev *mdev = priv->mdev;
67f8b1dc 2028 struct mlx4_en_tx_ring **tx_ring;
c27a02cd 2029 int i;
c27a02cd 2030
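/* Note: a dump with the reset argument set (the trailing 1) also
 * clears the HW port counters, which is the point of calling it here.
 */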
eb4b6788
TT
2031 if (!mlx4_is_slave(mdev->dev))
2032 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
2033 en_dbg(HW, priv, "Failed dumping statistics\n");
c27a02cd 2034
c27a02cd 2035 memset(&priv->pstats, 0, sizeof(priv->pstats));
b477ba62
EE
2036 memset(&priv->pkstats, 0, sizeof(priv->pkstats));
2037 memset(&priv->port_stats, 0, sizeof(priv->port_stats));
0b131561
MB
2038 memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
2039 memset(&priv->tx_flowstats, 0, sizeof(priv->tx_flowstats));
2040 memset(&priv->rx_priority_flowstats, 0,
2041 sizeof(priv->rx_priority_flowstats));
2042 memset(&priv->tx_priority_flowstats, 0,
2043 sizeof(priv->tx_priority_flowstats));
b42de4d0 2044 memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
c27a02cd 2045
67f8b1dc
TT
2046 tx_ring = priv->tx_ring[TX];
2047 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
2048 tx_ring[i]->bytes = 0;
2049 tx_ring[i]->packets = 0;
2050 tx_ring[i]->tx_csum = 0;
2051 tx_ring[i]->tx_dropped = 0;
2052 tx_ring[i]->queue_stopped = 0;
2053 tx_ring[i]->wake_queue = 0;
2054 tx_ring[i]->tso_packets = 0;
2055 tx_ring[i]->xmit_more = 0;
c27a02cd
YP
2056 }
2057 for (i = 0; i < priv->rx_ring_num; i++) {
41d942d5
EE
2058 priv->rx_ring[i]->bytes = 0;
2059 priv->rx_ring[i]->packets = 0;
2060 priv->rx_ring[i]->csum_ok = 0;
2061 priv->rx_ring[i]->csum_none = 0;
f8c6455b 2062 priv->rx_ring[i]->csum_complete = 0;
c27a02cd 2063 }
b477ba62
EE
2064}
2065
2066static int mlx4_en_open(struct net_device *dev)
2067{
2068 struct mlx4_en_priv *priv = netdev_priv(dev);
2069 struct mlx4_en_dev *mdev = priv->mdev;
2070 int err = 0;
2071
2072 mutex_lock(&mdev->state_lock);
2073
2074 if (!mdev->device_up) {
2075 en_err(priv, "Cannot open - device down/disabled\n");
2076 err = -EBUSY;
2077 goto out;
2078 }
2079
2080 /* Reset HW statistics and SW counters */
2081 mlx4_en_clear_stats(dev);
c27a02cd 2082
c27a02cd
YP
2083 err = mlx4_en_start_port(dev);
2084 if (err)
453a6082 2085 en_err(priv, "Failed starting port:%d\n", priv->port);
c27a02cd
YP
2086
2087out:
2088 mutex_unlock(&mdev->state_lock);
2089 return err;
2090}
2091
2092
2093static int mlx4_en_close(struct net_device *dev)
2094{
2095 struct mlx4_en_priv *priv = netdev_priv(dev);
2096 struct mlx4_en_dev *mdev = priv->mdev;
2097
453a6082 2098 en_dbg(IFDOWN, priv, "Close port called\n");
c27a02cd
YP
2099
2100 mutex_lock(&mdev->state_lock);
2101
3484aac1 2102 mlx4_en_stop_port(dev, 0);
c27a02cd
YP
2103 netif_carrier_off(dev);
2104
2105 mutex_unlock(&mdev->state_lock);
2106 return 0;
2107}
2108
ec25bc04 2109static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
c27a02cd 2110{
67f8b1dc 2111 int i, t;
c27a02cd 2112
1eb8c695 2113#ifdef CONFIG_RFS_ACCEL
1eb8c695
AV
2114 priv->dev->rx_cpu_rmap = NULL;
2115#endif
2116
67f8b1dc
TT
2117 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2118 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2119 if (priv->tx_ring[t] && priv->tx_ring[t][i])
2120 mlx4_en_destroy_tx_ring(priv,
2121 &priv->tx_ring[t][i]);
2122 if (priv->tx_cq[t] && priv->tx_cq[t][i])
2123 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2124 }
f32b20e8
MKL
2125 kfree(priv->tx_ring[t]);
2126 kfree(priv->tx_cq[t]);
c27a02cd
YP
2127 }
2128
2129 for (i = 0; i < priv->rx_ring_num; i++) {
41d942d5 2130 if (priv->rx_ring[i])
68355f71
TLSC
2131 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2132 priv->prof->rx_ring_size, priv->stride);
41d942d5 2133 if (priv->rx_cq[i])
fe0af03c 2134 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
c27a02cd 2135 }
044ca2a5 2136
c27a02cd
YP
2137}
2138
ec25bc04 2139static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
c27a02cd 2140{
c27a02cd 2141 struct mlx4_en_port_profile *prof = priv->prof;
67f8b1dc 2142 int i, t;
163561a4 2143 int node;
87a5c389 2144
c27a02cd 2145 /* Create tx Rings */
67f8b1dc
TT
2146 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2147 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2148 node = cpu_to_node(i % num_online_cpus());
2149 if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
2150 prof->tx_ring_size, i, t, node))
2151 goto err;
2152
2153 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
2154 prof->tx_ring_size,
2155 TXBB_SIZE, node, i))
2156 goto err;
2157 }
c27a02cd
YP
2158 }
2159
2160 /* Create rx Rings */
2161 for (i = 0; i < priv->rx_ring_num; i++) {
163561a4 2162 node = cpu_to_node(i % num_online_cpus());
c27a02cd 2163 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
163561a4 2164 prof->rx_ring_size, i, RX, node))
c27a02cd
YP
2165 goto err;
2166
2167 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
163561a4 2168 prof->rx_ring_size, priv->stride,
ae75415d 2169 node, i))
c27a02cd 2170 goto err;
ae75415d 2171
c27a02cd
YP
2172 }
2173
1eb8c695 2174#ifdef CONFIG_RFS_ACCEL
c66fa19c 2175 priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
1eb8c695
AV
2176#endif
2177
c27a02cd
YP
2178 return 0;
2179
2180err:
453a6082 2181 en_err(priv, "Failed to allocate NIC resources\n");
41d942d5
EE
2182 for (i = 0; i < priv->rx_ring_num; i++) {
2183 if (priv->rx_ring[i])
2184 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
2185 prof->rx_ring_size,
2186 priv->stride);
2187 if (priv->rx_cq[i])
2188 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
2189 }
67f8b1dc
TT
2190 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2191 for (i = 0; i < priv->tx_ring_num[t]; i++) {
2192 if (priv->tx_ring[t][i])
2193 mlx4_en_destroy_tx_ring(priv,
2194 &priv->tx_ring[t][i]);
2195 if (priv->tx_cq[t][i])
2196 mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
2197 }
41d942d5 2198 }
c27a02cd
YP
2199 return -ENOMEM;
2200}
2201
2202
ec25bc04
EE
2203static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
2204 struct mlx4_en_priv *src,
2205 struct mlx4_en_port_profile *prof)
2206{
67f8b1dc
TT
2207 int t;
2208
ec25bc04
EE
2209 memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
2210 sizeof(dst->hwtstamp_config));
ec327f7a 2211 dst->num_tx_rings_p_up = prof->num_tx_rings_p_up;
ec25bc04
EE
2212 dst->rx_ring_num = prof->rx_ring_num;
2213 dst->flags = prof->flags;
2214 dst->mdev = src->mdev;
2215 dst->port = src->port;
2216 dst->dev = src->dev;
2217 dst->prof = prof;
2218 dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
2219 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
2220
67f8b1dc
TT
2221 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2222 dst->tx_ring_num[t] = prof->tx_ring_num[t];
2223 if (!dst->tx_ring_num[t])
2224 continue;
ec25bc04 2225
6396bb22
KC
2226 dst->tx_ring[t] = kcalloc(MAX_TX_RINGS,
2227 sizeof(struct mlx4_en_tx_ring *),
2228 GFP_KERNEL);
67f8b1dc
TT
2229 if (!dst->tx_ring[t])
2230 goto err_free_tx;
2231
6396bb22
KC
2232 dst->tx_cq[t] = kcalloc(MAX_TX_RINGS,
2233 sizeof(struct mlx4_en_cq *),
2234 GFP_KERNEL);
67f8b1dc
TT
2235 if (!dst->tx_cq[t]) {
2236 kfree(dst->tx_ring[t]);
2237 goto err_free_tx;
2238 }
ec25bc04 2239 }
67f8b1dc 2240
ec25bc04 2241 return 0;
67f8b1dc
TT
2242
2243err_free_tx:
2244 while (t--) {
2245 kfree(dst->tx_ring[t]);
2246 kfree(dst->tx_cq[t]);
2247 }
2248 return -ENOMEM;
ec25bc04
EE
2249}
2250
2251static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
2252 struct mlx4_en_priv *src)
2253{
67f8b1dc 2254 int t;
ec25bc04
EE
2255 memcpy(dst->rx_ring, src->rx_ring,
2256 sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
2257 memcpy(dst->rx_cq, src->rx_cq,
2258 sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
2259 memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
2260 sizeof(dst->hwtstamp_config));
67f8b1dc
TT
2261 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2262 dst->tx_ring_num[t] = src->tx_ring_num[t];
2263 dst->tx_ring[t] = src->tx_ring[t];
2264 dst->tx_cq[t] = src->tx_cq[t];
2265 }
ec327f7a 2266 dst->num_tx_rings_p_up = src->num_tx_rings_p_up;
ec25bc04 2267 dst->rx_ring_num = src->rx_ring_num;
ec25bc04
EE
2268 memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
2269}
2270
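/* Note: reconfiguration is double-buffered: mlx4_en_try_alloc_resources()
 * builds the new ring set in a scratch priv ("tmp") so a failed
 * allocation leaves the running configuration untouched, and only then
 * does mlx4_en_safe_replace_resources() free the old rings and adopt
 * the new ones.
 */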
2271int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
2272 struct mlx4_en_priv *tmp,
770f8225
MKL
2273 struct mlx4_en_port_profile *prof,
2274 bool carry_xdp_prog)
ec25bc04 2275{
770f8225
MKL
2276 struct bpf_prog *xdp_prog;
2277 int i, t;
67f8b1dc 2278
ec25bc04
EE
2279 mlx4_en_copy_priv(tmp, priv, prof);
2280
2281 if (mlx4_en_alloc_resources(tmp)) {
2282 en_warn(priv,
2283 "%s: Resource allocation failed, using previous configuration\n",
2284 __func__);
67f8b1dc
TT
2285 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
2286 kfree(tmp->tx_ring[t]);
2287 kfree(tmp->tx_cq[t]);
2288 }
ec25bc04
EE
2289 return -ENOMEM;
2290 }
770f8225
MKL
2291
2292 /* All rx_rings have the same xdp_prog. Pick the first one. */
2293 xdp_prog = rcu_dereference_protected(
2294 priv->rx_ring[0]->xdp_prog,
2295 lockdep_is_held(&priv->mdev->state_lock));
2296
2297 if (xdp_prog && carry_xdp_prog) {
85192dbf 2298 bpf_prog_add(xdp_prog, tmp->rx_ring_num);
770f8225
MKL
2299 for (i = 0; i < tmp->rx_ring_num; i++)
2300 rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
2301 xdp_prog);
2302 }
2303
ec25bc04
EE
2304 return 0;
2305}
2306
2307void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
2308 struct mlx4_en_priv *tmp)
2309{
2310 mlx4_en_free_resources(priv);
2311 mlx4_en_update_priv(priv, tmp);
2312}
2313
c27a02cd
YP
2314void mlx4_en_destroy_netdev(struct net_device *dev)
2315{
2316 struct mlx4_en_priv *priv = netdev_priv(dev);
2317 struct mlx4_en_dev *mdev = priv->mdev;
2318
453a6082 2319 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
c27a02cd
YP
2320
2321 /* Unregister device - this will close the port if it was up */
09d4d087
JP
2322 if (priv->registered) {
2323 devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
2324 priv->port));
b4353708 2325 unregister_netdev(dev);
09d4d087 2326 }
c27a02cd
YP
2327
2328 if (priv->allocated)
2329 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
2330
2331 cancel_delayed_work(&priv->stats_task);
b6c39bfc 2332 cancel_delayed_work(&priv->service_task);
c27a02cd
YP
2333 /* flush any pending task for this netdev */
2334 flush_workqueue(mdev->workqueue);
2335
90683061
EE
2336 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
2337 mlx4_en_remove_timestamp(mdev);
2338
c27a02cd
YP
2339 /* Detach the netdev so tasks would not attempt to access it */
2340 mutex_lock(&mdev->state_lock);
2341 mdev->pndev[priv->port] = NULL;
5da03547 2342 mdev->upper[priv->port] = NULL;
c27a02cd 2343
30f56e3c
EE
2344#ifdef CONFIG_RFS_ACCEL
2345 mlx4_en_cleanup_filters(priv);
2346#endif
2347
fe0af03c 2348 mlx4_en_free_resources(priv);
b6e01232 2349 mutex_unlock(&mdev->state_lock);
564c274c 2350
b4353708 2351 free_netdev(dev);
c27a02cd
YP
2352}
2353
b45f0674
MKL
2354static bool mlx4_en_check_xdp_mtu(struct net_device *dev, int mtu)
2355{
2356 struct mlx4_en_priv *priv = netdev_priv(dev);
2357
2358 if (mtu > MLX4_EN_MAX_XDP_MTU) {
2359 en_err(priv, "mtu:%d > max:%d when XDP prog is attached\n",
2360 mtu, MLX4_EN_MAX_XDP_MTU);
2361 return false;
2362 }
2363
2364 return true;
2365}
2366
c27a02cd
YP
2367static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
2368{
2369 struct mlx4_en_priv *priv = netdev_priv(dev);
2370 struct mlx4_en_dev *mdev = priv->mdev;
2371 int err = 0;
2372
453a6082 2373 en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
c27a02cd
YP
2374 dev->mtu, new_mtu);
2375
b45f0674
MKL
2376 if (priv->tx_ring_num[TX_XDP] &&
2377 !mlx4_en_check_xdp_mtu(dev, new_mtu))
9f9b74ef 2378 return -EOPNOTSUPP;
b45f0674 2379
c27a02cd
YP
2380 dev->mtu = new_mtu;
2381
2382 if (netif_running(dev)) {
2383 mutex_lock(&mdev->state_lock);
2384 if (!mdev->device_up) {
2385 /* NIC is probably restarting - let watchdog task reset
2386 * the port */
453a6082 2387 en_dbg(DRV, priv, "Change MTU called with card down!?\n");
c27a02cd 2388 } else {
3484aac1 2389 mlx4_en_stop_port(dev, 1);
c27a02cd
YP
2390 err = mlx4_en_start_port(dev);
2391 if (err) {
453a6082 2392 en_err(priv, "Failed restarting port:%d\n",
c27a02cd
YP
2393 priv->port);
2394 queue_work(mdev->workqueue, &priv->watchdog_task);
2395 }
2396 }
2397 mutex_unlock(&mdev->state_lock);
2398 }
2399 return 0;
2400}
2401
100dbda8 2402static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
ec693d47
AV
2403{
2404 struct mlx4_en_priv *priv = netdev_priv(dev);
2405 struct mlx4_en_dev *mdev = priv->mdev;
2406 struct hwtstamp_config config;
2407
2408 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
2409 return -EFAULT;
2410
2411 /* reserved for future extensions */
2412 if (config.flags)
2413 return -EINVAL;
2414
2415 /* device doesn't support time stamping */
2416 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
2417 return -EINVAL;
2418
2419 /* TX HW timestamp */
2420 switch (config.tx_type) {
2421 case HWTSTAMP_TX_OFF:
2422 case HWTSTAMP_TX_ON:
2423 break;
2424 default:
2425 return -ERANGE;
2426 }
2427
2428 /* RX HW timestamp */
2429 switch (config.rx_filter) {
2430 case HWTSTAMP_FILTER_NONE:
2431 break;
2432 case HWTSTAMP_FILTER_ALL:
2433 case HWTSTAMP_FILTER_SOME:
2434 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2435 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2436 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2437 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2438 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2439 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2440 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2441 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2442 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2443 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2444 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2445 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
e3412575 2446 case HWTSTAMP_FILTER_NTP_ALL:
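/* Note: the hardware timestamps all received frames, so each of the
 * specific filter requests above is upgraded to HWTSTAMP_FILTER_ALL.
 */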
ec693d47
AV
2447 config.rx_filter = HWTSTAMP_FILTER_ALL;
2448 break;
2449 default:
2450 return -ERANGE;
2451 }
2452
7787fa66 2453 if (mlx4_en_reset_config(dev, config, dev->features)) {
ec693d47
AV
2454 config.tx_type = HWTSTAMP_TX_OFF;
2455 config.rx_filter = HWTSTAMP_FILTER_NONE;
2456 }
2457
2458 return copy_to_user(ifr->ifr_data, &config,
2459 sizeof(config)) ? -EFAULT : 0;
2460}
2461
100dbda8
BH
2462static int mlx4_en_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2463{
2464 struct mlx4_en_priv *priv = netdev_priv(dev);
2465
2466 return copy_to_user(ifr->ifr_data, &priv->hwtstamp_config,
2467 sizeof(priv->hwtstamp_config)) ? -EFAULT : 0;
2468}
2469
ec693d47
AV
2470static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2471{
2472 switch (cmd) {
2473 case SIOCSHWTSTAMP:
100dbda8
BH
2474 return mlx4_en_hwtstamp_set(dev, ifr);
2475 case SIOCGHWTSTAMP:
2476 return mlx4_en_hwtstamp_get(dev, ifr);
ec693d47
AV
2477 default:
2478 return -EOPNOTSUPP;
2479 }
2480}
2481
e38af4fa
HHZ
2482static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
2483 netdev_features_t features)
2484{
2485 struct mlx4_en_priv *en_priv = netdev_priv(netdev);
2486 struct mlx4_en_dev *mdev = en_priv->mdev;
2487
2488 /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
2489 * enable/disable make sure S-TAG flag is always in same state as
2490 * C-TAG.
2491 */
2492 if (features & NETIF_F_HW_VLAN_CTAG_RX &&
2493 !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
2494 features |= NETIF_F_HW_VLAN_STAG_RX;
2495 else
2496 features &= ~NETIF_F_HW_VLAN_STAG_RX;
2497
2498 return features;
2499}
2500
60d6fe99
AV
2501static int mlx4_en_set_features(struct net_device *netdev,
2502 netdev_features_t features)
2503{
2504 struct mlx4_en_priv *priv = netdev_priv(netdev);
f0df3503 2505 bool reset = false;
537f6f95
SM
2506 int ret = 0;
2507
f0df3503
MM
2508 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXFCS)) {
2509 en_info(priv, "Turn %s RX-FCS\n",
2510 (features & NETIF_F_RXFCS) ? "ON" : "OFF");
2511 reset = true;
2512 }
2513
78500b8c
MM
2514 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_RXALL)) {
2515 u8 ignore_fcs_value = (features & NETIF_F_RXALL) ? 1 : 0;
2516
2517 en_info(priv, "Turn %s RX-ALL\n",
2518 ignore_fcs_value ? "ON" : "OFF");
2519 ret = mlx4_SET_PORT_fcs_check(priv->mdev->dev,
2520 priv->port, ignore_fcs_value);
2521 if (ret)
2522 return ret;
2523 }
2524
537f6f95
SM
2525 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
2526 en_info(priv, "Turn %s RX vlan strip offload\n",
2527 (features & NETIF_F_HW_VLAN_CTAG_RX) ? "ON" : "OFF");
f0df3503 2528 reset = true;
537f6f95 2529 }
60d6fe99 2530
cfb53f36
IS
2531 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
2532 en_info(priv, "Turn %s TX vlan strip offload\n",
2533 (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
2534
e38af4fa
HHZ
2535 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
2536 en_info(priv, "Turn %s TX S-VLAN strip offload\n",
2537 (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
2538
241a08c3
IS
2539 if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
2540 en_info(priv, "Turn %s loopback\n",
2541 (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
2542 mlx4_en_update_loopback_state(netdev, features);
2543 }
79aeaccd 2544
f0df3503
MM
2545 if (reset) {
2546 ret = mlx4_en_reset_config(netdev, priv->hwtstamp_config,
2547 features);
2548 if (ret)
2549 return ret;
2550 }
60d6fe99 2551
f0df3503 2552 return 0;
60d6fe99
AV
2553}
2554
8f7ba3ca
RE
2555static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2556{
2557 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2558 struct mlx4_en_dev *mdev = en_priv->mdev;
8f7ba3ca 2559
745d8ae4 2560 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac);
8f7ba3ca
RE
2561}
2562
79aab093
MS
2563static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
2564 __be16 vlan_proto)
3f7fb021
RE
2565{
2566 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2567 struct mlx4_en_dev *mdev = en_priv->mdev;
2568
b42959dc
MS
2569 return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos,
2570 vlan_proto);
3f7fb021
RE
2571}
2572
cda373f4
IS
2573static int mlx4_en_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
2574 int max_tx_rate)
2575{
2576 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2577 struct mlx4_en_dev *mdev = en_priv->mdev;
2578
2579 return mlx4_set_vf_rate(mdev->dev, en_priv->port, vf, min_tx_rate,
2580 max_tx_rate);
2581}
2582
e6b6a231
RE
2583static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2584{
2585 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2586 struct mlx4_en_dev *mdev = en_priv->mdev;
2587
2588 return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
2589}
2590
2cccb9e4
RE
2591static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
2592{
2593 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2594 struct mlx4_en_dev *mdev = en_priv->mdev;
2595
2596 return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
2597}
8f7ba3ca 2598
948e306d
RE
2599static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
2600{
2601 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2602 struct mlx4_en_dev *mdev = en_priv->mdev;
2603
2604 return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
2605}
84c86403 2606
62a89055
EBE
2607static int mlx4_en_get_vf_stats(struct net_device *dev, int vf,
2608 struct ifla_vf_stats *vf_stats)
2609{
2610 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2611 struct mlx4_en_dev *mdev = en_priv->mdev;
2612
2613 return mlx4_get_vf_stats(mdev->dev, en_priv->port, vf, vf_stats);
2614}
2615
84c86403
HHZ
2616#define PORT_ID_BYTE_LEN 8
2617static int mlx4_en_get_phys_port_id(struct net_device *dev,
02637fce 2618 struct netdev_phys_item_id *ppid)
84c86403
HHZ
2619{
2620 struct mlx4_en_priv *priv = netdev_priv(dev);
2621 struct mlx4_dev *mdev = priv->mdev->dev;
2622 int i;
2623 u64 phys_port_id = mdev->caps.phys_port_id[priv->port];
2624
2625 if (!phys_port_id)
2626 return -EOPNOTSUPP;
2627
2628 ppid->id_len = sizeof(phys_port_id);
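/* Note: the loop below stores the 64-bit id most-significant byte
 * first, i.e. ppid->id[] ends up in big-endian order.
 */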
2629 for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
2630 ppid->id[i] = phys_port_id & 0xff;
2631 phys_port_id >>= 8;
2632 }
2633 return 0;
2634}
2635
1b136de1
OG
2636static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2637{
2638 int ret;
2639 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2640 vxlan_add_task);
2641
2642 ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
2643 if (ret)
2644 goto out;
2645
2646 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2647 VXLAN_STEER_BY_OUTER_MAC, 1);
2648out:
f4a1edd5 2649 if (ret) {
1b136de1 2650 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
f4a1edd5
OG
2651 return;
2652 }
1b136de1
OG
2653}
2654
2655static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2656{
2657 int ret;
2658 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2659 vxlan_del_task);
1b136de1
OG
2660 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2661 VXLAN_STEER_BY_OUTER_MAC, 0);
2662 if (ret)
2663 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2664
2665 priv->vxlan_port = 0;
2666}
2667
2668static void mlx4_en_add_vxlan_port(struct net_device *dev,
a831274a 2669 struct udp_tunnel_info *ti)
1b136de1
OG
2670{
2671 struct mlx4_en_priv *priv = netdev_priv(dev);
a831274a 2672 __be16 port = ti->port;
1b136de1
OG
2673 __be16 current_port;
2674
a831274a 2675 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
1b136de1
OG
2676 return;
2677
a831274a
AD
2678 if (ti->sa_family != AF_INET)
2679 return;
2680
2681 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1b136de1
OG
2682 return;
2683
2684 current_port = priv->vxlan_port;
2685 if (current_port && current_port != port) {
2686 en_warn(priv, "vxlan port %d configured, can't add port %d\n",
2687 ntohs(current_port), ntohs(port));
2688 return;
2689 }
2690
2691 priv->vxlan_port = port;
2692 queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
2693}
2694
2695static void mlx4_en_del_vxlan_port(struct net_device *dev,
a831274a 2696 struct udp_tunnel_info *ti)
1b136de1
OG
2697{
2698 struct mlx4_en_priv *priv = netdev_priv(dev);
a831274a 2699 __be16 port = ti->port;
1b136de1
OG
2700 __be16 current_port;
2701
a831274a 2702 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
1b136de1
OG
2703 return;
2704
a831274a
AD
2705 if (ti->sa_family != AF_INET)
2706 return;
2707
2708 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1b136de1
OG
2709 return;
2710
2711 current_port = priv->vxlan_port;
2712 if (current_port != port) {
2713 en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
2714 return;
2715 }
2716
2717 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2718}
956bdab2 2719
5f35227e
JG
2720static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
2721 struct net_device *dev,
2722 netdev_features_t features)
956bdab2 2723{
8cb65d00 2724 features = vlan_features_check(skb, features);
09067122
AD
2725 features = vxlan_features_check(skb, features);
2726
2727 /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
2728 * support inner IPv6 checksums and segmentation so we need to
2729 * strip that feature if this is an IPv6 encapsulated frame.
2730 */
2731 if (skb->encapsulation &&
a547224d
AD
2732 (skb->ip_summed == CHECKSUM_PARTIAL)) {
2733 struct mlx4_en_priv *priv = netdev_priv(dev);
2734
2735 if (!priv->vxlan_port ||
2736 (ip_hdr(skb)->version != 4) ||
2737 (udp_hdr(skb)->dest != priv->vxlan_port))
2738 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2739 }
09067122
AD
2740
2741 return features;
956bdab2 2742}
1b136de1 2743
de1cf8a7 2744static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
c10e4fc6
OG
2745{
2746 struct mlx4_en_priv *priv = netdev_priv(dev);
67f8b1dc 2747 struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
c10e4fc6
OG
2748 struct mlx4_update_qp_params params;
2749 int err;
2750
2751 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT))
2752 return -EOPNOTSUPP;
2753
2754 /* rate is provided to us in Mb/s; check if it fits into 12 bits, if not use Gb/s */
2755 if (maxrate >> 12) {
2756 params.rate_unit = MLX4_QP_RATE_LIMIT_GBS;
2757 params.rate_val = maxrate / 1000;
2758 } else if (maxrate) {
2759 params.rate_unit = MLX4_QP_RATE_LIMIT_MBS;
2760 params.rate_val = maxrate;
2761 } else { /* zero serves to revoke the QP rate-limitation */
2762 params.rate_unit = 0;
2763 params.rate_val = 0;
2764 }
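/* Note: e.g. maxrate = 5000 (Mb/s) does not fit in 12 bits
 * (5000 >> 12 == 1), so it is programmed as rate_val = 5 in Gb/s.
 */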
2765
2766 err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
2767 &params);
2768 return err;
2769}
2770
47a38e15
BB
2771static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
2772{
2773 struct mlx4_en_priv *priv = netdev_priv(dev);
d576acf0 2774 struct mlx4_en_dev *mdev = priv->mdev;
67f8b1dc 2775 struct mlx4_en_port_profile new_prof;
47a38e15 2776 struct bpf_prog *old_prog;
67f8b1dc
TT
2777 struct mlx4_en_priv *tmp;
2778 int tx_changed = 0;
47a38e15 2779 int xdp_ring_num;
d576acf0
BB
2780 int port_up = 0;
2781 int err;
47a38e15
BB
2782 int i;
2783
67f8b1dc 2784 xdp_ring_num = prog ? priv->rx_ring_num : 0;
47a38e15 2785
d576acf0
BB
2786 /* No need to reconfigure buffers when simply swapping the
2787 * program for a new one.
2788 */
67f8b1dc 2789 if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
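/* Note: the caller hands over a single program reference; every RX
 * ring keeps its own, hence the rx_ring_num - 1 extra references
 * taken below.
 */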
85192dbf
AN
2790 if (prog)
2791 bpf_prog_add(prog, priv->rx_ring_num - 1);
2792
326fe02d 2793 mutex_lock(&mdev->state_lock);
d576acf0 2794 for (i = 0; i < priv->rx_ring_num; i++) {
326fe02d
BB
2795 old_prog = rcu_dereference_protected(
2796 priv->rx_ring[i]->xdp_prog,
2797 lockdep_is_held(&mdev->state_lock));
2798 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
d576acf0
BB
2799 if (old_prog)
2800 bpf_prog_put(old_prog);
2801 }
326fe02d 2802 mutex_unlock(&mdev->state_lock);
d576acf0
BB
2803 return 0;
2804 }
2805
b45f0674 2806 if (!mlx4_en_check_xdp_mtu(dev, dev->mtu))
47a38e15 2807 return -EOPNOTSUPP;
47a38e15 2808
67f8b1dc
TT
2809 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
2810 if (!tmp)
2811 return -ENOMEM;
9ecc2d86 2812
85192dbf
AN
2813 if (prog)
2814 bpf_prog_add(prog, priv->rx_ring_num - 1);
47a38e15 2815
d576acf0 2816 mutex_lock(&mdev->state_lock);
67f8b1dc
TT
2817 memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
2818 new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
2819
2820 if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
2821 tx_changed = 1;
2822 new_prof.tx_ring_num[TX] =
f21ad614 2823 MAX_TX_RINGS - ALIGN(xdp_ring_num, priv->prof->num_up);
67f8b1dc
TT
2824 en_warn(priv, "Reducing the number of TX rings so as not to exceed the max total number of rings.\n");
2825 }
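/* Note: XDP TX rings are carved from the same MAX_TX_RINGS budget as
 * regular TX rings, so the regular count is trimmed (aligned to the
 * number of UPs so the remaining rings still divide evenly among UPs)
 * to stay within that budget.
 */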
2826
770f8225 2827 err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
c540594f
DB
2828 if (err) {
2829 if (prog)
2830 bpf_prog_sub(prog, priv->rx_ring_num - 1);
67f8b1dc 2831 goto unlock_out;
c540594f 2832 }
67f8b1dc 2833
d576acf0
BB
2834 if (priv->port_up) {
2835 port_up = 1;
2836 mlx4_en_stop_port(dev, 1);
2837 }
2838
67f8b1dc
TT
2839 mlx4_en_safe_replace_resources(priv, tmp);
2840 if (tx_changed)
2841 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
47a38e15 2842
47a38e15 2843 for (i = 0; i < priv->rx_ring_num; i++) {
326fe02d
BB
2844 old_prog = rcu_dereference_protected(
2845 priv->rx_ring[i]->xdp_prog,
2846 lockdep_is_held(&mdev->state_lock));
2847 rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
47a38e15
BB
2848 if (old_prog)
2849 bpf_prog_put(old_prog);
2850 }
2851
d576acf0
BB
2852 if (port_up) {
2853 err = mlx4_en_start_port(dev);
2854 if (err) {
2855 en_err(priv, "Failed starting port %d for XDP change\n",
2856 priv->port);
2857 queue_work(mdev->workqueue, &priv->watchdog_task);
2858 }
2859 }
2860
67f8b1dc 2861unlock_out:
d576acf0 2862 mutex_unlock(&mdev->state_lock);
67f8b1dc
TT
2863 kfree(tmp);
2864 return err;
47a38e15
BB
2865}
2866
2e37e9b0 2867static u32 mlx4_xdp_query(struct net_device *dev)
47a38e15
BB
2868{
2869 struct mlx4_en_priv *priv = netdev_priv(dev);
2e37e9b0
MKL
2870 struct mlx4_en_dev *mdev = priv->mdev;
2871 const struct bpf_prog *xdp_prog;
2872 u32 prog_id = 0;
2873
2874 if (!priv->tx_ring_num[TX_XDP])
2875 return prog_id;
2876
2877 mutex_lock(&mdev->state_lock);
2878 xdp_prog = rcu_dereference_protected(
2879 priv->rx_ring[0]->xdp_prog,
2880 lockdep_is_held(&mdev->state_lock));
2881 if (xdp_prog)
2882 prog_id = xdp_prog->aux->id;
2883 mutex_unlock(&mdev->state_lock);
47a38e15 2884
2e37e9b0 2885 return prog_id;
47a38e15
BB
2886}
2887
f4e63525 2888static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp)
47a38e15
BB
2889{
2890 switch (xdp->command) {
2891 case XDP_SETUP_PROG:
2892 return mlx4_xdp_set(dev, xdp->prog);
2893 case XDP_QUERY_PROG:
2e37e9b0 2894 xdp->prog_id = mlx4_xdp_query(dev);
47a38e15
BB
2895 return 0;
2896 default:
2897 return -EINVAL;
2898 }
2899}
2900
3addc568
SH
2901static const struct net_device_ops mlx4_netdev_ops = {
2902 .ndo_open = mlx4_en_open,
2903 .ndo_stop = mlx4_en_close,
2904 .ndo_start_xmit = mlx4_en_xmit,
f813cad8 2905 .ndo_select_queue = mlx4_en_select_queue,
9ed17db1 2906 .ndo_get_stats64 = mlx4_en_get_stats64,
0eb74fdd 2907 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
3addc568 2908 .ndo_set_mac_address = mlx4_en_set_mac,
52255bbe 2909 .ndo_validate_addr = eth_validate_addr,
3addc568 2910 .ndo_change_mtu = mlx4_en_change_mtu,
ec693d47 2911 .ndo_do_ioctl = mlx4_en_ioctl,
3addc568 2912 .ndo_tx_timeout = mlx4_en_tx_timeout,
3addc568
SH
2913 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2914 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
60d6fe99 2915 .ndo_set_features = mlx4_en_set_features,
e38af4fa 2916 .ndo_fix_features = mlx4_en_fix_features,
e4c6734e 2917 .ndo_setup_tc = __mlx4_en_setup_tc,
1eb8c695
AV
2918#ifdef CONFIG_RFS_ACCEL
2919 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
9e77a2b8 2920#endif
84c86403 2921 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
a831274a
AD
2922 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2923 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
5f35227e 2924 .ndo_features_check = mlx4_en_features_check,
c10e4fc6 2925 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
f4e63525 2926 .ndo_bpf = mlx4_xdp,
3addc568
SH
2927};
2928
8f7ba3ca
RE
2929static const struct net_device_ops mlx4_netdev_ops_master = {
2930 .ndo_open = mlx4_en_open,
2931 .ndo_stop = mlx4_en_close,
2932 .ndo_start_xmit = mlx4_en_xmit,
2933 .ndo_select_queue = mlx4_en_select_queue,
9ed17db1 2934 .ndo_get_stats64 = mlx4_en_get_stats64,
8f7ba3ca
RE
2935 .ndo_set_rx_mode = mlx4_en_set_rx_mode,
2936 .ndo_set_mac_address = mlx4_en_set_mac,
2937 .ndo_validate_addr = eth_validate_addr,
2938 .ndo_change_mtu = mlx4_en_change_mtu,
2939 .ndo_tx_timeout = mlx4_en_tx_timeout,
2940 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2941 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2942 .ndo_set_vf_mac = mlx4_en_set_vf_mac,
3f7fb021 2943 .ndo_set_vf_vlan = mlx4_en_set_vf_vlan,
cda373f4 2944 .ndo_set_vf_rate = mlx4_en_set_vf_rate,
e6b6a231 2945 .ndo_set_vf_spoofchk = mlx4_en_set_vf_spoofchk,
948e306d 2946 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
62a89055 2947 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2cccb9e4 2948 .ndo_get_vf_config = mlx4_en_get_vf_config,
8f7ba3ca 2949 .ndo_set_features = mlx4_en_set_features,
e38af4fa 2950 .ndo_fix_features = mlx4_en_fix_features,
e4c6734e 2951 .ndo_setup_tc = __mlx4_en_setup_tc,
8f7ba3ca
RE
2952#ifdef CONFIG_RFS_ACCEL
2953 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2954#endif
84c86403 2955 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
a831274a
AD
2956 .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
2957 .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
5f35227e 2958 .ndo_features_check = mlx4_en_features_check,
c10e4fc6 2959 .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
f4e63525 2960 .ndo_bpf = mlx4_xdp,
8f7ba3ca
RE
2961};
2962
5da03547
MS
2963struct mlx4_en_bond {
2964 struct work_struct work;
2965 struct mlx4_en_priv *priv;
2966 int is_bonded;
2967 struct mlx4_port_map port_map;
2968};
2969
2970static void mlx4_en_bond_work(struct work_struct *work)
2971{
2972 struct mlx4_en_bond *bond = container_of(work,
2973 struct mlx4_en_bond,
2974 work);
2975 int err = 0;
2976 struct mlx4_dev *dev = bond->priv->mdev->dev;
2977
2978 if (bond->is_bonded) {
2979 if (!mlx4_is_bonded(dev)) {
2980 err = mlx4_bond(dev);
2981 if (err)
2982 en_err(bond->priv, "Failed to bond device\n");
2983 }
2984 if (!err) {
2985 err = mlx4_port_map_set(dev, &bond->port_map);
2986 if (err)
2987 en_err(bond->priv, "Failed to set port map [%d][%d]: %d\n",
2988 bond->port_map.port1,
2989 bond->port_map.port2,
2990 err);
2991 }
2992 } else if (mlx4_is_bonded(dev)) {
2993 err = mlx4_unbond(dev);
2994 if (err)
2995 en_err(bond->priv, "Failed to unbond device\n");
2996 }
2997 dev_put(bond->priv->dev);
2998 kfree(bond);
2999}
3000
3001static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
3002 u8 v2p_p1, u8 v2p_p2)
3003{
3004 struct mlx4_en_bond *bond = NULL;
3005
3006 bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
3007 if (!bond)
3008 return -ENOMEM;
3009
3010 INIT_WORK(&bond->work, mlx4_en_bond_work);
3011 bond->priv = priv;
3012 bond->is_bonded = is_bonded;
3013 bond->port_map.port1 = v2p_p1;
3014 bond->port_map.port2 = v2p_p2;
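/* Note: hold the netdev so it cannot go away while the work is
 * queued; the matching dev_put() is in mlx4_en_bond_work().
 */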
3015 dev_hold(priv->dev);
3016 queue_work(priv->mdev->workqueue, &bond->work);
3017 return 0;
3018}
3019
3020int mlx4_en_netdev_event(struct notifier_block *this,
3021 unsigned long event, void *ptr)
3022{
3023 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
3024 u8 port = 0;
3025 struct mlx4_en_dev *mdev;
3026 struct mlx4_dev *dev;
3027 int i, num_eth_ports = 0;
3028 bool do_bond = true;
3029 struct mlx4_en_priv *priv;
3030 u8 v2p_port1 = 0;
3031 u8 v2p_port2 = 0;
3032
3033 if (!net_eq(dev_net(ndev), &init_net))
3034 return NOTIFY_DONE;
3035
3036 mdev = container_of(this, struct mlx4_en_dev, nb);
3037 dev = mdev->dev;
3038
3039 /* Go into this mode only when the two network devices set on the two ports
3040 * of the same mlx4 device are slaves of the same bonding master
3041 */
3042 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
3043 ++num_eth_ports;
3044 if (!port && (mdev->pndev[i] == ndev))
3045 port = i;
3046 mdev->upper[i] = mdev->pndev[i] ?
3047 netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
3048 /* condition not met: network device is a slave */
3049 if (!mdev->upper[i])
3050 do_bond = false;
3051 if (num_eth_ports < 2)
3052 continue;
3053 /* condition not met: same master */
3054 if (mdev->upper[i] != mdev->upper[i-1])
3055 do_bond = false;
3056 }
3057 /* condition not met: 2 slaves */
3058 do_bond = (num_eth_ports == 2) ? do_bond : false;
3059
3060 /* handle only events that come with enough info */
3061 if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
3062 return NOTIFY_DONE;
3063
3064 priv = netdev_priv(ndev);
3065 if (do_bond) {
3066 struct netdev_notifier_bonding_info *notifier_info = ptr;
3067 struct netdev_bonding_info *bonding_info =
3068 &notifier_info->bonding_info;
3069
3070 /* required mode 1, 2 or 4 */
3071 if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
3072 (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
3073 (bonding_info->master.bond_mode != BOND_MODE_8023AD))
3074 do_bond = false;
3075
3076 /* require exactly 2 slaves */
3077 if (bonding_info->master.num_slaves != 2)
3078 do_bond = false;
3079
3080 /* calc v2p */
3081 if (do_bond) {
3082 if (bonding_info->master.bond_mode ==
3083 BOND_MODE_ACTIVEBACKUP) {
3084 /* in active-backup mode virtual ports are
3085 * mapped to the physical port of the active
3086 * slave */
3087 if (bonding_info->slave.state ==
3088 BOND_STATE_BACKUP) {
3089 if (port == 1) {
3090 v2p_port1 = 2;
3091 v2p_port2 = 2;
3092 } else {
3093 v2p_port1 = 1;
3094 v2p_port2 = 1;
3095 }
3096 } else { /* BOND_STATE_ACTIVE */
3097 if (port == 1) {
3098 v2p_port1 = 1;
3099 v2p_port2 = 1;
3100 } else {
3101 v2p_port1 = 2;
3102 v2p_port2 = 2;
3103 }
3104 }
3105 } else { /* Active-Active */
3106 /* in active-active mode a virtual port is
3107 * mapped to the native physical port if and only
3108 * if the physical port is up */
3109 __s8 link = bonding_info->slave.link;
3110
3111 if (port == 1)
3112 v2p_port2 = 2;
3113 else
3114 v2p_port1 = 1;
3115 if ((link == BOND_LINK_UP) ||
3116 (link == BOND_LINK_FAIL)) {
3117 if (port == 1)
3118 v2p_port1 = 1;
3119 else
3120 v2p_port2 = 2;
3121 } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
3122 if (port == 1)
3123 v2p_port1 = 2;
3124 else
3125 v2p_port2 = 1;
3126 }
3127 }
3128 }
3129 }
3130
3131 mlx4_en_queue_bond_work(priv, do_bond,
3132 v2p_port1, v2p_port2);
3133
3134 return NOTIFY_DONE;
3135}
3136
0b131561
MB
3137void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
3138 struct mlx4_en_stats_bitmap *stats_bitmap,
3139 u8 rx_ppp, u8 rx_pause,
3140 u8 tx_ppp, u8 tx_pause)
3141{
b42de4d0 3142 int last_i = NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PF_STATS;
0b131561
MB
3143
3144 if (!mlx4_is_slave(dev) &&
3145 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN)) {
3146 mutex_lock(&stats_bitmap->mutex);
3147 bitmap_clear(stats_bitmap->bitmap, last_i, NUM_FLOW_STATS);
3148
3149 if (rx_ppp)
3150 bitmap_set(stats_bitmap->bitmap, last_i,
3151 NUM_FLOW_PRIORITY_STATS_RX);
3152 last_i += NUM_FLOW_PRIORITY_STATS_RX;
3153
3154 if (rx_pause && !(rx_ppp))
3155 bitmap_set(stats_bitmap->bitmap, last_i,
3156 NUM_FLOW_STATS_RX);
3157 last_i += NUM_FLOW_STATS_RX;
3158
3159 if (tx_ppp)
3160 bitmap_set(stats_bitmap->bitmap, last_i,
3161 NUM_FLOW_PRIORITY_STATS_TX);
3162 last_i += NUM_FLOW_PRIORITY_STATS_TX;
3163
3164 if (tx_pause && !(tx_ppp))
3165 bitmap_set(stats_bitmap->bitmap, last_i,
3166 NUM_FLOW_STATS_TX);
3167 last_i += NUM_FLOW_STATS_TX;
3168
3169 mutex_unlock(&stats_bitmap->mutex);
3170 }
3171}
3172
6fcd2735 3173void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
0b131561
MB
3174 struct mlx4_en_stats_bitmap *stats_bitmap,
3175 u8 rx_ppp, u8 rx_pause,
3176 u8 tx_ppp, u8 tx_pause)
ffa88f37 3177{
6fcd2735
EBE
3178 int last_i = 0;
3179
3da8a36c
EBE
3180 mutex_init(&stats_bitmap->mutex);
3181 bitmap_zero(stats_bitmap->bitmap, NUM_ALL_STATS);
6fcd2735
EBE
3182
3183 if (mlx4_is_slave(dev)) {
3da8a36c 3184 bitmap_set(stats_bitmap->bitmap, last_i +
6fcd2735 3185 MLX4_FIND_NETDEV_STAT(rx_packets), 1);
3da8a36c 3186 bitmap_set(stats_bitmap->bitmap, last_i +
6fcd2735 3187 MLX4_FIND_NETDEV_STAT(tx_packets), 1);
3da8a36c 3188 bitmap_set(stats_bitmap->bitmap, last_i +
6fcd2735 3189 MLX4_FIND_NETDEV_STAT(rx_bytes), 1);
3da8a36c 3190 bitmap_set(stats_bitmap->bitmap, last_i +
6fcd2735 3191 MLX4_FIND_NETDEV_STAT(tx_bytes), 1);
3da8a36c 3192 bitmap_set(stats_bitmap->bitmap, last_i +
6fcd2735 3193 MLX4_FIND_NETDEV_STAT(rx_dropped), 1);
3da8a36c 3194 bitmap_set(stats_bitmap->bitmap, last_i +
6fcd2735
EBE
3195 MLX4_FIND_NETDEV_STAT(tx_dropped), 1);
3196 } else {
3da8a36c 3197 bitmap_set(stats_bitmap->bitmap, last_i, NUM_MAIN_STATS);
ffa88f37 3198 }
6fcd2735 3199 last_i += NUM_MAIN_STATS;
ffa88f37 3200
3da8a36c 3201 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PORT_STATS);
6fcd2735 3202 last_i += NUM_PORT_STATS;
ffa88f37 3203
b42de4d0
EBE
3204 if (mlx4_is_master(dev))
3205 bitmap_set(stats_bitmap->bitmap, last_i,
3206 NUM_PF_STATS);
3207 last_i += NUM_PF_STATS;
3208
0b131561
MB
3209 mlx4_en_update_pfc_stats_bitmap(dev, stats_bitmap,
3210 rx_ppp, rx_pause,
3211 tx_ppp, tx_pause);
3212 last_i += NUM_FLOW_STATS;
3213
6fcd2735 3214 if (!mlx4_is_slave(dev))
3da8a36c 3215 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PKT_STATS);
15fca2c8
TT
3216 last_i += NUM_PKT_STATS;
3217
3218 bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
3219 last_i += NUM_XDP_STATS;
f26d0d25
EBE
3220
3221 if (!mlx4_is_slave(dev))
3222 bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
3223 last_i += NUM_PHY_STATS;
ffa88f37
EBE
3224}
3225
c27a02cd
YP
3226int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
3227 struct mlx4_en_port_profile *prof)
3228{
3229 struct net_device *dev;
3230 struct mlx4_en_priv *priv;
67f8b1dc 3231 int i, t;
c27a02cd
YP
3232 int err;
3233
f1593d22 3234 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
d317966b 3235 MAX_TX_RINGS, MAX_RX_RINGS);
41de8d4c 3236 if (dev == NULL)
c27a02cd 3237 return -ENOMEM;
c27a02cd 3238
67f8b1dc 3239 netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
d317966b
AV
3240 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
3241
872bf2fb 3242 SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
76a066f2 3243 dev->dev_port = port - 1;
c27a02cd
YP
3244
3245 /*
3246 * Initialize driver private data
3247 */
3248
3249 priv = netdev_priv(dev);
3250 memset(priv, 0, sizeof(struct mlx4_en_priv));
6de5f7f6 3251 priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
207af6c5
EE
3252 spin_lock_init(&priv->stats_lock);
3253 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
3254 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
3255 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
3256 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
3257 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
207af6c5
EE
3258 INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
3259 INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
207af6c5
EE
3260#ifdef CONFIG_RFS_ACCEL
3261 INIT_LIST_HEAD(&priv->filters);
3262 spin_lock_init(&priv->filters_lock);
3263#endif
3264
c27a02cd
YP
3265 priv->dev = dev;
3266 priv->mdev = mdev;
ebf8c9aa 3267 priv->ddev = &mdev->pdev->dev;
c27a02cd
YP
3268 priv->prof = prof;
3269 priv->port = port;
3270 priv->port_up = false;
c27a02cd 3271 priv->flags = prof->flags;
0fef9d03 3272 priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
60d6fe99
AV
3273 priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
3274 MLX4_WQE_CTRL_SOLICITED);
7e1dc5e9 3275 priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
fbc6daf1 3276 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
bd635c35 3277 netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
d317966b 3278
67f8b1dc
TT
3279 for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
3280 priv->tx_ring_num[t] = prof->tx_ring_num[t];
3281 if (!priv->tx_ring_num[t])
3282 continue;
3283
6396bb22
KC
3284 priv->tx_ring[t] = kcalloc(MAX_TX_RINGS,
3285 sizeof(struct mlx4_en_tx_ring *),
3286 GFP_KERNEL);
67f8b1dc
TT
3287 if (!priv->tx_ring[t]) {
3288 err = -ENOMEM;
a577d868 3289 goto out;
67f8b1dc 3290 }
6396bb22
KC
3291 priv->tx_cq[t] = kcalloc(MAX_TX_RINGS,
3292 sizeof(struct mlx4_en_cq *),
3293 GFP_KERNEL);
67f8b1dc 3294 if (!priv->tx_cq[t]) {
67f8b1dc
TT
3295 err = -ENOMEM;
3296 goto out;
3297 }
bc6a4744 3298 }
c27a02cd 3299 priv->rx_ring_num = prof->rx_ring_num;
08ff3235 3300 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
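/* Note: with 64-byte CQEs the 32-byte payload sits in the second half
 * of the stride, so cqe_factor offsets every CQE lookup (see the
 * "mlx4_en_get_cqe(...) + priv->cqe_factor" use in mlx4_en_start_port()).
 */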
b1b6b4da 3301 priv->cqe_size = mdev->dev->caps.cqe_size;
c27a02cd
YP
3302 priv->mac_index = -1;
3303 priv->msg_enable = MLX4_EN_MSG_LEVEL;
564c274c 3304#ifdef CONFIG_MLX4_EN_DCB
540b3a39 3305 if (!mlx4_is_slave(priv->mdev->dev)) {
a42b63c1
MS
3306 u8 prio;
3307
3308 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) {
3309 priv->ets.prio_tc[prio] = prio;
3310 priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR;
3311 }
3312
564ed9b1
TT
3313 priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
3314 DCB_CAP_DCBX_VER_IEEE;
af7d5185 3315 priv->flags |= MLX4_EN_DCB_ENABLED;
564ed9b1 3316 priv->cee_config.pfc_state = false;
af7d5185 3317
f21ad614 3318 for (i = 0; i < MLX4_EN_NUM_UP_HIGH; i++)
564ed9b1 3319 priv->cee_config.dcb_pfc[i] = pfc_disabled;
af7d5185 3320
3742cc65 3321 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
540b3a39
OG
3322 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
3323 } else {
3324 en_info(priv, "enabling only PFC DCB ops\n");
3325 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
3326 }
3327 }
564c274c 3328#endif
c27a02cd 3329
c07cb4b0
YB
3330 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
3331 INIT_HLIST_HEAD(&priv->mac_hash[i]);
16a10ffd 3332
c27a02cd
YP
3333 /* Query for default mac and max mtu */
3334 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
6bbb6d99 3335
f8c6455b
SM
3336 if (mdev->dev->caps.rx_checksum_flags_port[priv->port] &
3337 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP)
3338 priv->flags |= MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP;
3339
6bbb6d99
YB
3340 /* Set default MAC */
3341 dev->addr_len = ETH_ALEN;
3342 mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
3343 if (!is_valid_ether_addr(dev->dev_addr)) {
21d2cb49 3344 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
2b3ddf27
JM
3345 priv->port, dev->dev_addr);
3346 err = -EINVAL;
3347 goto out;
3348 } else if (mlx4_is_slave(priv->mdev->dev) &&
3349 (priv->mdev->dev->port_random_macs & 1 << priv->port)) {
3350 /* Random MAC was assigned in mlx4_slave_cap
3351 * in mlx4_core module
3352 */
3353 dev->addr_assign_type |= NET_ADDR_RANDOM;
3354 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
c27a02cd
YP
3355 }
3356
2695bab2 3357 memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
6bbb6d99 3358
c27a02cd
YP
3359 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
3360 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
3361 err = mlx4_en_alloc_resources(priv);
3362 if (err)
3363 goto out;
3364
ec693d47
AV
3365 /* Initialize time stamping config */
3366 priv->hwtstamp_config.flags = 0;
3367 priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
3368 priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
3369
c27a02cd
YP
3370 /* Allocate page for receive rings */
3371 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
73898db0 3372 MLX4_EN_PAGE_SIZE);
c27a02cd 3373 if (err) {
453a6082 3374 en_err(priv, "Failed to allocate page for rx qps\n");
c27a02cd
YP
3375 goto out;
3376 }
3377 priv->allocated = 1;
3378
c27a02cd
YP
3379 /*
3380 * Initialize netdev entry points
3381 */
8f7ba3ca
RE
3382 if (mlx4_is_master(priv->mdev->dev))
3383 dev->netdev_ops = &mlx4_netdev_ops_master;
3384 else
3385 dev->netdev_ops = &mlx4_netdev_ops;
c27a02cd 3386 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
67f8b1dc 3387 netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
1eb63a28 3388 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
3addc568 3389
7ad24ea4 3390 dev->ethtool_ops = &mlx4_en_ethtool_ops;
c27a02cd 3391
c27a02cd
YP
3392 /*
3393 * Set driver features
3394 */
c8c64cff
MM
3395 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3396 if (mdev->LSO_support)
3397 dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3398
91c6bfb8
DC
3399 if (mdev->dev->caps.tunnel_offload_mode ==
3400 MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
3401 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
3402 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3403 NETIF_F_GSO_PARTIAL;
3404 dev->features |= NETIF_F_GSO_UDP_TUNNEL |
3405 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3406 NETIF_F_GSO_PARTIAL;
3407 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
3408 dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3409 NETIF_F_RXCSUM |
3410 NETIF_F_TSO | NETIF_F_TSO6 |
3411 NETIF_F_GSO_UDP_TUNNEL |
3412 NETIF_F_GSO_UDP_TUNNEL_CSUM |
3413 NETIF_F_GSO_PARTIAL;
3414 }
3415
c8c64cff
MM
3416 dev->vlan_features = dev->hw_features;
3417
ad86107f 3418 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
c8c64cff 3419 dev->features = dev->hw_features | NETIF_F_HIGHDMA |
f646968f
PM
3420 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
3421 NETIF_F_HW_VLAN_CTAG_FILTER;
537f6f95
SM
3422 dev->hw_features |= NETIF_F_LOOPBACK |
3423 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
c27a02cd 3424
e38af4fa
HHZ
3425 if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
3426 dev->features |= NETIF_F_HW_VLAN_STAG_RX |
3427 NETIF_F_HW_VLAN_STAG_FILTER;
3428 dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
3429 }
3430
	if (mlx4_is_slave(mdev->dev)) {
		bool vlan_offload_disabled;
		int phv;

		err = get_phv_bit(mdev->dev, port, &phv);
		if (!err && phv) {
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
		}
		err = mlx4_get_is_vlan_offload_disabled(mdev->dev, port,
							&vlan_offload_disabled);
		if (!err && vlan_offload_disabled) {
			dev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					      NETIF_F_HW_VLAN_CTAG_RX |
					      NETIF_F_HW_VLAN_STAG_TX |
					      NETIF_F_HW_VLAN_STAG_RX);
			dev->features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
					   NETIF_F_HW_VLAN_CTAG_RX |
					   NETIF_F_HW_VLAN_STAG_TX |
					   NETIF_F_HW_VLAN_STAG_RX);
		}
	} else {
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
		    !(mdev->dev->caps.flags2 &
		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
	}

	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
		dev->hw_features |= NETIF_F_RXFCS;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS)
		dev->hw_features |= NETIF_F_RXALL;

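	/* The steering mode bounds the filtering features: device-managed
	 * flow steering (except the static A0 profile) supports ethtool
	 * n-tuple rules, and anything newer than A0 can filter unicast
	 * addresses in HW.
	 */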
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	/* Set a default RSS hash function */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP) {
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	} else if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR) {
		priv->rss_hash_fn = ETH_RSS_HASH_XOR;
	} else {
		en_warn(priv,
			"No RSS hash capabilities exposed, using Toeplitz\n");
		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
	}

	/* MTU range: 68 - hw-specific max */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = priv->max_mtu;

	mdev->pndev[port] = dev;
	mdev->upper[port] = NULL;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port: compute the RX buffer layout for the current MTU,
	 * then program the MTU (including FCS) and the pause/PFC settings.
	 */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

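	/* The service task handles periodic maintenance, notably reading
	 * the PTP clock often enough that its cycle counter cannot
	 * silently wrap.
	 */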
	queue_delayed_work(mdev->workqueue, &priv->service_task,
			   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
				 mdev->profile.prof[priv->port].rx_ppp,
				 mdev->profile.prof[priv->port].rx_pause,
				 mdev->profile.prof[priv->port].tx_ppp,
				 mdev->profile.prof[priv->port].tx_pause);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}

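	/* The netdev is live; record that and report the port to devlink
	 * as an Ethernet port.
	 */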
	priv->registered = 1;
	devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
				  dev);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}

int mlx4_en_reset_config(struct net_device *dev,
			 struct hwtstamp_config ts_config,
			 netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_port_profile new_prof;
	struct mlx4_en_priv *tmp;
	int port_up = 0;
	int err = 0;

	if (priv->hwtstamp_config.tx_type == ts_config.tx_type &&
	    priv->hwtstamp_config.rx_filter == ts_config.rx_filter &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    !DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS))
		return 0; /* Nothing to change */

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (priv->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE)) {
		en_warn(priv, "Can't turn ON rx vlan offload while time-stamping rx filter is ON\n");
		return -EINVAL;
	}

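	/* Rings cannot be reshaped in place: build a complete replacement
	 * set in a scratch priv first, so a failed allocation leaves the
	 * running configuration untouched.
	 */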
	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&mdev->state_lock);

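	/* The new profile is the current one with only the timestamp
	 * configuration replaced.
	 */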
	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));

	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
	if (err)
		goto out;

	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	mlx4_en_safe_replace_resources(priv, tmp);

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	} else if (ts_config.rx_filter == HWTSTAMP_FILTER_NONE) {
		/* RX time-stamping is OFF, so update the RX VLAN offload
		 * to the latest wanted state.
		 */
		if (dev->wanted_features & NETIF_F_HW_VLAN_CTAG_RX)
			dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
		else
			dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_RXFCS)) {
		if (features & NETIF_F_RXFCS)
			dev->features |= NETIF_F_RXFCS;
		else
			dev->features &= ~NETIF_F_RXFCS;
	}

	/* RX VLAN offload and RX time-stamping can't coexist.
	 * Regardless of the caller's choice, turn off RX VLAN offload
	 * whenever time-stamping is ON.
	 */
	if (ts_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
			en_warn(priv, "Turning off RX vlan offload since RX time-stamping is ON\n");
		dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
	}

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

out:
	mutex_unlock(&mdev->state_lock);
	kfree(tmp);
	if (!err)
		netdev_features_change(dev);
	return err;
}