/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
        struct work_struct      work;
        union {
                struct mlx5_ib_dev            *dev;
                struct mlx5_ib_multiport_info *mpi;
        };
        bool                    is_slave;
        unsigned int            event;
        void                    *param;
};

enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel module memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
        struct mlx5_ib_dev *dev;

        mutex_lock(&mlx5_ib_multiport_mutex);
        dev = mpi->ibdev;
        mutex_unlock(&mlx5_ib_multiport_mutex);
        return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
                          u8 port_num,
                          enum ib_port_state *state)
{
        struct ib_port_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        ret = ibdev->ops.query_port(ibdev, port_num, &attr);
        if (!ret)
                *state = attr.state;
        return ret;
}

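/*
 * Netdevice notifier for RoCE ports: caches the net_device backing the
 * native port on NETDEV_REGISTER/UNREGISTER, and on CHANGE/UP/DOWN
 * translates carrier state changes into IB_EVENT_PORT_ACTIVE or
 * IB_EVENT_PORT_ERR events, deduplicated via roce->last_port_state.
 */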
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        u8 port_num = roce->native_port_num;
        struct mlx5_core_dev *mdev;
        struct mlx5_ib_dev *ibdev;

        ibdev = roce->dev;
        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                write_lock(&roce->netdev_lock);
                if (ibdev->rep) {
                        struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
                        struct net_device *rep_ndev;

                        rep_ndev = mlx5_ib_get_rep_netdev(esw,
                                                          ibdev->rep->vport);
                        if (rep_ndev == ndev)
                                roce->netdev = ndev;
                } else if (ndev->dev.parent == &mdev->pdev->dev) {
                        roce->netdev = ndev;
                }
                write_unlock(&roce->netdev_lock);
                break;

        case NETDEV_UNREGISTER:
                write_lock(&roce->netdev_lock);
                if (roce->netdev == ndev)
                        roce->netdev = NULL;
                write_unlock(&roce->netdev_lock);
                break;

        case NETDEV_CHANGE:
        case NETDEV_UP:
        case NETDEV_DOWN: {
                struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
                struct net_device *upper = NULL;

                if (lag_ndev) {
                        upper = netdev_master_upper_dev_get(lag_ndev);
                        dev_put(lag_ndev);
                }

                if ((upper == ndev || (!upper && ndev == roce->netdev))
                    && ibdev->ib_active) {
                        struct ib_event ibev = { };
                        enum ib_port_state port_state;

                        if (get_port_state(&ibdev->ib_dev, port_num,
                                           &port_state))
                                goto done;

                        if (roce->last_port_state == port_state)
                                goto done;

                        roce->last_port_state = port_state;
                        ibev.device = &ibdev->ib_dev;
                        if (port_state == IB_PORT_DOWN)
                                ibev.event = IB_EVENT_PORT_ERR;
                        else if (port_state == IB_PORT_ACTIVE)
                                ibev.event = IB_EVENT_PORT_ACTIVE;
                        else
                                goto done;

                        ibev.element.port_num = port_num;
                        ib_dispatch_event(&ibev);
                }
                break;
        }

        default:
                break;
        }
done:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return NOTIFY_DONE;
}

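/*
 * Return the net_device for a RoCE port, preferring the LAG netdev when
 * RoCE LAG is active. The caller receives a reference (dev_hold) and is
 * responsible for dropping it with dev_put().
 */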
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;
        struct mlx5_core_dev *mdev;

        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NULL;

        ndev = mlx5_lag_get_roce_netdev(mdev);
        if (ndev)
                goto out;

        /* Ensure ndev does not disappear before we invoke dev_hold() */
        read_lock(&ibdev->roce[port_num - 1].netdev_lock);
        ndev = ibdev->roce[port_num - 1].netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->roce[port_num - 1].netdev_lock);

out:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return ndev;
}

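/*
 * Map an IB port number to the mlx5_core_dev that natively owns it. For
 * multiport RoCE slaves this takes a reference (mdev_refcnt) that must be
 * released with mlx5_ib_put_native_port_mdev(); for the master, or when
 * multiport is disabled, no refcounting is needed.
 */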
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
                                                   u8 ib_port_num,
                                                   u8 *native_port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          ib_port_num);
        struct mlx5_core_dev *mdev = NULL;
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) ||
            ll != IB_LINK_LAYER_ETHERNET) {
                if (native_port_num)
                        *native_port_num = ib_port_num;
                return ibdev->mdev;
        }

        if (native_port_num)
                *native_port_num = 1;

        port = &ibdev->port[ib_port_num - 1];
        if (!port)
                return NULL;

        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[ib_port_num - 1].mp.mpi;
        if (mpi && !mpi->unaffiliate) {
                mdev = mpi->mdev;
                /* If it's the master, there is no need to refcount; it'll
                 * exist as long as the ib_dev exists.
                 */
                if (!mpi->is_master)
                        mpi->mdev_refcnt++;
        }
        spin_unlock(&port->mp.mpi_lock);

        return mdev;
}

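/*
 * Release the reference taken by mlx5_ib_get_native_port_mdev(). When an
 * unaffiliation is in progress, the last put signals unref_comp so the
 * teardown path can proceed.
 */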
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          port_num);
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
                return;

        port = &ibdev->port[port_num - 1];

        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[port_num - 1].mp.mpi;
        if (mpi->is_master)
                goto out;

        mpi->mdev_refcnt--;
        if (mpi->unaffiliate)
                complete(&mpi->unref_comp);
out:
        spin_unlock(&port->mp.mpi_lock);
}

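/*
 * Translate a legacy PTYS eth_proto_oper bitmask into the IB speed/width
 * pair whose product matches the Ethernet link rate (e.g. 100GbE is
 * reported as 4X EDR).
 */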
static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                           u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
        case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

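/*
 * Same translation for the extended PTYS protocol bitmask used by newer
 * (50G-per-lane capable) devices, including 2X widths and HDR speeds.
 */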
static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                        u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_DDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
                *active_width = IB_WIDTH_2X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_HDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                    u8 *active_width, bool ext)
{
        return ext ?
                translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
                                             active_width) :
                translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
                                                active_width);
}

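/*
 * Query port attributes for a RoCE (Ethernet) port: speed/width from
 * PTYS, state and MTU from the backing netdev, plus RoCE-specific table
 * sizes and counters. Falls back to the master port for ports that are
 * not yet affiliated.
 */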
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
        struct mlx5_core_dev *mdev;
        struct net_device *ndev, *upper;
        enum ib_mtu ndev_ib_mtu;
        bool put_mdev = true;
        u16 qkey_viol_cntr;
        u32 eth_prot_oper;
        u8 mdev_port_num;
        bool ext;
        int err;

        mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
        if (!mdev) {
                /* This means the port isn't affiliated yet. Get the
                 * info for the master port instead.
                 */
                put_mdev = false;
                mdev = dev->mdev;
                mdev_port_num = 1;
                port_num = 1;
        }

        /* Possible bad flows are checked before filling out props, so in
         * case of an error it will still be zeroed out.
         */
        err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
                                   mdev_port_num);
        if (err)
                goto out;
        ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
        eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

        props->active_width     = IB_WIDTH_4X;
        props->active_speed     = IB_SPEED_QDR;

        translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
                                 &props->active_width, ext);

        props->port_cap_flags |= IB_PORT_CM_SUP;
        props->ip_gids = true;

        props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
                                                roce_address_table_size);
        props->max_mtu          = IB_MTU_4096;
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len     = 1;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = 3; /* Disabled */

        mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        /* If this is a stub query for an unaffiliated port stop here */
        if (!put_mdev)
                goto out;

        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
                goto out;

        if (dev->lag_active) {
                rcu_read_lock();
                upper = netdev_master_upper_dev_get_rcu(ndev);
                if (upper) {
                        dev_put(ndev);
                        ndev = upper;
                        dev_hold(ndev);
                }
                rcu_read_unlock();
        }

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state      = IB_PORT_ACTIVE;
                props->phys_state = 5; /* LinkUp */
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

        dev_put(ndev);

        props->active_mtu       = min(props->max_mtu, ndev_ib_mtu);
out:
        if (put_mdev)
                mlx5_ib_put_native_port_mdev(dev, port_num);
        return err;
}

struct mlx5_ib_vlan_info {
        u16 vlan_id;
        bool vlan;
};

static int get_lower_dev_vlan(struct net_device *lower_dev, void *data)
{
        struct mlx5_ib_vlan_info *vlan_info = data;

        if (is_vlan_dev(lower_dev)) {
                vlan_info->vlan = true;
                vlan_info->vlan_id = vlan_dev_vlan_id(lower_dev);
        }
        /* We are interested only in the first-level vlan device, so
         * always return 1 to stop iterating over next-level devices.
         */
        return 1;
}

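/*
 * Program one RoCE GID table entry in firmware: derive the RoCE version
 * and L3 type from the GID type, and the MAC/VLAN from the associated
 * netdev. A NULL gid clears the entry.
 */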
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
                         unsigned int index, const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
{
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        struct mlx5_ib_vlan_info vlan_info = { };
        u8 roce_version = 0;
        u8 roce_l3_type = 0;
        u8 mac[ETH_ALEN];

        if (gid) {
                gid_type = attr->gid_type;
                ether_addr_copy(mac, attr->ndev->dev_addr);

                if (is_vlan_dev(attr->ndev)) {
                        vlan_info.vlan = true;
                        vlan_info.vlan_id = vlan_dev_vlan_id(attr->ndev);
                } else {
                        /* If the netdev is an upper device and its lower
                         * device is a vlan device, consider the vlan id of
                         * the lower vlan device for this gid entry.
                         */
                        rcu_read_lock();
                        netdev_walk_all_lower_dev_rcu(attr->ndev,
                                        get_lower_dev_vlan, &vlan_info);
                        rcu_read_unlock();
                }
        }

        switch (gid_type) {
        case IB_GID_TYPE_IB:
                roce_version = MLX5_ROCE_VERSION_1;
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
                roce_version = MLX5_ROCE_VERSION_2;
                if (ipv6_addr_v4mapped((void *)gid))
                        roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
                else
                        roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
                break;

        default:
                mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
        }

        return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
                                      roce_l3_type, gid->raw, mac,
                                      vlan_info.vlan, vlan_info.vlan_id,
                                      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(to_mdev(attr->device), attr->port_num,
                             attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(to_mdev(attr->device), attr->port_num,
                             attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev,
                               const struct ib_gid_attr *attr)
{
        if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                return 0;

        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
                return !MLX5_CAP_GEN(dev->mdev, ib_virt);
        return 0;
}

enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

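/*
 * Pick how vport attributes should be queried: MAD for IB links without
 * ib_virt, NIC vport commands for Ethernet, and HCA vport commands
 * otherwise.
 */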
static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}

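/*
 * Report IB_ATOMIC_HCA only when the device supports 8-byte compare-swap
 * and fetch-add at the given QP atomic size and can respond in host
 * endianness; otherwise atomics are not exposed.
 */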
static void get_atomic_caps(struct mlx5_ib_dev *dev,
                            u8 atomic_size_qp,
                            struct ib_device_attr *props)
{
        u8 tmp;
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_req_8B_endianness_mode =
                MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

        /* Check if HW supports 8-byte standard atomic operations and is
         * capable of responding in host endianness
         */
        tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
        if (((atomic_operations & tmp) == tmp) &&
            (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
            (atomic_req_8B_endianness_mode)) {
                props->atomic_cap = IB_ATOMIC_HCA;
        } else {
                props->atomic_cap = IB_ATOMIC_NONE;
        }
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
                               struct ib_device_attr *props)
{
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

        get_atomic_caps(dev, atomic_size_qp, props);
}

static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
                               struct ib_device_attr *props)
{
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);

        get_atomic_caps(dev, atomic_size_qp, props);
}

bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
{
        struct ib_device_attr props = {};

        get_atomic_caps_dc(dev, &props);
        return props.atomic_cap == IB_ATOMIC_HCA;
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *sys_image_guid = cpu_to_be64(tmp);

        return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *node_guid = cpu_to_be64(tmp);

        return err;
}

struct mlx5_reg_node_desc {
        u8      desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}

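/*
 * Fill struct ib_device_attr from firmware capability bits, and extend
 * the mlx5-specific uverbs response (TSO, RSS, CQE compression, packet
 * pacing, striding RQ, tunnel offloads, ...) only with fields the caller
 * left room for, growing resp.response_length accordingly.
 */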
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_sq_desc;
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
        bool raw_support = !mlx5_core_mp_enabled(mdev);
        struct mlx5_ib_query_device_resp resp = {};
        size_t resp_len;
        u64 max_tso;

        resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
        if (uhw->outlen && uhw->outlen < resp_len)
                return -EINVAL;
        else
                resp.response_length = resp_len;

        if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
                props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
        }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                      IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
                if (MLX5_CAP_ETH(mdev, csum_cap)) {
                        /* Legacy bit to support old userspace libraries */
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
                }

                if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
                        props->raw_packet_caps |=
                                IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

                if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
                        max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
                        if (max_tso) {
                                resp.tso_caps.max_tso = 1 << max_tso;
                                resp.tso_caps.supported_qpts |=
                                        1 << IB_QPT_RAW_PACKET;
                                resp.response_length += sizeof(resp.tso_caps);
                        }
                }

                if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
                        resp.rss_caps.rx_hash_function =
                                                MLX5_RX_HASH_FUNC_TOEPLITZ;
                        resp.rss_caps.rx_hash_fields_mask =
                                                MLX5_RX_HASH_SRC_IPV4 |
                                                MLX5_RX_HASH_DST_IPV4 |
                                                MLX5_RX_HASH_SRC_IPV6 |
                                                MLX5_RX_HASH_DST_IPV6 |
                                                MLX5_RX_HASH_SRC_PORT_TCP |
                                                MLX5_RX_HASH_DST_PORT_TCP |
                                                MLX5_RX_HASH_SRC_PORT_UDP |
                                                MLX5_RX_HASH_DST_PORT_UDP |
                                                MLX5_RX_HASH_INNER;
                        if (mlx5_accel_ipsec_device_caps(dev->mdev) &
                            MLX5_ACCEL_IPSEC_CAP_DEVICE)
                                resp.rss_caps.rx_hash_fields_mask |=
                                        MLX5_RX_HASH_IPSEC_SPI;
                        resp.response_length += sizeof(resp.rss_caps);
                }
        } else {
                if (field_avail(typeof(resp), tso_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.tso_caps);
                if (field_avail(typeof(resp), rss_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.rss_caps);
        }

        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }

        if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
            MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
            raw_support)
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

        if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
            MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
            raw_support) {
                /* Legacy bit to support old userspace libraries */
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
        }

        if (MLX5_CAP_DEV_MEM(mdev, memic)) {
                props->max_dm_size =
                        MLX5_CAP_DEV_MEM(mdev, max_memic_size);
        }

        if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        if (MLX5_CAP_GEN(mdev, end_pad))
                props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = ~(min_page_size - 1);
        props->max_qp              = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr           = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                     sizeof(struct mlx5_wqe_data_seg);
        max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
        max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
                     sizeof(struct mlx5_wqe_raddr_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        props->max_send_sge = max_sq_sg;
        props->max_recv_sge = max_rq_sg;
        props->max_sge_rd          = MLX5_MAX_SGE_RD;
        props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq             = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len =
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        get_atomic_caps_qp(dev, props);
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
        props->max_ah = INT_MAX;
        props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
        props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
                if (MLX5_CAP_GEN(mdev, pg))
                        props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
                props->odp_caps = dev->odp_caps;
        }

        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET && raw_support) {
                props->rss_caps.max_rwq_indirection_tables =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
                props->rss_caps.max_rwq_indirection_table_size =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
                props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                props->max_wq_type_rq =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
        }

        if (MLX5_CAP_GEN(mdev, tag_matching)) {
                props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
                props->tm_caps.max_num_tags =
                        (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
                props->tm_caps.flags = IB_TM_CAP_RC;
                props->tm_caps.max_ops =
                        1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
        }

        if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
                props->cq_caps.max_cq_moderation_count =
                                                MLX5_MAX_CQ_COUNT;
                props->cq_caps.max_cq_moderation_period =
                                                MLX5_MAX_CQ_PERIOD;
        }

        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
                resp.response_length += sizeof(resp.cqe_comp_caps);

                if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
                        resp.cqe_comp_caps.max_num =
                                MLX5_CAP_GEN(dev->mdev,
                                             cqe_compression_max_num);

                        resp.cqe_comp_caps.supported_format =
                                MLX5_IB_CQE_RES_FORMAT_HASH |
                                MLX5_IB_CQE_RES_FORMAT_CSUM;

                        if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
                                resp.cqe_comp_caps.supported_format |=
                                        MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
                }
        }

        if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
            raw_support) {
                if (MLX5_CAP_QOS(mdev, packet_pacing) &&
                    MLX5_CAP_GEN(mdev, qos)) {
                        resp.packet_pacing_caps.qp_rate_limit_max =
                                MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
                        resp.packet_pacing_caps.qp_rate_limit_min =
                                MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
                        resp.packet_pacing_caps.supported_qpts |=
                                1 << IB_QPT_RAW_PACKET;
                        if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
                            MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
                                resp.packet_pacing_caps.cap_flags |=
                                        MLX5_IB_PP_SUPPORT_BURST;
                }
                resp.response_length += sizeof(resp.packet_pacing_caps);
        }

        if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
                        uhw->outlen)) {
                if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
                        resp.mlx5_ib_support_multi_pkt_send_wqes =
                                MLX5_IB_ALLOW_MPW;

                if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
                        resp.mlx5_ib_support_multi_pkt_send_wqes |=
                                MLX5_IB_SUPPORT_EMPW;

                resp.response_length +=
                        sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
        }

        if (field_avail(typeof(resp), flags, uhw->outlen)) {
                resp.response_length += sizeof(resp.flags);

                if (MLX5_CAP_GEN(mdev, cqe_compression_128))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

                if (MLX5_CAP_GEN(mdev, cqe_128_always))
                        resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
                if (MLX5_CAP_GEN(mdev, qp_packet_based))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
        }

        if (field_avail(typeof(resp), sw_parsing_caps,
                        uhw->outlen)) {
                resp.response_length += sizeof(resp.sw_parsing_caps);
                if (MLX5_CAP_ETH(mdev, swp)) {
                        resp.sw_parsing_caps.sw_parsing_offloads |=
                                MLX5_IB_SW_PARSING;

                        if (MLX5_CAP_ETH(mdev, swp_csum))
                                resp.sw_parsing_caps.sw_parsing_offloads |=
                                        MLX5_IB_SW_PARSING_CSUM;

                        if (MLX5_CAP_ETH(mdev, swp_lso))
                                resp.sw_parsing_caps.sw_parsing_offloads |=
                                        MLX5_IB_SW_PARSING_LSO;

                        if (resp.sw_parsing_caps.sw_parsing_offloads)
                                resp.sw_parsing_caps.supported_qpts =
                                        BIT(IB_QPT_RAW_PACKET);
                }
        }

        if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
            raw_support) {
                resp.response_length += sizeof(resp.striding_rq_caps);
                if (MLX5_CAP_GEN(mdev, striding_rq)) {
                        resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                        resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
                                MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
                        resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
                                MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
                                MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.supported_qpts =
                                BIT(IB_QPT_RAW_PACKET);
                }
        }

        if (field_avail(typeof(resp), tunnel_offloads_caps,
                        uhw->outlen)) {
                resp.response_length += sizeof(resp.tunnel_offloads_caps);
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_GRE;
                if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
                    MLX5_FLEX_PROTO_CW_MPLS_GRE)
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
                if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
                    MLX5_FLEX_PROTO_CW_MPLS_UDP)
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
        }

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);

                if (err)
                        return err;
        }

        return 0;
}

enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X        = 1 << 0,
        MLX5_IB_WIDTH_2X        = 1 << 1,
        MLX5_IB_WIDTH_4X        = 1 << 2,
        MLX5_IB_WIDTH_8X        = 1 << 3,
        MLX5_IB_WIDTH_12X       = 1 << 4
};

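/*
 * Convert the firmware link-width bitmask into a single enum ib_port_width
 * value, defaulting to 4X (with a debug warning) when no known bit is set.
 */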
static void translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        if (active_width & MLX5_IB_WIDTH_1X)
                *ib_width = IB_WIDTH_1X;
        else if (active_width & MLX5_IB_WIDTH_2X)
                *ib_width = IB_WIDTH_2X;
        else if (active_width & MLX5_IB_WIDTH_4X)
                *ib_width = IB_WIDTH_4X;
        else if (active_width & MLX5_IB_WIDTH_8X)
                *ib_width = IB_WIDTH_8X;
        else if (active_width & MLX5_IB_WIDTH_12X)
                *ib_width = IB_WIDTH_12X;
        else {
                mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
                            (int)active_width);
                *ib_width = IB_WIDTH_4X;
        }
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
        switch (mtu) {
        case 256: return 1;
        case 512: return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid mtu\n");
                return -1;
        }
}

enum ib_max_vl_num {
        __IB_MAX_VL_0           = 1,
        __IB_MAX_VL_0_1         = 2,
        __IB_MAX_VL_0_3         = 3,
        __IB_MAX_VL_0_7         = 4,
        __IB_MAX_VL_0_14        = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0    = 1,
        MLX5_VL_HW_0_1  = 2,
        MLX5_VL_HW_0_2  = 3,
        MLX5_VL_HW_0_3  = 4,
        MLX5_VL_HW_0_4  = 5,
        MLX5_VL_HW_0_5  = 6,
        MLX5_VL_HW_0_6  = 7,
        MLX5_VL_HW_0_7  = 8,
        MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

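/*
 * Query port attributes for a native IB port via the HCA vport context,
 * plus PTYS-derived width/speed, MTUs, and the supported VL count.
 */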
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
        u16 max_mtu;
        u16 oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
        }

        /* props is zeroed by the caller; avoid zeroing it here */

        err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
        if (err)
                goto out;

        props->lid              = rep->lid;
        props->lmc              = rep->lmc;
        props->sm_lid           = rep->sm_lid;
        props->sm_sl            = rep->sm_sl;
        props->state            = rep->vport_state;
        props->phys_state       = rep->port_physical_state;
        props->port_cap_flags   = rep->cap_mask1;
        props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
        props->bad_pkey_cntr    = rep->pkey_violation_counter;
        props->qkey_viol_cntr   = rep->qkey_violation_counter;
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;

        if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
                props->port_cap_flags2 = rep->cap_mask2;

        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;

        translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

        err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
        if (err)
                goto out;

        mlx5_query_port_max_mtu(mdev, &max_mtu, port);

        props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

        mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

        props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

        err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
        if (err)
                goto out;

        err = translate_max_vl_num(ibdev, vl_hw_cap,
                                   &props->max_vl_num);
out:
        kfree(rep);
        return err;
}

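/*
 * Generic query_port entry point: dispatch to the MAD, HCA-vport, or
 * RoCE implementation by access method, then shrink gid_tbl_len by the
 * GIDs the core reserves on the native port.
 */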
1358 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
1359                        struct ib_port_attr *props)
1360 {
1361         unsigned int count;
1362         int ret;
1363
1364         switch (mlx5_get_vport_access_method(ibdev)) {
1365         case MLX5_VPORT_ACCESS_METHOD_MAD:
1366                 ret = mlx5_query_mad_ifc_port(ibdev, port, props);
1367                 break;
1368
1369         case MLX5_VPORT_ACCESS_METHOD_HCA:
1370                 ret = mlx5_query_hca_port(ibdev, port, props);
1371                 break;
1372
1373         case MLX5_VPORT_ACCESS_METHOD_NIC:
1374                 ret = mlx5_query_port_roce(ibdev, port, props);
1375                 break;
1376
1377         default:
1378                 ret = -EINVAL;
1379         }
1380
1381         if (!ret && props) {
1382                 struct mlx5_ib_dev *dev = to_mdev(ibdev);
1383                 struct mlx5_core_dev *mdev;
1384                 bool put_mdev = true;
1385
1386                 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1387                 if (!mdev) {
1388                         /* If the port isn't affiliated yet, query the master.
1389                          * The master and slave ports will report the same values.
1390                          */
1391                         mdev = dev->mdev;
1392                         port = 1;
1393                         put_mdev = false;
1394                 }
1395                 count = mlx5_core_reserved_gids_count(mdev);
1396                 if (put_mdev)
1397                         mlx5_ib_put_native_port_mdev(dev, port);
1398                 props->gid_tbl_len -= count;
1399         }
1400         return ret;
1401 }
1402
1403 static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
1404                                   struct ib_port_attr *props)
1405 {
1406         int ret;
1407
1408         /* Only link layer == Ethernet is valid for representors */
1409         ret = mlx5_query_port_roce(ibdev, port, props);
1410         if (ret || !props)
1411                 return ret;
1412
1413         /* We don't support GIDs */
1414         props->gid_tbl_len = 0;
1415
1416         return ret;
1417 }
1418
1419 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
1420                              union ib_gid *gid)
1421 {
1422         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1423         struct mlx5_core_dev *mdev = dev->mdev;
1424
1425         switch (mlx5_get_vport_access_method(ibdev)) {
1426         case MLX5_VPORT_ACCESS_METHOD_MAD:
1427                 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
1428
1429         case MLX5_VPORT_ACCESS_METHOD_HCA:
1430                 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1431
1432         default:
1433                 return -EINVAL;
1434         }
1435
1436 }
1437
1438 static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
1439                                    u16 index, u16 *pkey)
1440 {
1441         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1442         struct mlx5_core_dev *mdev;
1443         bool put_mdev = true;
1444         u8 mdev_port_num;
1445         int err;
1446
1447         mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1448         if (!mdev) {
1449                 /* The port isn't affiliated yet; get the PKey from the master
1450                  * port. For RoCE the PKey tables will be the same.
1451                  */
1452                 put_mdev = false;
1453                 mdev = dev->mdev;
1454                 mdev_port_num = 1;
1455         }
1456
1457         err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1458                                         index, pkey);
1459         if (put_mdev)
1460                 mlx5_ib_put_native_port_mdev(dev, port);
1461
1462         return err;
1463 }
1464
1465 static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
1466                               u16 *pkey)
1467 {
1468         switch (mlx5_get_vport_access_method(ibdev)) {
1469         case MLX5_VPORT_ACCESS_METHOD_MAD:
1470                 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
1471
1472         case MLX5_VPORT_ACCESS_METHOD_HCA:
1473         case MLX5_VPORT_ACCESS_METHOD_NIC:
1474                 return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
1475         default:
1476                 return -EINVAL;
1477         }
1478 }
1479
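/*
 * Only IB_DEVICE_MODIFY_NODE_DESC is supported: push the new node
 * description to firmware via the NODE_DESC access register and
 * mirror it into ibdev->node_desc.
 */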
1480 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1481                                  struct ib_device_modify *props)
1482 {
1483         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1484         struct mlx5_reg_node_desc in;
1485         struct mlx5_reg_node_desc out;
1486         int err;
1487
1488         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1489                 return -EOPNOTSUPP;
1490
1491         if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1492                 return 0;
1493
1494         /*
1495          * If possible, pass the node description to firmware so it can
1496          * generate an IB Trap 144 notification. If the command fails, just ignore.
1497          */
1498         memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1499         err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1500                                    sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1501         if (err)
1502                 return err;
1503
1504         memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1505
1506         return err;
1507 }
1508
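/*
 * Read-modify-write of the port capability mask via the HCA vport
 * context; bits that cap_mask1_perm does not permit us to change are
 * rejected with -EINVAL.
 */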
1509 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1510                                 u32 value)
1511 {
1512         struct mlx5_hca_vport_context ctx = {};
1513         struct mlx5_core_dev *mdev;
1514         u8 mdev_port_num;
1515         int err;
1516
1517         mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1518         if (!mdev)
1519                 return -ENODEV;
1520
1521         err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1522         if (err)
1523                 goto out;
1524
1525         if (~ctx.cap_mask1_perm & mask) {
1526                 mlx5_ib_warn(dev, "trying to change bitmask 0x%X, but only bits 0x%X are changeable\n",
1527                              mask, ctx.cap_mask1_perm);
1528                 err = -EINVAL;
1529                 goto out;
1530         }
1531
1532         ctx.cap_mask1 = value;
1533         ctx.cap_mask1_perm = mask;
1534         err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1535                                                  0, &ctx);
1536
1537 out:
1538         mlx5_ib_put_native_port_mdev(dev, port_num);
1539
1540         return err;
1541 }
1542
1543 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1544                                struct ib_port_modify *props)
1545 {
1546         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1547         struct ib_port_attr attr;
1548         u32 tmp;
1549         int err;
1550         u32 change_mask;
1551         u32 value;
1552         bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1553                       IB_LINK_LAYER_INFINIBAND);
1554
1555         /* The CM layer calls ib_modify_port() regardless of the link layer. For
1556          * Ethernet ports, QKey violation counters and port capabilities are meaningless.
1557          */
1558         if (!is_ib)
1559                 return 0;
1560
1561         if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1562                 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1563                 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1564                 return set_port_caps_atomic(dev, port, change_mask, value);
1565         }
1566
1567         mutex_lock(&dev->cap_mask_mutex);
1568
1569         err = ib_query_port(ibdev, port, &attr);
1570         if (err)
1571                 goto out;
1572
1573         tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1574                 ~props->clr_port_cap_mask;
1575
1576         err = mlx5_set_port_caps(dev->mdev, port, tmp);
1577
1578 out:
1579         mutex_unlock(&dev->cap_mask_mutex);
1580         return err;
1581 }
1582
1583 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1584 {
1585         mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1586                     caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1587 }
1588
1589 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1590 {
1591         /* A large system page without 4k UAR support limits the dynamic bfreg count */
1592         if (uars_per_sys_page == 1  && PAGE_SIZE > 4096)
1593                 return MLX5_MIN_DYN_BFREGS;
1594
1595         return MLX5_MAX_DYN_BFREGS;
1596 }
1597
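/*
 * Illustrative walk-through of the sizing below, assuming the usual
 * MLX5_NON_FP_BFREGS_PER_UAR of 2 and one UAR per system page
 * (uars_per_sys_page == 1, hence bfregs_per_sys_page == 2): a request
 * for 5 bfregs is aligned up to 6, so num_static_sys_pages == 3, and
 * the dynamic bfregs from calc_dynamic_bfregs() are appended on top.
 */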
1598 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1599                              struct mlx5_ib_alloc_ucontext_req_v2 *req,
1600                              struct mlx5_bfreg_info *bfregi)
1601 {
1602         int uars_per_sys_page;
1603         int bfregs_per_sys_page;
1604         int ref_bfregs = req->total_num_bfregs;
1605
1606         if (req->total_num_bfregs == 0)
1607                 return -EINVAL;
1608
1609         BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1610         BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1611
1612         if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1613                 return -ENOMEM;
1614
1615         uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1616         bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1617         /* Round the static allocation the user asked for up to whole sys pages */
1618         req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1619         if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1620                 return -EINVAL;
1621
1622         bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1623         bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1624         bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1625         bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1626
1627         mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1628                     MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1629                     lib_uar_4k ? "yes" : "no", ref_bfregs,
1630                     req->total_num_bfregs, bfregi->total_num_bfregs,
1631                     bfregi->num_sys_pages);
1632
1633         return 0;
1634 }
1635
1636 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1637 {
1638         struct mlx5_bfreg_info *bfregi;
1639         int err;
1640         int i;
1641
1642         bfregi = &context->bfregi;
1643         for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1644                 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1645                 if (err)
1646                         goto error;
1647
1648                 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1649         }
1650
1651         for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1652                 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1653
1654         return 0;
1655
1656 error:
1657         for (--i; i >= 0; i--)
1658                 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1659                         mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1660
1661         return err;
1662 }
1663
1664 static void deallocate_uars(struct mlx5_ib_dev *dev,
1665                             struct mlx5_ib_ucontext *context)
1666 {
1667         struct mlx5_bfreg_info *bfregi;
1668         int i;
1669
1670         bfregi = &context->bfregi;
1671         for (i = 0; i < bfregi->num_sys_pages; i++)
1672                 if (i < bfregi->num_static_sys_pages ||
1673                     bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
1674                         mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1675 }
1676
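/*
 * Reference-count loopback users. Local loopback only has to be
 * enabled once a second transport domain exists (i.e. a second user
 * context that could receive the traffic) or once the first QP that
 * mandates it is created.
 */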
1677 int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1678 {
1679         int err = 0;
1680
1681         mutex_lock(&dev->lb.mutex);
1682         if (td)
1683                 dev->lb.user_td++;
1684         if (qp)
1685                 dev->lb.qps++;
1686
1687         if (dev->lb.user_td == 2 ||
1688             dev->lb.qps == 1) {
1689                 if (!dev->lb.enabled) {
1690                         err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1691                         dev->lb.enabled = true;
1692                 }
1693         }
1694
1695         mutex_unlock(&dev->lb.mutex);
1696
1697         return err;
1698 }
1699
1700 void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
1701 {
1702         mutex_lock(&dev->lb.mutex);
1703         if (td)
1704                 dev->lb.user_td--;
1705         if (qp)
1706                 dev->lb.qps--;
1707
1708         if (dev->lb.user_td == 1 &&
1709             dev->lb.qps == 0) {
1710                 if (dev->lb.enabled) {
1711                         mlx5_nic_vport_update_local_lb(dev->mdev, false);
1712                         dev->lb.enabled = false;
1713                 }
1714         }
1715
1716         mutex_unlock(&dev->lb.mutex);
1717 }
1718
1719 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
1720                                           u16 uid)
1721 {
1722         int err;
1723
1724         if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1725                 return 0;
1726
1727         err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1728         if (err)
1729                 return err;
1730
1731         if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1732             (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1733              !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1734                 return err;
1735
1736         return mlx5_ib_enable_lb(dev, true, false);
1737 }
1738
1739 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
1740                                              u16 uid)
1741 {
1742         if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1743                 return;
1744
1745         mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1746
1747         if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1748             (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1749              !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1750                 return;
1751
1752         mlx5_ib_disable_lb(dev, true, false);
1753 }
1754
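/*
 * Allocate a user context: negotiate the request version, size the
 * bfreg/UAR tables, optionally create a DEVX uid and a transport
 * domain, and copy back only as much of the response as the caller's
 * udata->outlen can hold (optional fields are gated by field_avail()).
 */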
1755 static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
1756                                   struct ib_udata *udata)
1757 {
1758         struct ib_device *ibdev = uctx->device;
1759         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1760         struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1761         struct mlx5_ib_alloc_ucontext_resp resp = {};
1762         struct mlx5_core_dev *mdev = dev->mdev;
1763         struct mlx5_ib_ucontext *context = to_mucontext(uctx);
1764         struct mlx5_bfreg_info *bfregi;
1765         int ver;
1766         int err;
1767         size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1768                                      max_cqe_version);
1769         u32 dump_fill_mkey;
1770         bool lib_uar_4k;
1771
1772         if (!dev->ib_active)
1773                 return -EAGAIN;
1774
1775         if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1776                 ver = 0;
1777         else if (udata->inlen >= min_req_v2)
1778                 ver = 2;
1779         else
1780                 return -EINVAL;
1781
1782         err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1783         if (err)
1784                 return err;
1785
1786         if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
1787                 return -EOPNOTSUPP;
1788
1789         if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1790                 return -EOPNOTSUPP;
1791
1792         req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1793                                     MLX5_NON_FP_BFREGS_PER_UAR);
1794         if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1795                 return -EINVAL;
1796
1797         resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1798         if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1799                 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1800         resp.cache_line_size = cache_line_size();
1801         resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1802         resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1803         resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1804         resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1805         resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1806         resp.cqe_version = min_t(__u8,
1807                                  (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1808                                  req.max_cqe_version);
1809         resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1810                                 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1811         resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1812                                         MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1813         resp.response_length = min(offsetof(typeof(resp), response_length) +
1814                                    sizeof(resp.response_length), udata->outlen);
1815
1816         if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1817                 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1818                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1819                 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1820                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1821                 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1822                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1823                 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1824                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1825                 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1826         }
1827
1828         lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1829         bfregi = &context->bfregi;
1830
1831         /* updates req->total_num_bfregs */
1832         err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1833         if (err)
1834                 goto out_ctx;
1835
1836         mutex_init(&bfregi->lock);
1837         bfregi->lib_uar_4k = lib_uar_4k;
1838         bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1839                                 GFP_KERNEL);
1840         if (!bfregi->count) {
1841                 err = -ENOMEM;
1842                 goto out_ctx;
1843         }
1844
1845         bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1846                                     sizeof(*bfregi->sys_pages),
1847                                     GFP_KERNEL);
1848         if (!bfregi->sys_pages) {
1849                 err = -ENOMEM;
1850                 goto out_count;
1851         }
1852
1853         err = allocate_uars(dev, context);
1854         if (err)
1855                 goto out_sys_pages;
1856
1857         if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
1858                 context->ibucontext.invalidate_range =
1859                         &mlx5_ib_invalidate_range;
1860
1861         if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
1862                 err = mlx5_ib_devx_create(dev, true);
1863                 if (err < 0)
1864                         goto out_uars;
1865                 context->devx_uid = err;
1866         }
1867
1868         err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
1869                                              context->devx_uid);
1870         if (err)
1871                 goto out_devx;
1872
1873         if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1874                 err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
1875                 if (err)
1876                         goto out_mdev;
1877         }
1878
1879         INIT_LIST_HEAD(&context->db_page_list);
1880         mutex_init(&context->db_page_mutex);
1881
1882         resp.tot_bfregs = req.total_num_bfregs;
1883         resp.num_ports = dev->num_ports;
1884
1885         if (field_avail(typeof(resp), cqe_version, udata->outlen))
1886                 resp.response_length += sizeof(resp.cqe_version);
1887
1888         if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1889                 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1890                                       MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1891                 resp.response_length += sizeof(resp.cmds_supp_uhw);
1892         }
1893
1894         if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1895                 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1896                         mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1897                         resp.eth_min_inline++;
1898                 }
1899                 resp.response_length += sizeof(resp.eth_min_inline);
1900         }
1901
1902         if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1903                 if (mdev->clock_info)
1904                         resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1905                 resp.response_length += sizeof(resp.clock_info_versions);
1906         }
1907
1908         /*
1909          * We don't want to expose information from the PCI bar that is located
1910          * after 4096 bytes, so if the arch only supports larger pages, let's
1911          * pretend we don't support reading the HCA's core clock. This is also
1912          * enforced by the mmap handler.
1913          */
1914         if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1915                 if (PAGE_SIZE <= 4096) {
1916                         resp.comp_mask |=
1917                                 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1918                         resp.hca_core_clock_offset =
1919                                 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1920                 }
1921                 resp.response_length += sizeof(resp.hca_core_clock_offset);
1922         }
1923
1924         if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1925                 resp.response_length += sizeof(resp.log_uar_size);
1926
1927         if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1928                 resp.response_length += sizeof(resp.num_uars_per_page);
1929
1930         if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1931                 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1932                 resp.response_length += sizeof(resp.num_dyn_bfregs);
1933         }
1934
1935         if (field_avail(typeof(resp), dump_fill_mkey, udata->outlen)) {
1936                 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1937                         resp.dump_fill_mkey = dump_fill_mkey;
1938                         resp.comp_mask |=
1939                                 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
1940                 }
1941                 resp.response_length += sizeof(resp.dump_fill_mkey);
1942         }
1943
1944         err = ib_copy_to_udata(udata, &resp, resp.response_length);
1945         if (err)
1946                 goto out_mdev;
1947
1948         bfregi->ver = ver;
1949         bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1950         context->cqe_version = resp.cqe_version;
1951         context->lib_caps = req.lib_caps;
1952         print_lib_caps(dev, context->lib_caps);
1953
1954         if (dev->lag_active) {
1955                 u8 port = mlx5_core_native_port_num(dev->mdev);
1956
1957                 atomic_set(&context->tx_port_affinity,
1958                            atomic_add_return(
1959                                    1, &dev->roce[port].tx_port_affinity));
1960         }
1961
1962         return 0;
1963
1964 out_mdev:
1965         mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1966 out_devx:
1967         if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
1968                 mlx5_ib_devx_destroy(dev, context->devx_uid);
1969
1970 out_uars:
1971         deallocate_uars(dev, context);
1972
1973 out_sys_pages:
1974         kfree(bfregi->sys_pages);
1975
1976 out_count:
1977         kfree(bfregi->count);
1978
1979 out_ctx:
1980         return err;
1981 }
1982
1983 static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1984 {
1985         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1986         struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1987         struct mlx5_bfreg_info *bfregi;
1988
1989         /* All umems must be destroyed before destroying the ucontext. */
1990         mutex_lock(&ibcontext->per_mm_list_lock);
1991         WARN_ON(!list_empty(&ibcontext->per_mm_list));
1992         mutex_unlock(&ibcontext->per_mm_list_lock);
1993
1994         bfregi = &context->bfregi;
1995         mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
1996
1997         if (context->devx_uid)
1998                 mlx5_ib_devx_destroy(dev, context->devx_uid);
1999
2000         deallocate_uars(dev, context);
2001         kfree(bfregi->sys_pages);
2002         kfree(bfregi->count);
2003 }
2004
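/* Convert a UAR index into the PFN of the BAR page backing it. */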
2005 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
2006                                  int uar_idx)
2007 {
2008         int fw_uars_per_page;
2009
2010         fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2011
2012         return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2013 }
2014
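/*
 * The mmap offset (vma->vm_pgoff) multiplexes a command and an
 * argument. A sketch of the layout, assuming the customary
 * MLX5_IB_MMAP_CMD_SHIFT of 8 bits:
 *
 *   bits [7:0]   - argument / index (low byte)
 *   bits [15:8]  - mmap command
 *   bits [23:16] - high byte of the extended index
 *                  (see get_extended_index() below)
 */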
2015 static int get_command(unsigned long offset)
2016 {
2017         return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
2018 }
2019
2020 static int get_arg(unsigned long offset)
2021 {
2022         return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
2023 }
2024
2025 static int get_index(unsigned long offset)
2026 {
2027         return get_arg(offset);
2028 }
2029
2030 /* Index resides in an extra byte to enable values larger than 255 */
2031 static int get_extended_index(unsigned long offset)
2032 {
2033         return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
2034 }
2035
2036
2037 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
2038 {
2039 }
2040
2041 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
2042 {
2043         switch (cmd) {
2044         case MLX5_IB_MMAP_WC_PAGE:
2045                 return "WC";
2046         case MLX5_IB_MMAP_REGULAR_PAGE:
2047                 return "best effort WC";
2048         case MLX5_IB_MMAP_NC_PAGE:
2049                 return "NC";
2050         case MLX5_IB_MMAP_DEVICE_MEM:
2051                 return "Device Memory";
2052         default:
2053                 return NULL;
2054         }
2055 }
2056
2057 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2058                                         struct vm_area_struct *vma,
2059                                         struct mlx5_ib_ucontext *context)
2060 {
2061         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2062                 return -EINVAL;
2063
2064         if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2065                 return -EOPNOTSUPP;
2066
2067         if (vma->vm_flags & VM_WRITE)
2068                 return -EPERM;
2069
2070         if (!dev->mdev->clock_info_page)
2071                 return -EOPNOTSUPP;
2072
2073         return rdma_user_mmap_page(&context->ibucontext, vma,
2074                                    dev->mdev->clock_info_page, PAGE_SIZE);
2075 }
2076
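/*
 * Map one UAR page into user space. Static bfregs are looked up in
 * sys_pages[]; MLX5_IB_MMAP_ALLOC_WC additionally allocates a dynamic
 * UAR on demand and records it, undoing the allocation on failure.
 */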
2077 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2078                     struct vm_area_struct *vma,
2079                     struct mlx5_ib_ucontext *context)
2080 {
2081         struct mlx5_bfreg_info *bfregi = &context->bfregi;
2082         int err;
2083         unsigned long idx;
2084         phys_addr_t pfn;
2085         pgprot_t prot;
2086         u32 bfreg_dyn_idx = 0;
2087         u32 uar_index;
2088         int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2089         int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2090                                 bfregi->num_static_sys_pages;
2091
2092         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2093                 return -EINVAL;
2094
2095         if (dyn_uar)
2096                 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2097         else
2098                 idx = get_index(vma->vm_pgoff);
2099
2100         if (idx >= max_valid_idx) {
2101                 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2102                              idx, max_valid_idx);
2103                 return -EINVAL;
2104         }
2105
2106         switch (cmd) {
2107         case MLX5_IB_MMAP_WC_PAGE:
2108         case MLX5_IB_MMAP_ALLOC_WC:
2109 /* Some architectures don't support WC memory */
2110 #if defined(CONFIG_X86)
2111                 if (!pat_enabled())
2112                         return -EPERM;
2113 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2114                         return -EPERM;
2115 #endif
2116         /* fall through */
2117         case MLX5_IB_MMAP_REGULAR_PAGE:
2118                 /* For MLX5_IB_MMAP_REGULAR_PAGE make a best effort to get WC */
2119                 prot = pgprot_writecombine(vma->vm_page_prot);
2120                 break;
2121         case MLX5_IB_MMAP_NC_PAGE:
2122                 prot = pgprot_noncached(vma->vm_page_prot);
2123                 break;
2124         default:
2125                 return -EINVAL;
2126         }
2127
2128         if (dyn_uar) {
2129                 int uars_per_page;
2130
2131                 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2132                 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2133                 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2134                         mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2135                                      bfreg_dyn_idx, bfregi->total_num_bfregs);
2136                         return -EINVAL;
2137                 }
2138
2139                 mutex_lock(&bfregi->lock);
2140                 /* Fail if the UAR is already allocated; the first bfreg
2141                  * index of each page holds its count.
2142                  */
2143                 if (bfregi->count[bfreg_dyn_idx]) {
2144                         mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2145                         mutex_unlock(&bfregi->lock);
2146                         return -EINVAL;
2147                 }
2148
2149                 bfregi->count[bfreg_dyn_idx]++;
2150                 mutex_unlock(&bfregi->lock);
2151
2152                 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2153                 if (err) {
2154                         mlx5_ib_warn(dev, "UAR alloc failed\n");
2155                         goto free_bfreg;
2156                 }
2157         } else {
2158                 uar_index = bfregi->sys_pages[idx];
2159         }
2160
2161         pfn = uar_index2pfn(dev, uar_index);
2162         mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2163
2164         err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
2165                                 prot);
2166         if (err) {
2167                 mlx5_ib_err(dev,
2168                             "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
2169                             err, mmap_cmd2str(cmd));
2170                 goto err;
2171         }
2172
2173         if (dyn_uar)
2174                 bfregi->sys_pages[idx] = uar_index;
2175         return 0;
2176
2177 err:
2178         if (!dyn_uar)
2179                 return err;
2180
2181         mlx5_cmd_free_uar(dev->mdev, uar_index);
2182
2183 free_bfreg:
2184         mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2185
2186         return err;
2187 }
2188
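/*
 * Map device memory (MEMIC). The range must have been handed out by
 * mlx5_ib_alloc_dm() earlier, which is verified against the
 * per-context dm_pages bitmap before the BAR pages are remapped.
 */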
2189 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2190 {
2191         struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2192         struct mlx5_ib_dev *dev = to_mdev(context->device);
2193         u16 page_idx = get_extended_index(vma->vm_pgoff);
2194         size_t map_size = vma->vm_end - vma->vm_start;
2195         u32 npages = map_size >> PAGE_SHIFT;
2196         phys_addr_t pfn;
2197
2198         if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2199             page_idx + npages)
2200                 return -EINVAL;
2201
2202         pfn = ((dev->mdev->bar_addr +
2203               MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2204               PAGE_SHIFT) +
2205               page_idx;
2206         return rdma_user_mmap_io(context, vma, pfn, map_size,
2207                                  pgprot_writecombine(vma->vm_page_prot));
2208 }
2209
2210 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2211 {
2212         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2213         struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2214         unsigned long command;
2215         phys_addr_t pfn;
2216
2217         command = get_command(vma->vm_pgoff);
2218         switch (command) {
2219         case MLX5_IB_MMAP_WC_PAGE:
2220         case MLX5_IB_MMAP_NC_PAGE:
2221         case MLX5_IB_MMAP_REGULAR_PAGE:
2222         case MLX5_IB_MMAP_ALLOC_WC:
2223                 return uar_mmap(dev, command, vma, context);
2224
2225         case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2226                 return -ENOSYS;
2227
2228         case MLX5_IB_MMAP_CORE_CLOCK:
2229                 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2230                         return -EINVAL;
2231
2232                 if (vma->vm_flags & VM_WRITE)
2233                         return -EPERM;
2234
2235                 /* Don't expose information to user space that it shouldn't have */
2236                 if (PAGE_SIZE > 4096)
2237                         return -EOPNOTSUPP;
2238
2239                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2240                 pfn = (dev->mdev->iseg_base +
2241                        offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2242                         PAGE_SHIFT;
2243                 if (io_remap_pfn_range(vma, vma->vm_start, pfn,
2244                                        PAGE_SIZE, vma->vm_page_prot))
2245                         return -EAGAIN;
2246                 break;
2247         case MLX5_IB_MMAP_CLOCK_INFO:
2248                 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2249
2250         case MLX5_IB_MMAP_DEVICE_MEM:
2251                 return dm_mmap(ibcontext, vma);
2252
2253         default:
2254                 return -EINVAL;
2255         }
2256
2257         return 0;
2258 }
2259
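/*
 * Allocate device memory (MEMIC): round the length up to the MEMIC
 * block size, report the page offset and page index back to user
 * space, and mark the pages as owned in the context's dm_pages bitmap.
 */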
2260 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2261                                struct ib_ucontext *context,
2262                                struct ib_dm_alloc_attr *attr,
2263                                struct uverbs_attr_bundle *attrs)
2264 {
2265         u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2266         struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
2267         phys_addr_t memic_addr;
2268         struct mlx5_ib_dm *dm;
2269         u64 start_offset;
2270         u32 page_idx;
2271         int err;
2272
2273         dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2274         if (!dm)
2275                 return ERR_PTR(-ENOMEM);
2276
2277         mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
2278                     attr->length, act_size, attr->alignment);
2279
2280         err = mlx5_cmd_alloc_memic(memic, &memic_addr,
2281                                    act_size, attr->alignment);
2282         if (err)
2283                 goto err_free;
2284
2285         start_offset = memic_addr & ~PAGE_MASK;
2286         page_idx = (memic_addr - memic->dev->bar_addr -
2287                     MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2288                     PAGE_SHIFT;
2289
2290         err = uverbs_copy_to(attrs,
2291                              MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2292                              &start_offset, sizeof(start_offset));
2293         if (err)
2294                 goto err_dealloc;
2295
2296         err = uverbs_copy_to(attrs,
2297                              MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2298                              &page_idx, sizeof(page_idx));
2299         if (err)
2300                 goto err_dealloc;
2301
2302         bitmap_set(to_mucontext(context)->dm_pages, page_idx,
2303                    DIV_ROUND_UP(act_size, PAGE_SIZE));
2304
2305         dm->dev_addr = memic_addr;
2306
2307         return &dm->ibdm;
2308
2309 err_dealloc:
2310         mlx5_cmd_dealloc_memic(memic, memic_addr,
2311                                act_size);
2312 err_free:
2313         kfree(dm);
2314         return ERR_PTR(err);
2315 }
2316
2317 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
2318 {
2319         struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
2320         struct mlx5_ib_dm *dm = to_mdm(ibdm);
2321         u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
2322         u32 page_idx;
2323         int ret;
2324
2325         ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
2326         if (ret)
2327                 return ret;
2328
2329         page_idx = (dm->dev_addr - memic->dev->bar_addr -
2330                     MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2331                     PAGE_SHIFT;
2332         bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
2333                      page_idx,
2334                      DIV_ROUND_UP(act_size, PAGE_SIZE));
2335
2336         kfree(dm);
2337
2338         return 0;
2339 }
2340
2341 static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
2342                             struct ib_udata *udata)
2343 {
2344         struct mlx5_ib_pd *pd = to_mpd(ibpd);
2345         struct ib_device *ibdev = ibpd->device;
2346         struct mlx5_ib_alloc_pd_resp resp;
2347         int err;
2348         u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
2349         u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {};
2350         u16 uid = 0;
2351
2352         uid = context ? to_mucontext(context)->devx_uid : 0;
2353         MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
2354         MLX5_SET(alloc_pd_in, in, uid, uid);
2355         err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
2356                             out, sizeof(out));
2357         if (err)
2358                 return err;
2359
2360         pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
2361         pd->uid = uid;
2362         if (context) {
2363                 resp.pdn = pd->pdn;
2364                 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2365                         mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2366                         return -EFAULT;
2367                 }
2368         }
2369
2370         return 0;
2371 }
2372
2373 static void mlx5_ib_dealloc_pd(struct ib_pd *pd)
2374 {
2375         struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2376         struct mlx5_ib_pd *mpd = to_mpd(pd);
2377
2378         mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2379 }
2380
2381 enum {
2382         MATCH_CRITERIA_ENABLE_OUTER_BIT,
2383         MATCH_CRITERIA_ENABLE_MISC_BIT,
2384         MATCH_CRITERIA_ENABLE_INNER_BIT,
2385         MATCH_CRITERIA_ENABLE_MISC2_BIT
2386 };
2387
2388 #define HEADER_IS_ZERO(match_criteria, headers)                            \
2389         !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2390                     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \
2391
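/*
 * Build the match_criteria_enable bitmask: one bit per header group
 * (outer, misc, inner, misc2) that carries a non-zero match mask.
 */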
2392 static u8 get_match_criteria_enable(u32 *match_criteria)
2393 {
2394         u8 match_criteria_enable;
2395
2396         match_criteria_enable =
2397                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2398                 MATCH_CRITERIA_ENABLE_OUTER_BIT;
2399         match_criteria_enable |=
2400                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2401                 MATCH_CRITERIA_ENABLE_MISC_BIT;
2402         match_criteria_enable |=
2403                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2404                 MATCH_CRITERIA_ENABLE_INNER_BIT;
2405         match_criteria_enable |=
2406                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2407                 MATCH_CRITERIA_ENABLE_MISC2_BIT;
2408
2409         return match_criteria_enable;
2410 }
2411
2412 static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2413 {
2414         u8 entry_mask;
2415         u8 entry_val;
2416         int err = 0;
2417
2418         if (!mask)
2419                 goto out;
2420
2421         entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
2422                               ip_protocol);
2423         entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
2424                              ip_protocol);
2425         if (!entry_mask) {
2426                 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2427                 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2428                 goto out;
2429         }
2430         /* Don't override existing ip protocol */
2431         if (mask != entry_mask || val != entry_val)
2432                 err = -EINVAL;
2433 out:
2434         return err;
2435 }
2436
2437 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2438                            bool inner)
2439 {
2440         if (inner) {
2441                 MLX5_SET(fte_match_set_misc,
2442                          misc_c, inner_ipv6_flow_label, mask);
2443                 MLX5_SET(fte_match_set_misc,
2444                          misc_v, inner_ipv6_flow_label, val);
2445         } else {
2446                 MLX5_SET(fte_match_set_misc,
2447                          misc_c, outer_ipv6_flow_label, mask);
2448                 MLX5_SET(fte_match_set_misc,
2449                          misc_v, outer_ipv6_flow_label, val);
2450         }
2451 }
2452
2453 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2454 {
2455         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2456         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2457         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2458         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2459 }
2460
2461 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2462 {
2463         if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2464             !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2465                 return -EOPNOTSUPP;
2466
2467         if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2468             !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2469                 return -EOPNOTSUPP;
2470
2471         if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2472             !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2473                 return -EOPNOTSUPP;
2474
2475         if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2476             !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2477                 return -EOPNOTSUPP;
2478
2479         return 0;
2480 }
2481
2482 #define LAST_ETH_FIELD vlan_tag
2483 #define LAST_IB_FIELD sl
2484 #define LAST_IPV4_FIELD tos
2485 #define LAST_IPV6_FIELD traffic_class
2486 #define LAST_TCP_UDP_FIELD src_port
2487 #define LAST_TUNNEL_FIELD tunnel_id
2488 #define LAST_FLOW_TAG_FIELD tag_id
2489 #define LAST_DROP_FIELD size
2490 #define LAST_COUNTERS_FIELD counters
2491
2492 /* "field" is the last field of "filter" that the driver supports */
2493 #define FIELDS_NOT_SUPPORTED(filter, field)\
2494         memchr_inv((void *)&filter.field  +\
2495                    sizeof(filter.field), 0,\
2496                    sizeof(filter) -\
2497                    offsetof(typeof(filter), field) -\
2498                    sizeof(filter.field))
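/*
 * For example, FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)
 * is non-zero iff any byte of the mask beyond vlan_tag is set, i.e.
 * the user asked to match on a field this driver does not handle.
 */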
2499
2500 int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
2501                            bool is_egress,
2502                            struct mlx5_flow_act *action)
2503 {
2504
2505         switch (maction->ib_action.type) {
2506         case IB_FLOW_ACTION_ESP:
2507                 if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2508                                       MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
2509                         return -EINVAL;
2510                 /* Currently only AES_GCM keymat is supported by the driver */
2511                 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2512                 action->action |= is_egress ?
2513                         MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2514                         MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2515                 return 0;
2516         case IB_FLOW_ACTION_UNSPECIFIED:
2517                 if (maction->flow_action_raw.sub_type ==
2518                     MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
2519                         if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
2520                                 return -EINVAL;
2521                         action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2522                         action->modify_id = maction->flow_action_raw.action_id;
2523                         return 0;
2524                 }
2525                 if (maction->flow_action_raw.sub_type ==
2526                     MLX5_IB_FLOW_ACTION_DECAP) {
2527                         if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
2528                                 return -EINVAL;
2529                         action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2530                         return 0;
2531                 }
2532                 if (maction->flow_action_raw.sub_type ==
2533                     MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
2534                         if (action->action &
2535                             MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
2536                                 return -EINVAL;
2537                         action->action |=
2538                                 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
2539                         action->reformat_id =
2540                                 maction->flow_action_raw.action_id;
2541                         return 0;
2542                 }
2543                 /* fall through */
2544         default:
2545                 return -EOPNOTSUPP;
2546         }
2547 }
2548
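/*
 * Translate a single ib_flow_spec into the mlx5 fte_match_param
 * mask/value pair, selecting inner or outer headers as the spec
 * dictates.
 */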
2549 static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2550                            u32 *match_v, const union ib_flow_spec *ib_spec,
2551                            const struct ib_flow_attr *flow_attr,
2552                            struct mlx5_flow_act *action, u32 prev_type)
2553 {
2554         void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2555                                            misc_parameters);
2556         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2557                                            misc_parameters);
2558         void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2559                                             misc_parameters_2);
2560         void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2561                                             misc_parameters_2);
2562         void *headers_c;
2563         void *headers_v;
2564         int match_ipv;
2565         int ret;
2566
2567         if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2568                 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2569                                          inner_headers);
2570                 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2571                                          inner_headers);
2572                 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2573                                         ft_field_support.inner_ip_version);
2574         } else {
2575                 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2576                                          outer_headers);
2577                 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2578                                          outer_headers);
2579                 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2580                                         ft_field_support.outer_ip_version);
2581         }
2582
2583         switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2584         case IB_FLOW_SPEC_ETH:
2585                 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
2586                         return -EOPNOTSUPP;
2587
2588                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2589                                              dmac_47_16),
2590                                 ib_spec->eth.mask.dst_mac);
2591                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2592                                              dmac_47_16),
2593                                 ib_spec->eth.val.dst_mac);
2594
2595                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2596                                              smac_47_16),
2597                                 ib_spec->eth.mask.src_mac);
2598                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2599                                              smac_47_16),
2600                                 ib_spec->eth.val.src_mac);
2601
2602                 if (ib_spec->eth.mask.vlan_tag) {
2603                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2604                                  cvlan_tag, 1);
2605                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2606                                  cvlan_tag, 1);
2607
2608                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2609                                  first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2610                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2611                                  first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2612
2613                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2614                                  first_cfi,
2615                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2616                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2617                                  first_cfi,
2618                                  ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2619
2620                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2621                                  first_prio,
2622                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2623                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2624                                  first_prio,
2625                                  ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2626                 }
2627                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2628                          ethertype, ntohs(ib_spec->eth.mask.ether_type));
2629                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2630                          ethertype, ntohs(ib_spec->eth.val.ether_type));
2631                 break;
2632         case IB_FLOW_SPEC_IPV4:
2633                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2634                         return -EOPNOTSUPP;
2635
2636                 if (match_ipv) {
2637                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2638                                  ip_version, 0xf);
2639                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2640                                  ip_version, MLX5_FS_IPV4_VERSION);
2641                 } else {
2642                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2643                                  ethertype, 0xffff);
2644                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2645                                  ethertype, ETH_P_IP);
2646                 }
2647
2648                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2649                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2650                        &ib_spec->ipv4.mask.src_ip,
2651                        sizeof(ib_spec->ipv4.mask.src_ip));
2652                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2653                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2654                        &ib_spec->ipv4.val.src_ip,
2655                        sizeof(ib_spec->ipv4.val.src_ip));
2656                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2657                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2658                        &ib_spec->ipv4.mask.dst_ip,
2659                        sizeof(ib_spec->ipv4.mask.dst_ip));
2660                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2661                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2662                        &ib_spec->ipv4.val.dst_ip,
2663                        sizeof(ib_spec->ipv4.val.dst_ip));
2664
2665                 set_tos(headers_c, headers_v,
2666                         ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2667
2668                 if (set_proto(headers_c, headers_v,
2669                               ib_spec->ipv4.mask.proto,
2670                               ib_spec->ipv4.val.proto))
2671                         return -EINVAL;
2672                 break;
2673         case IB_FLOW_SPEC_IPV6:
2674                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2675                         return -EOPNOTSUPP;
2676
2677                 if (match_ipv) {
2678                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2679                                  ip_version, 0xf);
2680                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2681                                  ip_version, MLX5_FS_IPV6_VERSION);
2682                 } else {
2683                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2684                                  ethertype, 0xffff);
2685                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2686                                  ethertype, ETH_P_IPV6);
2687                 }
2688
2689                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2690                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2691                        &ib_spec->ipv6.mask.src_ip,
2692                        sizeof(ib_spec->ipv6.mask.src_ip));
2693                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2694                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2695                        &ib_spec->ipv6.val.src_ip,
2696                        sizeof(ib_spec->ipv6.val.src_ip));
2697                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2698                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2699                        &ib_spec->ipv6.mask.dst_ip,
2700                        sizeof(ib_spec->ipv6.mask.dst_ip));
2701                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2702                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2703                        &ib_spec->ipv6.val.dst_ip,
2704                        sizeof(ib_spec->ipv6.val.dst_ip));
2705
2706                 set_tos(headers_c, headers_v,
2707                         ib_spec->ipv6.mask.traffic_class,
2708                         ib_spec->ipv6.val.traffic_class);
2709
2710                 if (set_proto(headers_c, headers_v,
2711                               ib_spec->ipv6.mask.next_hdr,
2712                               ib_spec->ipv6.val.next_hdr))
2713                         return -EINVAL;
2714
2715                 set_flow_label(misc_params_c, misc_params_v,
2716                                ntohl(ib_spec->ipv6.mask.flow_label),
2717                                ntohl(ib_spec->ipv6.val.flow_label),
2718                                ib_spec->type & IB_FLOW_SPEC_INNER);
2719                 break;
2720         case IB_FLOW_SPEC_ESP:
2721                 if (ib_spec->esp.mask.seq)
2722                         return -EOPNOTSUPP;
2723
2724                 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2725                          ntohl(ib_spec->esp.mask.spi));
2726                 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2727                          ntohl(ib_spec->esp.val.spi));
2728                 break;
2729         case IB_FLOW_SPEC_TCP:
2730                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2731                                          LAST_TCP_UDP_FIELD))
2732                         return -EOPNOTSUPP;
2733
2734                 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
2735                         return -EINVAL;
2736
2737                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2738                          ntohs(ib_spec->tcp_udp.mask.src_port));
2739                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2740                          ntohs(ib_spec->tcp_udp.val.src_port));
2741
2742                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2743                          ntohs(ib_spec->tcp_udp.mask.dst_port));
2744                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2745                          ntohs(ib_spec->tcp_udp.val.dst_port));
2746                 break;
2747         case IB_FLOW_SPEC_UDP:
2748                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2749                                          LAST_TCP_UDP_FIELD))
2750                         return -EOPNOTSUPP;
2751
2752                 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
2753                         return -EINVAL;
2754
2755                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2756                          ntohs(ib_spec->tcp_udp.mask.src_port));
2757                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2758                          ntohs(ib_spec->tcp_udp.val.src_port));
2759
2760                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2761                          ntohs(ib_spec->tcp_udp.mask.dst_port));
2762                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2763                          ntohs(ib_spec->tcp_udp.val.dst_port));
2764                 break;
2765         case IB_FLOW_SPEC_GRE:
2766                 if (ib_spec->gre.mask.c_ks_res0_ver)
2767                         return -EOPNOTSUPP;
2768
2769                 if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
2770                         return -EINVAL;
2771
2772                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2773                          0xff);
2774                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2775                          IPPROTO_GRE);
2776
2777                 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
2778                          ntohs(ib_spec->gre.mask.protocol));
2779                 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2780                          ntohs(ib_spec->gre.val.protocol));
2781
2782                 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2783                                     gre_key.nvgre.hi),
2784                        &ib_spec->gre.mask.key,
2785                        sizeof(ib_spec->gre.mask.key));
2786                 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2787                                     gre_key.nvgre.hi),
2788                        &ib_spec->gre.val.key,
2789                        sizeof(ib_spec->gre.val.key));
2790                 break;
2791         case IB_FLOW_SPEC_MPLS:
2792                 switch (prev_type) {
2793                 case IB_FLOW_SPEC_UDP:
2794                         if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2795                                                    ft_field_support.outer_first_mpls_over_udp),
2796                                                    &ib_spec->mpls.mask.tag))
2797                                 return -EOPNOTSUPP;
2798
2799                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2800                                             outer_first_mpls_over_udp),
2801                                &ib_spec->mpls.val.tag,
2802                                sizeof(ib_spec->mpls.val.tag));
2803                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2804                                             outer_first_mpls_over_udp),
2805                                &ib_spec->mpls.mask.tag,
2806                                sizeof(ib_spec->mpls.mask.tag));
2807                         break;
2808                 case IB_FLOW_SPEC_GRE:
2809                         if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2810                                                    ft_field_support.outer_first_mpls_over_gre),
2811                                                    &ib_spec->mpls.mask.tag))
2812                                 return -EOPNOTSUPP;
2813
2814                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2815                                             outer_first_mpls_over_gre),
2816                                &ib_spec->mpls.val.tag,
2817                                sizeof(ib_spec->mpls.val.tag));
2818                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2819                                             outer_first_mpls_over_gre),
2820                                &ib_spec->mpls.mask.tag,
2821                                sizeof(ib_spec->mpls.mask.tag));
2822                         break;
2823                 default:
2824                         if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2825                                 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2826                                                            ft_field_support.inner_first_mpls),
2827                                                            &ib_spec->mpls.mask.tag))
2828                                         return -EOPNOTSUPP;
2829
2830                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2831                                                     inner_first_mpls),
2832                                        &ib_spec->mpls.val.tag,
2833                                        sizeof(ib_spec->mpls.val.tag));
2834                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2835                                                     inner_first_mpls),
2836                                        &ib_spec->mpls.mask.tag,
2837                                        sizeof(ib_spec->mpls.mask.tag));
2838                         } else {
2839                                 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2840                                                            ft_field_support.outer_first_mpls),
2841                                                            &ib_spec->mpls.mask.tag))
2842                                         return -EOPNOTSUPP;
2843
2844                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2845                                                     outer_first_mpls),
2846                                        &ib_spec->mpls.val.tag,
2847                                        sizeof(ib_spec->mpls.val.tag));
2848                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2849                                                     outer_first_mpls),
2850                                        &ib_spec->mpls.mask.tag,
2851                                        sizeof(ib_spec->mpls.mask.tag));
2852                         }
2853                 }
2854                 break;
2855         case IB_FLOW_SPEC_VXLAN_TUNNEL:
2856                 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2857                                          LAST_TUNNEL_FIELD))
2858                         return -EOPNOTSUPP;
2859
2860                 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2861                          ntohl(ib_spec->tunnel.mask.tunnel_id));
2862                 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2863                          ntohl(ib_spec->tunnel.val.tunnel_id));
2864                 break;
2865         case IB_FLOW_SPEC_ACTION_TAG:
2866                 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2867                                          LAST_FLOW_TAG_FIELD))
2868                         return -EOPNOTSUPP;
2869                 if (ib_spec->flow_tag.tag_id >= BIT(24))
2870                         return -EINVAL;
2871
2872                 action->flow_tag = ib_spec->flow_tag.tag_id;
2873                 action->flags |= FLOW_ACT_HAS_TAG;
2874                 break;
2875         case IB_FLOW_SPEC_ACTION_DROP:
2876                 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2877                                          LAST_DROP_FIELD))
2878                         return -EOPNOTSUPP;
2879                 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2880                 break;
2881         case IB_FLOW_SPEC_ACTION_HANDLE:
2882                 ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
2883                         flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
2884                 if (ret)
2885                         return ret;
2886                 break;
2887         case IB_FLOW_SPEC_ACTION_COUNT:
2888                 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2889                                          LAST_COUNTERS_FIELD))
2890                         return -EOPNOTSUPP;
2891
2892                 /* for now support only one counters spec per flow */
2893                 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
2894                         return -EINVAL;
2895
2896                 action->counters = ib_spec->flow_count.counters;
2897                 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2898                 break;
2899         default:
2900                 return -EINVAL;
2901         }
2902
2903         return 0;
2904 }
2905
2906 /* If a flow could match both multicast and unicast packets, it must not
2907  * be placed in the multicast flow steering table, where such a rule
2908  * could steal packets belonging to other multicast flows.
2909  */
2910 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
2911 {
2912         union ib_flow_spec *flow_spec;
2913
2914         if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
2915             ib_attr->num_of_specs < 1)
2916                 return false;
2917
2918         flow_spec = (union ib_flow_spec *)(ib_attr + 1);
2919         if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
2920                 struct ib_flow_spec_ipv4 *ipv4_spec;
2921
2922                 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
2923                 if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
2924                         return true;
2925
2926                 return false;
2927         }
2928
2929         if (flow_spec->type == IB_FLOW_SPEC_ETH) {
2930                 struct ib_flow_spec_eth *eth_spec;
2931
2932                 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2933                 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2934                        is_multicast_ether_addr(eth_spec->val.dst_mac);
2935         }
2936
2937         return false;
2938 }
2939
2940 enum valid_spec {
2941         VALID_SPEC_INVALID,
2942         VALID_SPEC_VALID,
2943         VALID_SPEC_NA,
2944 };
2945
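/*
 * Classify a spec for ESP/AES-GCM offload: rules without an
 * encrypt/decrypt action are out of scope (VALID_SPEC_NA); crypto rules
 * are valid only when they match on IPsec fields and, on egress, carry
 * neither a drop action nor a flow tag.
 */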
2946 static enum valid_spec
2947 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
2948                      const struct mlx5_flow_spec *spec,
2949                      const struct mlx5_flow_act *flow_act,
2950                      bool egress)
2951 {
2952         const u32 *match_c = spec->match_criteria;
2953         bool is_crypto =
2954                 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2955                                      MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
2956         bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2957         bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2958
2959         /*
2960          * Currently only crypto is supported in egress; until regular egress
2961          * rules are supported, treat any non-crypto spec as VALID_SPEC_NA.
2962          */
2963         if (!is_crypto)
2964                 return VALID_SPEC_NA;
2965
2966         return is_crypto && is_ipsec &&
2967                 (!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
2968                 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2969 }
2970
2971 static bool is_valid_spec(struct mlx5_core_dev *mdev,
2972                           const struct mlx5_flow_spec *spec,
2973                           const struct mlx5_flow_act *flow_act,
2974                           bool egress)
2975 {
2976         /* We currently only support IPsec egress flows */
2977         return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
2978 }
2979
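/*
 * Cross-check L2 and L3 specs at one encapsulation level: when a spec
 * fixes the ethertype with a full mask, it must agree with any IPv4/IPv6
 * spec at the same level; MPLS ethertypes are accepted only when the
 * device can match on ip_version instead.
 */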
2980 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2981                                const struct ib_flow_attr *flow_attr,
2982                                bool check_inner)
2983 {
2984         union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
2985         int match_ipv = check_inner ?
2986                         MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2987                                         ft_field_support.inner_ip_version) :
2988                         MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2989                                         ft_field_support.outer_ip_version);
2990         int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
2991         bool ipv4_spec_valid, ipv6_spec_valid;
2992         unsigned int ip_spec_type = 0;
2993         bool has_ethertype = false;
2994         unsigned int spec_index;
2995         bool mask_valid = true;
2996         u16 eth_type = 0;
2997         bool type_valid;
2998
2999         /* Validate that ethertype is correct */
3000         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3001                 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
3002                     ib_spec->eth.mask.ether_type) {
3003                         mask_valid = (ib_spec->eth.mask.ether_type ==
3004                                       htons(0xffff));
3005                         has_ethertype = true;
3006                         eth_type = ntohs(ib_spec->eth.val.ether_type);
3007                 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
3008                            (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
3009                         ip_spec_type = ib_spec->type;
3010                 }
3011                 ib_spec = (void *)ib_spec + ib_spec->size;
3012         }
3013
3014         type_valid = (!has_ethertype) || (!ip_spec_type);
3015         if (!type_valid && mask_valid) {
3016                 ipv4_spec_valid = (eth_type == ETH_P_IP) &&
3017                         (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
3018                 ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
3019                         (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
3020
3021                 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
3022                              (((eth_type == ETH_P_MPLS_UC) ||
3023                                (eth_type == ETH_P_MPLS_MC)) && match_ipv);
3024         }
3025
3026         return type_valid;
3027 }
3028
3029 static bool is_valid_attr(struct mlx5_core_dev *mdev,
3030                           const struct ib_flow_attr *flow_attr)
3031 {
3032         return is_valid_ethertype(mdev, flow_attr, false) &&
3033                is_valid_ethertype(mdev, flow_attr, true);
3034 }
3035
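/*
 * Drop the reference a flow took on its priority's table (if any) and
 * destroy the table once the last user is gone.
 */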
3036 static void put_flow_table(struct mlx5_ib_dev *dev,
3037                            struct mlx5_ib_flow_prio *prio, bool ft_added)
3038 {
3039         prio->refcount -= !!ft_added;
3040         if (!prio->refcount) {
3041                 mlx5_destroy_flow_table(prio->flow_table);
3042                 prio->flow_table = NULL;
3043         }
3044 }
3045
3046 static void counters_clear_description(struct ib_counters *counters)
3047 {
3048         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3049
3050         mutex_lock(&mcounters->mcntrs_mutex);
3051         kfree(mcounters->counters_data);
3052         mcounters->counters_data = NULL;
3053         mcounters->cntrs_max_index = 0;
3054         mutex_unlock(&mcounters->mcntrs_mutex);
3055 }
3056
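/*
 * Destroy a flow and every rule chained on handler->list (don't-trap,
 * leftovers and sniffer companions), dropping flow table references and
 * clearing the counters description when this was its last user.
 */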
3057 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
3058 {
3059         struct mlx5_ib_flow_handler *handler = container_of(flow_id,
3060                                                           struct mlx5_ib_flow_handler,
3061                                                           ibflow);
3062         struct mlx5_ib_flow_handler *iter, *tmp;
3063         struct mlx5_ib_dev *dev = handler->dev;
3064
3065         mutex_lock(&dev->flow_db->lock);
3066
3067         list_for_each_entry_safe(iter, tmp, &handler->list, list) {
3068                 mlx5_del_flow_rules(iter->rule);
3069                 put_flow_table(dev, iter->prio, true);
3070                 list_del(&iter->list);
3071                 kfree(iter);
3072         }
3073
3074         mlx5_del_flow_rules(handler->rule);
3075         put_flow_table(dev, handler->prio, true);
3076         if (handler->ibcounters &&
3077             atomic_read(&handler->ibcounters->usecnt) == 1)
3078                 counters_clear_description(handler->ibcounters);
3079
3080         mutex_unlock(&dev->flow_db->lock);
3081         if (handler->flow_matcher)
3082                 atomic_dec(&handler->flow_matcher->usecnt);
3083         kfree(handler);
3084
3085         return 0;
3086 }
3087
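/*
 * Each IB priority maps onto two core priorities: don't-trap rules take
 * the even slot and regular rules the odd one, so that a don't-trap rule
 * can be matched ahead of a regular rule of the same IB priority.
 */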
3088 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
3089 {
3090         priority *= 2;
3091         if (!dont_trap)
3092                 priority++;
3093         return priority;
3094 }
3095
3096 enum flow_table_type {
3097         MLX5_IB_FT_RX,
3098         MLX5_IB_FT_TX
3099 };
3100
3101 #define MLX5_FS_MAX_TYPES        6
3102 #define MLX5_FS_MAX_ENTRIES      BIT(16)
3103
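/*
 * Back a flow priority with an auto-grouped flow table on first use;
 * callers hold flow_db->lock and account users via prio->refcount.
 */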
3104 static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
3105                                            struct mlx5_ib_flow_prio *prio,
3106                                            int priority,
3107                                            int num_entries, int num_groups,
3108                                            u32 flags)
3109 {
3110         struct mlx5_flow_table *ft;
3111
3112         ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3113                                                  num_entries,
3114                                                  num_groups,
3115                                                  0, flags);
3116         if (IS_ERR(ft))
3117                 return ERR_CAST(ft);
3118
3119         prio->flow_table = ft;
3120         prio->refcount = 0;
3121         return prio;
3122 }
3123
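/*
 * Pick (creating on first use) the flow table for an attribute type:
 * the bypass or egress namespace for NORMAL rules, the leftovers
 * namespace for the default MC/UC catch-alls, and the sniffer RX/TX
 * namespaces for SNIFFER rules.
 */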
3124 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3125                                                 struct ib_flow_attr *flow_attr,
3126                                                 enum flow_table_type ft_type)
3127 {
3128         bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3129         struct mlx5_flow_namespace *ns = NULL;
3130         struct mlx5_ib_flow_prio *prio;
3131         struct mlx5_flow_table *ft;
3132         int max_table_size;
3133         int num_entries;
3134         int num_groups;
3135         u32 flags = 0;
3136         int priority;
3137
3138         max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3139                                                        log_max_ft_size));
3140         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3141                 enum mlx5_flow_namespace_type fn_type;
3142
3143                 if (flow_is_multicast_only(flow_attr) &&
3144                     !dont_trap)
3145                         priority = MLX5_IB_FLOW_MCAST_PRIO;
3146                 else
3147                         priority = ib_prio_to_core_prio(flow_attr->priority,
3148                                                         dont_trap);
3149                 if (ft_type == MLX5_IB_FT_RX) {
3150                         fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
3151                         prio = &dev->flow_db->prios[priority];
3152                         if (!dev->rep &&
3153                             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3154                                 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3155                         if (!dev->rep &&
3156                             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3157                                         reformat_l3_tunnel_to_l2))
3158                                 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3159                 } else {
3160                         max_table_size =
3161                                 BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3162                                                               log_max_ft_size));
3163                         fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
3164                         prio = &dev->flow_db->egress_prios[priority];
3165                         if (!dev->rep &&
3166                             MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3167                                 flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3168                 }
3169                 ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
3170                 num_entries = MLX5_FS_MAX_ENTRIES;
3171                 num_groups = MLX5_FS_MAX_TYPES;
3172         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3173                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3174                 ns = mlx5_get_flow_namespace(dev->mdev,
3175                                              MLX5_FLOW_NAMESPACE_LEFTOVERS);
3176                 build_leftovers_ft_param(&priority,
3177                                          &num_entries,
3178                                          &num_groups);
3179                 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
3180         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3181                 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3182                                         allow_sniffer_and_nic_rx_shared_tir))
3183                         return ERR_PTR(-ENOTSUPP);
3184
3185                 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3186                                              MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3187                                              MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3188
3189                 prio = &dev->flow_db->sniffer[ft_type];
3190                 priority = 0;
3191                 num_entries = 1;
3192                 num_groups = 1;
3193         }
3194
3195         if (!ns)
3196                 return ERR_PTR(-ENOTSUPP);
3197
3198         if (num_entries > max_table_size)
3199                 return ERR_PTR(-ENOMEM);
3200
3201         ft = prio->flow_table;
3202         if (!ft)
3203                 return _get_prio(ns, prio, priority, num_entries, num_groups,
3204                                  flags);
3205
3206         return prio;
3207 }
3208
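/*
 * When a non-zero underlay QPN is given and the device can match on
 * bth_dst_qp, constrain the rule to traffic destined to that QP.
 */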
3209 static void set_underlay_qp(struct mlx5_ib_dev *dev,
3210                             struct mlx5_flow_spec *spec,
3211                             u32 underlay_qpn)
3212 {
3213         void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3214                                            spec->match_criteria,
3215                                            misc_parameters);
3216         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3217                                            misc_parameters);
3218
3219         if (underlay_qpn &&
3220             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3221                                       ft_field_support.bth_dst_qp)) {
3222                 MLX5_SET(fte_match_set_misc,
3223                          misc_params_v, bth_dst_qp, underlay_qpn);
3224                 MLX5_SET(fte_match_set_misc,
3225                          misc_params_c, bth_dst_qp, 0xffffff);
3226         }
3227 }
3228
3229 static int read_flow_counters(struct ib_device *ibdev,
3230                               struct mlx5_read_counters_attr *read_attr)
3231 {
3232         struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3233         struct mlx5_ib_dev *dev = to_mdev(ibdev);
3234
3235         return mlx5_fc_query(dev->mdev, fc,
3236                              &read_attr->out[IB_COUNTER_PACKETS],
3237                              &read_attr->out[IB_COUNTER_BYTES]);
3238 }
3239
3240 /* flow counters currently expose two counters: packets and bytes */
3241 #define FLOW_COUNTERS_NUM 2
3242 static int counters_set_description(struct ib_counters *counters,
3243                                     enum mlx5_ib_counters_type counters_type,
3244                                     struct mlx5_ib_flow_counters_desc *desc_data,
3245                                     u32 ncounters)
3246 {
3247         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3248         u32 cntrs_max_index = 0;
3249         int i;
3250
3251         if (counters_type != MLX5_IB_COUNTERS_FLOW)
3252                 return -EINVAL;
3253
3254         /* init the fields for the object */
3255         mcounters->type = counters_type;
3256         mcounters->read_counters = read_flow_counters;
3257         mcounters->counters_num = FLOW_COUNTERS_NUM;
3258         mcounters->ncounters = ncounters;
3259         /* each counter entry has both a description and an index */
3260         for (i = 0; i < ncounters; i++) {
3261                 if (desc_data[i].description > IB_COUNTER_BYTES)
3262                         return -EINVAL;
3263
3264                 if (cntrs_max_index <= desc_data[i].index)
3265                         cntrs_max_index = desc_data[i].index + 1;
3266         }
3267
3268         mutex_lock(&mcounters->mcntrs_mutex);
3269         mcounters->counters_data = desc_data;
3270         mcounters->cntrs_max_index = cntrs_max_index;
3271         mutex_unlock(&mcounters->mcntrs_mutex);
3272
3273         return 0;
3274 }
3275
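/*
 * Attach user-provided counter descriptions to an ib_counters object and
 * allocate its HW flow counter on first use.  Description data may be
 * bound only once; later flows reusing the object must not pass any.
 */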
3276 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
3277 static int flow_counters_set_data(struct ib_counters *ibcounters,
3278                                   struct mlx5_ib_create_flow *ucmd)
3279 {
3280         struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3281         struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3282         struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3283         bool hw_hndl = false;
3284         int ret = 0;
3285
3286         if (ucmd && ucmd->ncounters_data != 0) {
3287                 cntrs_data = ucmd->data;
3288                 if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3289                         return -EINVAL;
3290
3291                 desc_data = kcalloc(cntrs_data->ncounters,
3292                                     sizeof(*desc_data),
3293                                     GFP_KERNEL);
3294                 if (!desc_data)
3295                         return -ENOMEM;
3296
3297                 if (copy_from_user(desc_data,
3298                                    u64_to_user_ptr(cntrs_data->counters_data),
3299                                    sizeof(*desc_data) * cntrs_data->ncounters)) {
3300                         ret = -EFAULT;
3301                         goto free;
3302                 }
3303         }
3304
3305         if (!mcounters->hw_cntrs_hndl) {
3306                 mcounters->hw_cntrs_hndl = mlx5_fc_create(
3307                         to_mdev(ibcounters->device)->mdev, false);
3308                 if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3309                         ret = PTR_ERR(mcounters->hw_cntrs_hndl);
3310                         goto free;
3311                 }
3312                 hw_hndl = true;
3313         }
3314
3315         if (desc_data) {
3316                 /* counters already bound to at least one flow */
3317                 if (mcounters->cntrs_max_index) {
3318                         ret = -EINVAL;
3319                         goto free_hndl;
3320                 }
3321
3322                 ret = counters_set_description(ibcounters,
3323                                                MLX5_IB_COUNTERS_FLOW,
3324                                                desc_data,
3325                                                cntrs_data->ncounters);
3326                 if (ret)
3327                         goto free_hndl;
3328
3329         } else if (!mcounters->cntrs_max_index) {
3330                 /* counters not bound yet; description data must have been passed */
3331                 ret = -EINVAL;
3332                 goto free_hndl;
3333         }
3334
3335         return 0;
3336
3337 free_hndl:
3338         if (hw_hndl) {
3339                 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3340                                 mcounters->hw_cntrs_hndl);
3341                 mcounters->hw_cntrs_hndl = NULL;
3342         }
3343 free:
3344         kfree(desc_data);
3345         return ret;
3346 }
3347
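/*
 * Build one mlx5 flow rule from an ib_flow_attr and its trailing specs:
 * parse each spec into match criteria/value, pin the rule to the
 * underlay QP or the eswitch source port where relevant, append a
 * counter destination when the specs ask for counting, and install the
 * result in ft_prio's table.
 */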
3348 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3349                                                       struct mlx5_ib_flow_prio *ft_prio,
3350                                                       const struct ib_flow_attr *flow_attr,
3351                                                       struct mlx5_flow_destination *dst,
3352                                                       u32 underlay_qpn,
3353                                                       struct mlx5_ib_create_flow *ucmd)
3354 {
3355         struct mlx5_flow_table  *ft = ft_prio->flow_table;
3356         struct mlx5_ib_flow_handler *handler;
3357         struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
3358         struct mlx5_flow_spec *spec;
3359         struct mlx5_flow_destination dest_arr[2] = {};
3360         struct mlx5_flow_destination *rule_dst = dest_arr;
3361         const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3362         unsigned int spec_index;
3363         u32 prev_type = 0;
3364         int err = 0;
3365         int dest_num = 0;
3366         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3367
3368         if (!is_valid_attr(dev->mdev, flow_attr))
3369                 return ERR_PTR(-EINVAL);
3370
3371         if (dev->rep && is_egress)
3372                 return ERR_PTR(-EINVAL);
3373
3374         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3375         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3376         if (!handler || !spec) {
3377                 err = -ENOMEM;
3378                 goto free;
3379         }
3380
3381         INIT_LIST_HEAD(&handler->list);
3382         if (dst) {
3383                 memcpy(&dest_arr[0], dst, sizeof(*dst));
3384                 dest_num++;
3385         }
3386
3387         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3388                 err = parse_flow_attr(dev->mdev, spec->match_criteria,
3389                                       spec->match_value,
3390                                       ib_flow, flow_attr, &flow_act,
3391                                       prev_type);
3392                 if (err < 0)
3393                         goto free;
3394
3395                 prev_type = ((union ib_flow_spec *)ib_flow)->type;
3396                 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3397         }
3398
3399         if (!flow_is_multicast_only(flow_attr))
3400                 set_underlay_qp(dev, spec, underlay_qpn);
3401
3402         if (dev->rep) {
3403                 void *misc;
3404
3405                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3406                                     misc_parameters);
3407                 MLX5_SET(fte_match_set_misc, misc, source_port,
3408                          dev->rep->vport);
3409                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3410                                     misc_parameters);
3411                 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3412         }
3413
3414         spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3415
3416         if (is_egress &&
3417             !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3418                 err = -EINVAL;
3419                 goto free;
3420         }
3421
3422         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3423                 struct mlx5_ib_mcounters *mcounters;
3424
3425                 err = flow_counters_set_data(flow_act.counters, ucmd);
3426                 if (err)
3427                         goto free;
3428
3429                 mcounters = to_mcounters(flow_act.counters);
3430                 handler->ibcounters = flow_act.counters;
3431                 dest_arr[dest_num].type =
3432                         MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3433                 dest_arr[dest_num].counter_id =
3434                         mlx5_fc_id(mcounters->hw_cntrs_hndl);
3435                 dest_num++;
3436         }
3437
3438         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3439                 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3440                         rule_dst = NULL;
3441                         dest_num = 0;
3442                 }
3443         } else {
3444                 if (is_egress)
3445                         flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3446                 else
3447                         flow_act.action |=
3448                                 dest_num ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3449                                         MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3450         }
3451
3452         if ((flow_act.flags & FLOW_ACT_HAS_TAG) &&
3453             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3454              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3455                 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
3456                              flow_act.flow_tag, flow_attr->type);
3457                 err = -EINVAL;
3458                 goto free;
3459         }
3460         handler->rule = mlx5_add_flow_rules(ft, spec,
3461                                             &flow_act,
3462                                             rule_dst, dest_num);
3463
3464         if (IS_ERR(handler->rule)) {
3465                 err = PTR_ERR(handler->rule);
3466                 goto free;
3467         }
3468
3469         ft_prio->refcount++;
3470         handler->prio = ft_prio;
3471         handler->dev = dev;
3472
3473         ft_prio->flow_table = ft;
3474 free:
3475         if (err && handler) {
3476                 if (handler->ibcounters &&
3477                     atomic_read(&handler->ibcounters->usecnt) == 1)
3478                         counters_clear_description(handler->ibcounters);
3479                 kfree(handler);
3480         }
3481         kvfree(spec);
3482         return err ? ERR_PTR(err) : handler;
3483 }
3484
3485 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3486                                                      struct mlx5_ib_flow_prio *ft_prio,
3487                                                      const struct ib_flow_attr *flow_attr,
3488                                                      struct mlx5_flow_destination *dst)
3489 {
3490         return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3491 }
3492
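/*
 * A don't-trap flow is a pair of linked rules: one created without a
 * destination (forward to the next priority) and one forwarding to the
 * given destination; the pair is torn down together via handler->list.
 */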
3493 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3494                                                           struct mlx5_ib_flow_prio *ft_prio,
3495                                                           struct ib_flow_attr *flow_attr,
3496                                                           struct mlx5_flow_destination *dst)
3497 {
3498         struct mlx5_ib_flow_handler *handler_dst = NULL;
3499         struct mlx5_ib_flow_handler *handler = NULL;
3500
3501         handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3502         if (!IS_ERR(handler)) {
3503                 handler_dst = create_flow_rule(dev, ft_prio,
3504                                                flow_attr, dst);
3505                 if (IS_ERR(handler_dst)) {
3506                         mlx5_del_flow_rules(handler->rule);
3507                         ft_prio->refcount--;
3508                         kfree(handler);
3509                         handler = handler_dst;
3510                 } else {
3511                         list_add(&handler_dst->list, &handler->list);
3512                 }
3513         }
3514
3515         return handler;
3516 }
3517 enum {
3518         LEFTOVERS_MC,
3519         LEFTOVERS_UC,
3520 };
3521
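/*
 * Leftovers rules catch traffic no other rule claimed, matching only on
 * the multicast bit of the destination MAC: a multicast rule is always
 * installed, plus a unicast one for IB_FLOW_ATTR_ALL_DEFAULT.
 */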
3522 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3523                                                           struct mlx5_ib_flow_prio *ft_prio,
3524                                                           struct ib_flow_attr *flow_attr,
3525                                                           struct mlx5_flow_destination *dst)
3526 {
3527         struct mlx5_ib_flow_handler *handler_ucast = NULL;
3528         struct mlx5_ib_flow_handler *handler = NULL;
3529
3530         static struct {
3531                 struct ib_flow_attr     flow_attr;
3532                 struct ib_flow_spec_eth eth_flow;
3533         } leftovers_specs[] = {
3534                 [LEFTOVERS_MC] = {
3535                         .flow_attr = {
3536                                 .num_of_specs = 1,
3537                                 .size = sizeof(leftovers_specs[0])
3538                         },
3539                         .eth_flow = {
3540                                 .type = IB_FLOW_SPEC_ETH,
3541                                 .size = sizeof(struct ib_flow_spec_eth),
3542                                 .mask = {.dst_mac = {0x1} },
3543                                 .val =  {.dst_mac = {0x1} }
3544                         }
3545                 },
3546                 [LEFTOVERS_UC] = {
3547                         .flow_attr = {
3548                                 .num_of_specs = 1,
3549                                 .size = sizeof(leftovers_specs[0])
3550                         },
3551                         .eth_flow = {
3552                                 .type = IB_FLOW_SPEC_ETH,
3553                                 .size = sizeof(struct ib_flow_spec_eth),
3554                                 .mask = {.dst_mac = {0x1} },
3555                                 .val = {.dst_mac = {} }
3556                         }
3557                 }
3558         };
3559
3560         handler = create_flow_rule(dev, ft_prio,
3561                                    &leftovers_specs[LEFTOVERS_MC].flow_attr,
3562                                    dst);
3563         if (!IS_ERR(handler) &&
3564             flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3565                 handler_ucast = create_flow_rule(dev, ft_prio,
3566                                                  &leftovers_specs[LEFTOVERS_UC].flow_attr,
3567                                                  dst);
3568                 if (IS_ERR(handler_ucast)) {
3569                         mlx5_del_flow_rules(handler->rule);
3570                         ft_prio->refcount--;
3571                         kfree(handler);
3572                         handler = handler_ucast;
3573                 } else {
3574                         list_add(&handler_ucast->list, &handler->list);
3575                 }
3576         }
3577
3578         return handler;
3579 }
3580
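/*
 * A sniffer flow installs one catch-all rule (zero specs) in the sniffer
 * RX table and one in the sniffer TX table, linked so that both are
 * destroyed together.
 */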
3581 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3582                                                         struct mlx5_ib_flow_prio *ft_rx,
3583                                                         struct mlx5_ib_flow_prio *ft_tx,
3584                                                         struct mlx5_flow_destination *dst)
3585 {
3586         struct mlx5_ib_flow_handler *handler_rx;
3587         struct mlx5_ib_flow_handler *handler_tx;
3588         int err;
3589         static const struct ib_flow_attr flow_attr = {
3590                 .num_of_specs = 0,
3591                 .size = sizeof(flow_attr)
3592         };
3593
3594         handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3595         if (IS_ERR(handler_rx)) {
3596                 err = PTR_ERR(handler_rx);
3597                 goto err;
3598         }
3599
3600         handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3601         if (IS_ERR(handler_tx)) {
3602                 err = PTR_ERR(handler_tx);
3603                 goto err_tx;
3604         }
3605
3606         list_add(&handler_tx->list, &handler_rx->list);
3607
3608         return handler_rx;
3609
3610 err_tx:
3611         mlx5_del_flow_rules(handler_rx->rule);
3612         ft_rx->refcount--;
3613         kfree(handler_rx);
3614 err:
3615         return ERR_PTR(err);
3616 }
3617
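/*
 * uverbs entry point for flow creation: validate the user command and
 * the attribute flags, resolve the destination (TIR for ingress, port
 * for egress), grab the flow table matching the attribute type and
 * dispatch to the appropriate rule-creation helper under flow_db->lock.
 */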
3618 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3619                                            struct ib_flow_attr *flow_attr,
3620                                            int domain,
3621                                            struct ib_udata *udata)
3622 {
3623         struct mlx5_ib_dev *dev = to_mdev(qp->device);
3624         struct mlx5_ib_qp *mqp = to_mqp(qp);
3625         struct mlx5_ib_flow_handler *handler = NULL;
3626         struct mlx5_flow_destination *dst = NULL;
3627         struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3628         struct mlx5_ib_flow_prio *ft_prio;
3629         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3630         struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3631         size_t min_ucmd_sz, required_ucmd_sz;
3632         int err;
3633         int underlay_qpn;
3634
3635         if (udata && udata->inlen) {
3636                 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3637                                 sizeof(ucmd_hdr.reserved);
3638                 if (udata->inlen < min_ucmd_sz)
3639                         return ERR_PTR(-EOPNOTSUPP);
3640
3641                 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3642                 if (err)
3643                         return ERR_PTR(err);
3644
3645                 /* currently only a single counters data section is supported */
3646                 if (ucmd_hdr.ncounters_data > 1)
3647                         return ERR_PTR(-EINVAL);
3648
3649                 required_ucmd_sz = min_ucmd_sz +
3650                         sizeof(struct mlx5_ib_flow_counters_data) *
3651                         ucmd_hdr.ncounters_data;
3652                 if (udata->inlen > required_ucmd_sz &&
3653                     !ib_is_udata_cleared(udata, required_ucmd_sz,
3654                                          udata->inlen - required_ucmd_sz))
3655                         return ERR_PTR(-EOPNOTSUPP);
3656
3657                 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3658                 if (!ucmd)
3659                         return ERR_PTR(-ENOMEM);
3660
3661                 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3662                 if (err)
3663                         goto free_ucmd;
3664         }
3665
3666         if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3667                 err = -ENOMEM;
3668                 goto free_ucmd;
3669         }
3670
3671         if (domain != IB_FLOW_DOMAIN_USER ||
3672             flow_attr->port > dev->num_ports ||
3673             (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3674                                   IB_FLOW_ATTR_FLAGS_EGRESS))) {
3675                 err = -EINVAL;
3676                 goto free_ucmd;
3677         }
3678
3679         if (is_egress &&
3680             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3681              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3682                 err = -EINVAL;
3683                 goto free_ucmd;
3684         }
3685
3686         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3687         if (!dst) {
3688                 err = -ENOMEM;
3689                 goto free_ucmd;
3690         }
3691
3692         mutex_lock(&dev->flow_db->lock);
3693
3694         ft_prio = get_flow_table(dev, flow_attr,
3695                                  is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3696         if (IS_ERR(ft_prio)) {
3697                 err = PTR_ERR(ft_prio);
3698                 goto unlock;
3699         }
3700         if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3701                 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3702                 if (IS_ERR(ft_prio_tx)) {
3703                         err = PTR_ERR(ft_prio_tx);
3704                         ft_prio_tx = NULL;
3705                         goto destroy_ft;
3706                 }
3707         }
3708
3709         if (is_egress) {
3710                 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3711         } else {
3712                 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3713                 if (mqp->flags & MLX5_IB_QP_RSS)
3714                         dst->tir_num = mqp->rss_qp.tirn;
3715                 else
3716                         dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3717         }
3718
3719         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3720                 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
3721                         handler = create_dont_trap_rule(dev, ft_prio,
3722                                                         flow_attr, dst);
3723                 } else {
3724                         underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3725                                         mqp->underlay_qpn : 0;
3726                         handler = _create_flow_rule(dev, ft_prio, flow_attr,
3727                                                     dst, underlay_qpn, ucmd);
3728                 }
3729         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3730                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3731                 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3732                                                 dst);
3733         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3734                 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
3735         } else {
3736                 err = -EINVAL;
3737                 goto destroy_ft;
3738         }
3739
3740         if (IS_ERR(handler)) {
3741                 err = PTR_ERR(handler);
3742                 handler = NULL;
3743                 goto destroy_ft;
3744         }
3745
3746         mutex_unlock(&dev->flow_db->lock);
3747         kfree(dst);
3748         kfree(ucmd);
3749
3750         return &handler->ibflow;
3751
3752 destroy_ft:
3753         put_flow_table(dev, ft_prio, false);
3754         if (ft_prio_tx)
3755                 put_flow_table(dev, ft_prio_tx, false);
3756 unlock:
3757         mutex_unlock(&dev->flow_db->lock);
3758         kfree(dst);
3759 free_ucmd:
3760         kfree(ucmd);
3761         return ERR_PTR(err);
3762 }
3763
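/*
 * Raw (matcher-based) flows take their table straight from the matcher's
 * namespace and priority, enabling tunnel decap/reformat flags whenever
 * the device capabilities allow them.
 */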
3764 static struct mlx5_ib_flow_prio *
3765 _get_flow_table(struct mlx5_ib_dev *dev,
3766                 struct mlx5_ib_flow_matcher *fs_matcher,
3767                 bool mcast)
3768 {
3769         struct mlx5_flow_namespace *ns = NULL;
3770         struct mlx5_ib_flow_prio *prio;
3771         int max_table_size;
3772         u32 flags = 0;
3773         int priority;
3774
3775         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
3776                 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3777                                         log_max_ft_size));
3778                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
3779                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
3780                 if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3781                                               reformat_l3_tunnel_to_l2))
3782                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3783         } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
3784                 max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
3785                                         log_max_ft_size));
3786                 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
3787                         flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
3788         }
3789
3790         if (max_table_size < MLX5_FS_MAX_ENTRIES)
3791                 return ERR_PTR(-ENOMEM);
3792
3793         if (mcast)
3794                 priority = MLX5_IB_FLOW_MCAST_PRIO;
3795         else
3796                 priority = ib_prio_to_core_prio(fs_matcher->priority, false);
3797
3798         ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
3799         if (!ns)
3800                 return ERR_PTR(-ENOTSUPP);
3801
3802         if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
3803                 prio = &dev->flow_db->prios[priority];
3804         else
3805                 prio = &dev->flow_db->egress_prios[priority];
3806
3807         if (prio->flow_table)
3808                 return prio;
3809
3810         return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
3811                          MLX5_FS_MAX_TYPES, flags);
3812 }
3813
3814 static struct mlx5_ib_flow_handler *
3815 _create_raw_flow_rule(struct mlx5_ib_dev *dev,
3816                       struct mlx5_ib_flow_prio *ft_prio,
3817                       struct mlx5_flow_destination *dst,
3818                       struct mlx5_ib_flow_matcher  *fs_matcher,
3819                       struct mlx5_flow_act *flow_act,
3820                       void *cmd_in, int inlen,
3821                       int dst_num)
3822 {
3823         struct mlx5_ib_flow_handler *handler;
3824         struct mlx5_flow_spec *spec;
3825         struct mlx5_flow_table *ft = ft_prio->flow_table;
3826         int err = 0;
3827
3828         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3829         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3830         if (!handler || !spec) {
3831                 err = -ENOMEM;
3832                 goto free;
3833         }
3834
3835         INIT_LIST_HEAD(&handler->list);
3836
3837         memcpy(spec->match_value, cmd_in, inlen);
3838         memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
3839                fs_matcher->mask_len);
3840         spec->match_criteria_enable = fs_matcher->match_criteria_enable;
3841
3842         handler->rule = mlx5_add_flow_rules(ft, spec,
3843                                             flow_act, dst, dst_num);
3844
3845         if (IS_ERR(handler->rule)) {
3846                 err = PTR_ERR(handler->rule);
3847                 goto free;
3848         }
3849
3850         ft_prio->refcount++;
3851         handler->prio = ft_prio;
3852         handler->dev = dev;
3853         ft_prio->flow_table = ft;
3854
3855 free:
3856         if (err)
3857                 kfree(handler);
3858         kvfree(spec);
3859         return err ? ERR_PTR(err) : handler;
3860 }
3861
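/*
 * A raw flow counts as multicast when its outer headers match a
 * multicast destination MAC or a multicast IPv4 destination, in both the
 * mask and the value.
 */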
3862 static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
3863                                 void *match_v)
3864 {
3865         void *match_c;
3866         void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
3867         void *dmac, *dmac_mask;
3868         void *ipv4, *ipv4_mask;
3869
3870         if (!(fs_matcher->match_criteria_enable &
3871               (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
3872                 return false;
3873
3874         match_c = fs_matcher->matcher_mask.match_params;
3875         match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
3876                                            outer_headers);
3877         match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
3878                                            outer_headers);
3879
3880         dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
3881                             dmac_47_16);
3882         dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
3883                                  dmac_47_16);
3884
3885         if (is_multicast_ether_addr(dmac) &&
3886             is_multicast_ether_addr(dmac_mask))
3887                 return true;
3888
3889         ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
3890                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3891
3892         ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
3893                                  dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3894
3895         if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
3896             ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
3897                 return true;
3898
3899         return false;
3900 }
3901
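/*
 * Install a rule described by a user-created flow matcher: forward to a
 * TIR, a flow table or the wire port, optionally adding a counter
 * destination when the action requests counting.
 */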
3902 struct mlx5_ib_flow_handler *
3903 mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
3904                         struct mlx5_ib_flow_matcher *fs_matcher,
3905                         struct mlx5_flow_act *flow_act,
3906                         u32 counter_id,
3907                         void *cmd_in, int inlen, int dest_id,
3908                         int dest_type)
3909 {
3910         struct mlx5_flow_destination *dst;
3911         struct mlx5_ib_flow_prio *ft_prio;
3912         struct mlx5_ib_flow_handler *handler;
3913         int dst_num = 0;
3914         bool mcast;
3915         int err;
3916
3917         if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
3918                 return ERR_PTR(-EOPNOTSUPP);
3919
3920         if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
3921                 return ERR_PTR(-ENOMEM);
3922
3923         dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
3924         if (!dst)
3925                 return ERR_PTR(-ENOMEM);
3926
3927         mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
3928         mutex_lock(&dev->flow_db->lock);
3929
3930         ft_prio = _get_flow_table(dev, fs_matcher, mcast);
3931         if (IS_ERR(ft_prio)) {
3932                 err = PTR_ERR(ft_prio);
3933                 goto unlock;
3934         }
3935
3936         if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
3937                 dst[dst_num].type = dest_type;
3938                 dst[dst_num].tir_num = dest_id;
3939                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3940         } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
3941                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
3942                 dst[dst_num].ft_num = dest_id;
3943                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3944         } else {
3945                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3946                 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3947         }
3948
3949         dst_num++;
3950
3951         if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3952                 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3953                 dst[dst_num].counter_id = counter_id;
3954                 dst_num++;
3955         }
3956
3957         handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
3958                                         cmd_in, inlen, dst_num);
3959
3960         if (IS_ERR(handler)) {
3961                 err = PTR_ERR(handler);
3962                 goto destroy_ft;
3963         }
3964
3965         mutex_unlock(&dev->flow_db->lock);
3966         atomic_inc(&fs_matcher->usecnt);
3967         handler->flow_matcher = fs_matcher;
3968
3969         kfree(dst);
3970
3971         return handler;
3972
3973 destroy_ft:
3974         put_flow_table(dev, ft_prio, false);
3975 unlock:
3976         mutex_unlock(&dev->flow_db->lock);
3977         kfree(dst);
3978
3979         return ERR_PTR(err);
3980 }
3981
3982 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
3983 {
3984         u32 flags = 0;
3985
3986         if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
3987                 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
3988
3989         return flags;
3990 }
3991
3992 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED      MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
3993 static struct ib_flow_action *
3994 mlx5_ib_create_flow_action_esp(struct ib_device *device,
3995                                const struct ib_flow_action_attrs_esp *attr,
3996                                struct uverbs_attr_bundle *attrs)
3997 {
3998         struct mlx5_ib_dev *mdev = to_mdev(device);
3999         struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
4000         struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
4001         struct mlx5_ib_flow_action *action;
4002         u64 action_flags;
4003         u64 flags;
4004         int err = 0;
4005
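        /* ((LAST_SUPPORTED << 1) - 1) sets every bit up to and including the
         * last supported flag, so uverbs_get_flags64() rejects any flag we do
         * not understand.
         */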
4006         err = uverbs_get_flags64(
4007                 &action_flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
4008                 ((MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1) - 1));
4009         if (err)
4010                 return ERR_PTR(err);
4011
4012         flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
4013
4014         /* We currently only support a subset of the standard features: a
4015          * keymat of type AES_GCM with icv_len == 16, iv_algo == SEQ and ESN
4016          * (with overlap). Full offload mode isn't supported.
4017          */
4018         if (!attr->keymat || attr->replay || attr->encap ||
4019             attr->spi || attr->seq || attr->tfc_pad ||
4020             attr->hard_limit_pkts ||
4021             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4022                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
4023                 return ERR_PTR(-EOPNOTSUPP);
4024
4025         if (attr->keymat->protocol !=
4026             IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
4027                 return ERR_PTR(-EOPNOTSUPP);
4028
4029         aes_gcm = &attr->keymat->keymat.aes_gcm;
4030
4031         if (aes_gcm->icv_len != 16 ||
4032             aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
4033                 return ERR_PTR(-EOPNOTSUPP);
4034
4035         action = kmalloc(sizeof(*action), GFP_KERNEL);
4036         if (!action)
4037                 return ERR_PTR(-ENOMEM);
4038
4039         action->esp_aes_gcm.ib_flags = attr->flags;
4040         memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
4041                sizeof(accel_attrs.keymat.aes_gcm.aes_key));
4042         accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
4043         memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
4044                sizeof(accel_attrs.keymat.aes_gcm.salt));
4045         memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
4046                sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
4047         accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
4048         accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
4049         accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
4050
4051         accel_attrs.esn = attr->esn;
4052         if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
4053                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
4054         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4055                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4056
4057         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
4058                 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
4059
4060         action->esp_aes_gcm.ctx =
4061                 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
4062         if (IS_ERR(action->esp_aes_gcm.ctx)) {
4063                 err = PTR_ERR(action->esp_aes_gcm.ctx);
4064                 goto err_parse;
4065         }
4066
4069         return &action->ib_action;
4070
4071 err_parse:
4072         kfree(action);
4073         return ERR_PTR(err);
4074 }
4075
4076 static int
4077 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
4078                                const struct ib_flow_action_attrs_esp *attr,
4079                                struct uverbs_attr_bundle *attrs)
4080 {
4081         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4082         struct mlx5_accel_esp_xfrm_attrs accel_attrs;
4083         int err = 0;
4084
4085         if (attr->keymat || attr->replay || attr->encap ||
4086             attr->spi || attr->seq || attr->tfc_pad ||
4087             attr->hard_limit_pkts ||
4088             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4089                              IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
4090                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
4091                 return -EOPNOTSUPP;
4092
4093         /* Only the ESN value and the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP
4094          * flag can be modified.
4095          */
4096         if (!(maction->esp_aes_gcm.ib_flags &
4097               IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
4098             attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
4099                            IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
4100                 return -EINVAL;
4101
4102         memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
4103                sizeof(accel_attrs));
4104
4105         accel_attrs.esn = attr->esn;
4106         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
4107                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4108         else
4109                 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
4110
4111         err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
4112                                          &accel_attrs);
4113         if (err)
4114                 return err;
4115
4116         maction->esp_aes_gcm.ib_flags &=
4117                 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4118         maction->esp_aes_gcm.ib_flags |=
4119                 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
4120
4121         return 0;
4122 }
4123
4124 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
4125 {
4126         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
4127
4128         switch (action->type) {
4129         case IB_FLOW_ACTION_ESP:
4130                 /*
4131                  * We only support aes_gcm for now, so we implicitly know this is
4132                  * the underlying crypto.
4133                  */
4134                 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
4135                 break;
4136         case IB_FLOW_ACTION_UNSPECIFIED:
4137                 mlx5_ib_destroy_flow_action_raw(maction);
4138                 break;
4139         default:
4140                 WARN_ON(true);
4141                 break;
4142         }
4143
4144         kfree(maction);
4145         return 0;
4146 }
4147
4148 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4149 {
4150         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4151         struct mlx5_ib_qp *mqp = to_mqp(ibqp);
4152         int err;
4153         u16 uid;
4154
4155         uid = ibqp->pd ?
4156                 to_mpd(ibqp->pd)->uid : 0;
4157
4158         if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
4159                 mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
4160                 return -EOPNOTSUPP;
4161         }
4162
4163         err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4164         if (err)
4165                 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
4166                              ibqp->qp_num, gid->raw);
4167
4168         return err;
4169 }
4170
4171 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
4172 {
4173         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
4174         int err;
4175         u16 uid;
4176
4177         uid = ibqp->pd ?
4178                 to_mpd(ibqp->pd)->uid : 0;
4179         err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
4180         if (err)
4181                 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
4182                              ibqp->qp_num, gid->raw);
4183
4184         return err;
4185 }
4186
4187 static int init_node_data(struct mlx5_ib_dev *dev)
4188 {
4189         int err;
4190
4191         err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
4192         if (err)
4193                 return err;
4194
4195         dev->mdev->rev_id = dev->mdev->pdev->revision;
4196
4197         return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
4198 }
4199
4200 static ssize_t fw_pages_show(struct device *device,
4201                              struct device_attribute *attr, char *buf)
4202 {
4203         struct mlx5_ib_dev *dev =
4204                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4205
4206         return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
4207 }
4208 static DEVICE_ATTR_RO(fw_pages);
4209
4210 static ssize_t reg_pages_show(struct device *device,
4211                               struct device_attribute *attr, char *buf)
4212 {
4213         struct mlx5_ib_dev *dev =
4214                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4215
4216         return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
4217 }
4218 static DEVICE_ATTR_RO(reg_pages);
4219
4220 static ssize_t hca_type_show(struct device *device,
4221                              struct device_attribute *attr, char *buf)
4222 {
4223         struct mlx5_ib_dev *dev =
4224                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4225
4226         return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
4227 }
4228 static DEVICE_ATTR_RO(hca_type);
4229
4230 static ssize_t hw_rev_show(struct device *device,
4231                            struct device_attribute *attr, char *buf)
4232 {
4233         struct mlx5_ib_dev *dev =
4234                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4235
4236         return sprintf(buf, "%x\n", dev->mdev->rev_id);
4237 }
4238 static DEVICE_ATTR_RO(hw_rev);
4239
4240 static ssize_t board_id_show(struct device *device,
4241                              struct device_attribute *attr, char *buf)
4242 {
4243         struct mlx5_ib_dev *dev =
4244                 rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
4245
4246         return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
4247                        dev->mdev->board_id);
4248 }
4249 static DEVICE_ATTR_RO(board_id);
4250
4251 static struct attribute *mlx5_class_attributes[] = {
4252         &dev_attr_hw_rev.attr,
4253         &dev_attr_hca_type.attr,
4254         &dev_attr_board_id.attr,
4255         &dev_attr_fw_pages.attr,
4256         &dev_attr_reg_pages.attr,
4257         NULL,
4258 };
4259
4260 static const struct attribute_group mlx5_attr_group = {
4261         .attrs = mlx5_class_attributes,
4262 };
4263
4264 static void pkey_change_handler(struct work_struct *work)
4265 {
4266         struct mlx5_ib_port_resources *ports =
4267                 container_of(work, struct mlx5_ib_port_resources,
4268                              pkey_change_work);
4269
4270         mutex_lock(&ports->devr->mutex);
4271         mlx5_ib_gsi_pkey_change(ports->gsi);
4272         mutex_unlock(&ports->devr->mutex);
4273 }
4274
4275 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
4276 {
4277         struct mlx5_ib_qp *mqp;
4278         struct mlx5_ib_cq *send_mcq, *recv_mcq;
4279         struct mlx5_core_cq *mcq;
4280         struct list_head cq_armed_list;
4281         unsigned long flags_qp;
4282         unsigned long flags_cq;
4283         unsigned long flags;
4284
4285         INIT_LIST_HEAD(&cq_armed_list);
4286
4287         /* Go over the QP list residing on this ibdev; sync with QP create/destroy. */
4288         spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
4289         list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
4290                 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
4291                 if (mqp->sq.tail != mqp->sq.head) {
4292                         send_mcq = to_mcq(mqp->ibqp.send_cq);
4293                         spin_lock_irqsave(&send_mcq->lock, flags_cq);
4294                         if (send_mcq->mcq.comp &&
4295                             mqp->ibqp.send_cq->comp_handler) {
4296                                 if (!send_mcq->mcq.reset_notify_added) {
4297                                         send_mcq->mcq.reset_notify_added = 1;
4298                                         list_add_tail(&send_mcq->mcq.reset_notify,
4299                                                       &cq_armed_list);
4300                                 }
4301                         }
4302                         spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
4303                 }
4304                 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
4305                 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
4306                 /* no handling is needed for SRQ */
4307                 if (!mqp->ibqp.srq) {
4308                         if (mqp->rq.tail != mqp->rq.head) {
4309                                 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
4310                                 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
4311                                 if (recv_mcq->mcq.comp &&
4312                                     mqp->ibqp.recv_cq->comp_handler) {
4313                                         if (!recv_mcq->mcq.reset_notify_added) {
4314                                                 recv_mcq->mcq.reset_notify_added = 1;
4315                                                 list_add_tail(&recv_mcq->mcq.reset_notify,
4316                                                               &cq_armed_list);
4317                                         }
4318                                 }
4319                                 spin_unlock_irqrestore(&recv_mcq->lock,
4320                                                        flags_cq);
4321                         }
4322                 }
4323                 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
4324         }
4325         /* At this point all in-flight posted sends have been flushed, since we
4326          * took and released the locks above. Now arm all involved CQs.
4327          */
4328         list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
4329                 mcq->comp(mcq);
4330         }
4331         spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
4332 }
4333
4334 static void delay_drop_handler(struct work_struct *work)
4335 {
4336         int err;
4337         struct mlx5_ib_delay_drop *delay_drop =
4338                 container_of(work, struct mlx5_ib_delay_drop,
4339                              delay_drop_work);
4340
4341         atomic_inc(&delay_drop->events_cnt);
4342
4343         mutex_lock(&delay_drop->lock);
4344         err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
4345                                        delay_drop->timeout);
4346         if (err) {
4347                 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4348                              delay_drop->timeout);
4349                 delay_drop->activate = false;
4350         }
4351         mutex_unlock(&delay_drop->lock);
4352 }
4353
4354 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4355                                  struct ib_event *ibev)
4356 {
4357         switch (eqe->sub_type) {
4358         case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
4359                 schedule_work(&ibdev->delay_drop.delay_drop_work);
4360                 break;
4361         default: /* do nothing */
4362                 return;
4363         }
4364 }
4365
4366 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
4367                               struct ib_event *ibev)
4368 {
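        /* The EQE encodes the port number in bits 7:4 of data.port.port. */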
4369         u8 port = (eqe->data.port.port >> 4) & 0xf;
4370
4371         ibev->element.port_num = port;
4372
4373         switch (eqe->sub_type) {
4374         case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
4375         case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
4376         case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
4377                 /* In RoCE, port up/down events are handled in
4378                  * mlx5_netdev_event().
4379                  */
4380                 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4381                                             IB_LINK_LAYER_ETHERNET)
4382                         return -EINVAL;
4383
4384                 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
4385                                 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4386                 break;
4387
4388         case MLX5_PORT_CHANGE_SUBTYPE_LID:
4389                 ibev->event = IB_EVENT_LID_CHANGE;
4390                 break;
4391
4392         case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
4393                 ibev->event = IB_EVENT_PKEY_CHANGE;
4394                 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4395                 break;
4396
4397         case MLX5_PORT_CHANGE_SUBTYPE_GUID:
4398                 ibev->event = IB_EVENT_GID_CHANGE;
4399                 break;
4400
4401         case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
4402                 ibev->event = IB_EVENT_CLIENT_REREGISTER;
4403                 break;
4404         default:
4405                 return -EINVAL;
4406         }
4407
4408         return 0;
4409 }
4410
4411 static void mlx5_ib_handle_event(struct work_struct *_work)
4412 {
4413         struct mlx5_ib_event_work *work =
4414                 container_of(_work, struct mlx5_ib_event_work, work);
4415         struct mlx5_ib_dev *ibdev;
4416         struct ib_event ibev;
4417         bool fatal = false;
4418
4419         if (work->is_slave) {
4420                 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
4421                 if (!ibdev)
4422                         goto out;
4423         } else {
4424                 ibdev = work->dev;
4425         }
4426
4427         switch (work->event) {
4428         case MLX5_DEV_EVENT_SYS_ERROR:
4429                 ibev.event = IB_EVENT_DEVICE_FATAL;
4430                 mlx5_ib_handle_internal_error(ibdev);
4431                 ibev.element.port_num  = (u8)(unsigned long)work->param;
4432                 fatal = true;
4433                 break;
4434         case MLX5_EVENT_TYPE_PORT_CHANGE:
4435                 if (handle_port_change(ibdev, work->param, &ibev))
4436                         goto out;
4437                 break;
4438         case MLX5_EVENT_TYPE_GENERAL_EVENT:
4439                 handle_general_event(ibdev, work->param, &ibev);
4440                 /* fall through */
4441         default:
4442                 goto out;
4443         }
4444
4445         ibev.device = &ibdev->ib_dev;
4446
4447         if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
4448                 mlx5_ib_warn(ibdev, "event on invalid port %d\n", ibev.element.port_num);
4449                 goto out;
4450         }
4451
4452         if (ibdev->ib_active)
4453                 ib_dispatch_event(&ibev);
4454
4455         if (fatal)
4456                 ibdev->ib_active = false;
4457 out:
4458         kfree(work);
4459 }
4460
4461 static int mlx5_ib_event(struct notifier_block *nb,
4462                          unsigned long event, void *param)
4463 {
4464         struct mlx5_ib_event_work *work;
4465
4466         work = kmalloc(sizeof(*work), GFP_ATOMIC);
4467         if (!work)
4468                 return NOTIFY_DONE;
4469
4470         INIT_WORK(&work->work, mlx5_ib_handle_event);
4471         work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
4472         work->is_slave = false;
4473         work->param = param;
4474         work->event = event;
4475
4476         queue_work(mlx5_ib_event_wq, &work->work);
4477
4478         return NOTIFY_OK;
4479 }
4480
4481 static int mlx5_ib_event_slave_port(struct notifier_block *nb,
4482                                     unsigned long event, void *param)
4483 {
4484         struct mlx5_ib_event_work *work;
4485
4486         work = kmalloc(sizeof(*work), GFP_ATOMIC);
4487         if (!work)
4488                 return NOTIFY_DONE;
4489
4490         INIT_WORK(&work->work, mlx5_ib_handle_event);
4491         work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
4492         work->is_slave = true;
4493         work->param = param;
4494         work->event = event;
4495         queue_work(mlx5_ib_event_wq, &work->work);
4496
4497         return NOTIFY_OK;
4498 }
4499
4500 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4501 {
4502         struct mlx5_hca_vport_context vport_ctx;
4503         int err;
4504         int port;
4505
4506         for (port = 1; port <= dev->num_ports; port++) {
4507                 dev->mdev->port_caps[port - 1].has_smi = false;
4508                 if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4509                     MLX5_CAP_PORT_TYPE_IB) {
4510                         if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4511                                 err = mlx5_query_hca_vport_context(dev->mdev, 0,
4512                                                                    port, 0,
4513                                                                    &vport_ctx);
4514                                 if (err) {
4515                                         mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4516                                                     port, err);
4517                                         return err;
4518                                 }
4519                                 dev->mdev->port_caps[port - 1].has_smi =
4520                                         vport_ctx.has_smi;
4521                         } else {
4522                                 dev->mdev->port_caps[port - 1].has_smi = true;
4523                         }
4524                 }
4525         }
4526         return 0;
4527 }
4528
4529 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4530 {
4531         int port;
4532
4533         for (port = 1; port <= dev->num_ports; port++)
4534                 mlx5_query_ext_port_caps(dev, port);
4535 }
4536
4537 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4538 {
4539         struct ib_device_attr *dprops = NULL;
4540         struct ib_port_attr *pprops = NULL;
4541         int err = -ENOMEM;
4542         struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4543
4544         pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
4545         if (!pprops)
4546                 goto out;
4547
4548         dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4549         if (!dprops)
4550                 goto out;
4551
4552         err = set_has_smi_cap(dev);
4553         if (err)
4554                 goto out;
4555
4556         err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4557         if (err) {
4558                 mlx5_ib_warn(dev, "query_device failed %d\n", err);
4559                 goto out;
4560         }
4561
4562         memset(pprops, 0, sizeof(*pprops));
4563         err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4564         if (err) {
4565                 mlx5_ib_warn(dev, "query_port %d failed %d\n",
4566                              port, err);
4567                 goto out;
4568         }
4569
4570         dev->mdev->port_caps[port - 1].pkey_table_len =
4571                                         dprops->max_pkeys;
4572         dev->mdev->port_caps[port - 1].gid_table_len =
4573                                         pprops->gid_tbl_len;
4574         mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4575                     port, dprops->max_pkeys, pprops->gid_tbl_len);
4576
4577 out:
4578         kfree(pprops);
4579         kfree(dprops);
4580
4581         return err;
4582 }
4583
4584 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4585 {
4586         int err;
4587
4588         err = mlx5_mr_cache_cleanup(dev);
4589         if (err)
4590                 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4591
4592         if (dev->umrc.qp)
4593                 mlx5_ib_destroy_qp(dev->umrc.qp);
4594         if (dev->umrc.cq)
4595                 ib_free_cq(dev->umrc.cq);
4596         if (dev->umrc.pd)
4597                 ib_dealloc_pd(dev->umrc.pd);
4598 }
4599
4600 enum {
4601         MAX_UMR_WR = 128,
4602 };
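/*
 * MAX_UMR_WR bounds the number of UMR work requests that can be outstanding
 * on the QP created below; create_umr_res() initializes dev->umrc.sem to
 * this value so that submitters can throttle against it.
 */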
4603
4604 static int create_umr_res(struct mlx5_ib_dev *dev)
4605 {
4606         struct ib_qp_init_attr *init_attr = NULL;
4607         struct ib_qp_attr *attr = NULL;
4608         struct ib_pd *pd;
4609         struct ib_cq *cq;
4610         struct ib_qp *qp;
4611         int ret;
4612
4613         attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4614         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4615         if (!attr || !init_attr) {
4616                 ret = -ENOMEM;
4617                 goto error_0;
4618         }
4619
4620         pd = ib_alloc_pd(&dev->ib_dev, 0);
4621         if (IS_ERR(pd)) {
4622                 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4623                 ret = PTR_ERR(pd);
4624                 goto error_0;
4625         }
4626
4627         cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4628         if (IS_ERR(cq)) {
4629                 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4630                 ret = PTR_ERR(cq);
4631                 goto error_2;
4632         }
4633
4634         init_attr->send_cq = cq;
4635         init_attr->recv_cq = cq;
4636         init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4637         init_attr->cap.max_send_wr = MAX_UMR_WR;
4638         init_attr->cap.max_send_sge = 1;
4639         init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4640         init_attr->port_num = 1;
4641         qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4642         if (IS_ERR(qp)) {
4643                 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4644                 ret = PTR_ERR(qp);
4645                 goto error_3;
4646         }
4647         qp->device     = &dev->ib_dev;
4648         qp->real_qp    = qp;
4649         qp->uobject    = NULL;
4650         qp->qp_type    = MLX5_IB_QPT_REG_UMR;
4651         qp->send_cq    = init_attr->send_cq;
4652         qp->recv_cq    = init_attr->recv_cq;
4653
4654         attr->qp_state = IB_QPS_INIT;
4655         attr->port_num = 1;
4656         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4657                                 IB_QP_PORT, NULL);
4658         if (ret) {
4659                 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4660                 goto error_4;
4661         }
4662
4663         memset(attr, 0, sizeof(*attr));
4664         attr->qp_state = IB_QPS_RTR;
4665         attr->path_mtu = IB_MTU_256;
4666
4667         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4668         if (ret) {
4669                 mlx5_ib_dbg(dev, "Couldn't modify UMR QP to RTR\n");
4670                 goto error_4;
4671         }
4672
4673         memset(attr, 0, sizeof(*attr));
4674         attr->qp_state = IB_QPS_RTS;
4675         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4676         if (ret) {
4677                 mlx5_ib_dbg(dev, "Couldn't modify UMR QP to RTS\n");
4678                 goto error_4;
4679         }
4680
4681         dev->umrc.qp = qp;
4682         dev->umrc.cq = cq;
4683         dev->umrc.pd = pd;
4684
4685         sema_init(&dev->umrc.sem, MAX_UMR_WR);
4686         ret = mlx5_mr_cache_init(dev);
4687         if (ret) {
4688                 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4689                 goto error_4;
4690         }
4691
4692         kfree(attr);
4693         kfree(init_attr);
4694
4695         return 0;
4696
4697 error_4:
4698         mlx5_ib_destroy_qp(qp);
4699         dev->umrc.qp = NULL;
4700
4701 error_3:
4702         ib_free_cq(cq);
4703         dev->umrc.cq = NULL;
4704
4705 error_2:
4706         ib_dealloc_pd(pd);
4707         dev->umrc.pd = NULL;
4708
4709 error_0:
4710         kfree(attr);
4711         kfree(init_attr);
4712         return ret;
4713 }
4714
4715 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4716 {
4717         switch (umr_fence_cap) {
4718         case MLX5_CAP_UMR_FENCE_NONE:
4719                 return MLX5_FENCE_MODE_NONE;
4720         case MLX5_CAP_UMR_FENCE_SMALL:
4721                 return MLX5_FENCE_MODE_INITIATOR_SMALL;
4722         default:
4723                 return MLX5_FENCE_MODE_STRONG_ORDERING;
4724         }
4725 }
4726
4727 static int create_dev_resources(struct mlx5_ib_resources *devr)
4728 {
4729         struct ib_srq_init_attr attr;
4730         struct mlx5_ib_dev *dev;
4731         struct ib_device *ibdev;
4732         struct ib_cq_init_attr cq_attr = {.cqe = 1};
4733         int port;
4734         int ret = 0;
4735
4736         dev = container_of(devr, struct mlx5_ib_dev, devr);
4737         ibdev = &dev->ib_dev;
4738
4739         mutex_init(&devr->mutex);
4740
4741         devr->p0 = rdma_zalloc_drv_obj(ibdev, ib_pd);
4742         if (!devr->p0)
4743                 return -ENOMEM;
4744
4745         devr->p0->device  = ibdev;
4746         devr->p0->uobject = NULL;
4747         atomic_set(&devr->p0->usecnt, 0);
4748
4749         ret = mlx5_ib_alloc_pd(devr->p0, NULL, NULL);
4750         if (ret)
4751                 goto error0;
4752
4753         devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
4754         if (IS_ERR(devr->c0)) {
4755                 ret = PTR_ERR(devr->c0);
4756                 goto error1;
4757         }
4758         devr->c0->device        = &dev->ib_dev;
4759         devr->c0->uobject       = NULL;
4760         devr->c0->comp_handler  = NULL;
4761         devr->c0->event_handler = NULL;
4762         devr->c0->cq_context    = NULL;
4763         atomic_set(&devr->c0->usecnt, 0);
4764
4765         devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
4766         if (IS_ERR(devr->x0)) {
4767                 ret = PTR_ERR(devr->x0);
4768                 goto error2;
4769         }
4770         devr->x0->device = &dev->ib_dev;
4771         devr->x0->inode = NULL;
4772         atomic_set(&devr->x0->usecnt, 0);
4773         mutex_init(&devr->x0->tgt_qp_mutex);
4774         INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4775
4776         devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
4777         if (IS_ERR(devr->x1)) {
4778                 ret = PTR_ERR(devr->x1);
4779                 goto error3;
4780         }
4781         devr->x1->device = &dev->ib_dev;
4782         devr->x1->inode = NULL;
4783         atomic_set(&devr->x1->usecnt, 0);
4784         mutex_init(&devr->x1->tgt_qp_mutex);
4785         INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4786
4787         memset(&attr, 0, sizeof(attr));
4788         attr.attr.max_sge = 1;
4789         attr.attr.max_wr = 1;
4790         attr.srq_type = IB_SRQT_XRC;
4791         attr.ext.cq = devr->c0;
4792         attr.ext.xrc.xrcd = devr->x0;
4793
4794         devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
4795         if (IS_ERR(devr->s0)) {
4796                 ret = PTR_ERR(devr->s0);
4797                 goto error4;
4798         }
4799         devr->s0->device        = &dev->ib_dev;
4800         devr->s0->pd            = devr->p0;
4801         devr->s0->uobject       = NULL;
4802         devr->s0->event_handler = NULL;
4803         devr->s0->srq_context   = NULL;
4804         devr->s0->srq_type      = IB_SRQT_XRC;
4805         devr->s0->ext.xrc.xrcd  = devr->x0;
4806         devr->s0->ext.cq        = devr->c0;
4807         atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
4808         atomic_inc(&devr->s0->ext.cq->usecnt);
4809         atomic_inc(&devr->p0->usecnt);
4810         atomic_set(&devr->s0->usecnt, 0);
4811
4812         memset(&attr, 0, sizeof(attr));
4813         attr.attr.max_sge = 1;
4814         attr.attr.max_wr = 1;
4815         attr.srq_type = IB_SRQT_BASIC;
4816         devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
4817         if (IS_ERR(devr->s1)) {
4818                 ret = PTR_ERR(devr->s1);
4819                 goto error5;
4820         }
4821         devr->s1->device        = &dev->ib_dev;
4822         devr->s1->pd            = devr->p0;
4823         devr->s1->uobject       = NULL;
4824         devr->s1->event_handler = NULL;
4825         devr->s1->srq_context   = NULL;
4826         devr->s1->srq_type      = IB_SRQT_BASIC;
4827         devr->s1->ext.cq        = devr->c0;
4828         atomic_inc(&devr->p0->usecnt);
4829         atomic_set(&devr->s1->usecnt, 0);
4830
4831         for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
4832                 INIT_WORK(&devr->ports[port].pkey_change_work,
4833                           pkey_change_handler);
4834                 devr->ports[port].devr = devr;
4835         }
4836
4837         return 0;
4838
4839 error5:
4840         mlx5_ib_destroy_srq(devr->s0);
4841 error4:
4842         mlx5_ib_dealloc_xrcd(devr->x1);
4843 error3:
4844         mlx5_ib_dealloc_xrcd(devr->x0);
4845 error2:
4846         mlx5_ib_destroy_cq(devr->c0);
4847 error1:
4848         mlx5_ib_dealloc_pd(devr->p0);
4849 error0:
4850         kfree(devr->p0);
4851         return ret;
4852 }
4853
4854 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
4855 {
4856         struct mlx5_ib_dev *dev =
4857                 container_of(devr, struct mlx5_ib_dev, devr);
4858         int port;
4859
4860         mlx5_ib_destroy_srq(devr->s1);
4861         mlx5_ib_destroy_srq(devr->s0);
4862         mlx5_ib_dealloc_xrcd(devr->x0);
4863         mlx5_ib_dealloc_xrcd(devr->x1);
4864         mlx5_ib_destroy_cq(devr->c0);
4865         mlx5_ib_dealloc_pd(devr->p0);
4866         kfree(devr->p0);
4867
4868         /* Make sure no P_Key change work items are still executing */
4869         for (port = 0; port < dev->num_ports; ++port)
4870                 cancel_work_sync(&devr->ports[port].pkey_change_work);
4871 }
4872
4873 static u32 get_core_cap_flags(struct ib_device *ibdev,
4874                               struct mlx5_hca_vport_context *rep)
4875 {
4876         struct mlx5_ib_dev *dev = to_mdev(ibdev);
4877         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
4878         u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
4879         u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
4880         bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
4881         u32 ret = 0;
4882
4883         if (rep->grh_required)
4884                 ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
4885
4886         if (ll == IB_LINK_LAYER_INFINIBAND)
4887                 return ret | RDMA_CORE_PORT_IBA_IB;
4888
4889         if (raw_support)
4890                 ret |= RDMA_CORE_PORT_RAW_PACKET;
4891
4892         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
4893                 return ret;
4894
4895         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
4896                 return ret;
4897
4898         if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
4899                 ret |= RDMA_CORE_PORT_IBA_ROCE;
4900
4901         if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
4902                 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
4903
4904         return ret;
4905 }
4906
4907 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
4908                                struct ib_port_immutable *immutable)
4909 {
4910         struct ib_port_attr attr;
4911         struct mlx5_ib_dev *dev = to_mdev(ibdev);
4912         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
4913         struct mlx5_hca_vport_context rep = {0};
4914         int err;
4915
4916         err = ib_query_port(ibdev, port_num, &attr);
4917         if (err)
4918                 return err;
4919
4920         if (ll == IB_LINK_LAYER_INFINIBAND) {
4921                 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
4922                                                    &rep);
4923                 if (err)
4924                         return err;
4925         }
4926
4927         immutable->pkey_tbl_len = attr.pkey_tbl_len;
4928         immutable->gid_tbl_len = attr.gid_tbl_len;
4929         immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
4930         if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
4931                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
4932
4933         return 0;
4934 }
4935
4936 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
4937                                    struct ib_port_immutable *immutable)
4938 {
4939         struct ib_port_attr attr;
4940         int err;
4941
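        /* core_cap_flags is assigned both before and after ib_query_port():
         * the early assignment matters because the core's rdma_protocol_*()
         * helpers appear to consult these cached immutable flags while the
         * query runs.
         */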
4942         immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4943
4944         err = ib_query_port(ibdev, port_num, &attr);
4945         if (err)
4946                 return err;
4947
4948         immutable->pkey_tbl_len = attr.pkey_tbl_len;
4949         immutable->gid_tbl_len = attr.gid_tbl_len;
4950         immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4951
4952         return 0;
4953 }
4954
4955 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
4956 {
4957         struct mlx5_ib_dev *dev =
4958                 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
4959         snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
4960                  fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
4961                  fw_rev_sub(dev->mdev));
4962 }
4963
4964 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
4965 {
4966         struct mlx5_core_dev *mdev = dev->mdev;
4967         struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
4968                                                                  MLX5_FLOW_NAMESPACE_LAG);
4969         struct mlx5_flow_table *ft;
4970         int err;
4971
4972         if (!ns || !mlx5_lag_is_roce(mdev))
4973                 return 0;
4974
4975         err = mlx5_cmd_create_vport_lag(mdev);
4976         if (err)
4977                 return err;
4978
4979         ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
4980         if (IS_ERR(ft)) {
4981                 err = PTR_ERR(ft);
4982                 goto err_destroy_vport_lag;
4983         }
4984
4985         dev->flow_db->lag_demux_ft = ft;
4986         dev->lag_active = true;
4987         return 0;
4988
4989 err_destroy_vport_lag:
4990         mlx5_cmd_destroy_vport_lag(mdev);
4991         return err;
4992 }
4993
4994 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
4995 {
4996         struct mlx5_core_dev *mdev = dev->mdev;
4997
4998         if (dev->lag_active) {
4999                 dev->lag_active = false;
5000
5001                 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
5002                 dev->flow_db->lag_demux_ft = NULL;
5003
5004                 mlx5_cmd_destroy_vport_lag(mdev);
5005         }
5006 }
5007
5008 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5009 {
5010         int err;
5011
5012         dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
5013         err = register_netdevice_notifier(&dev->roce[port_num].nb);
5014         if (err) {
5015                 dev->roce[port_num].nb.notifier_call = NULL;
5016                 return err;
5017         }
5018
5019         return 0;
5020 }
5021
5022 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
5023 {
5024         if (dev->roce[port_num].nb.notifier_call) {
5025                 unregister_netdevice_notifier(&dev->roce[port_num].nb);
5026                 dev->roce[port_num].nb.notifier_call = NULL;
5027         }
5028 }
5029
5030 static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
5031 {
5032         int err;
5033
5034         if (MLX5_CAP_GEN(dev->mdev, roce)) {
5035                 err = mlx5_nic_vport_enable_roce(dev->mdev);
5036                 if (err)
5037                         return err;
5038         }
5039
5040         err = mlx5_eth_lag_init(dev);
5041         if (err)
5042                 goto err_disable_roce;
5043
5044         return 0;
5045
5046 err_disable_roce:
5047         if (MLX5_CAP_GEN(dev->mdev, roce))
5048                 mlx5_nic_vport_disable_roce(dev->mdev);
5049
5050         return err;
5051 }
5052
5053 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
5054 {
5055         mlx5_eth_lag_cleanup(dev);
5056         if (MLX5_CAP_GEN(dev->mdev, roce))
5057                 mlx5_nic_vport_disable_roce(dev->mdev);
5058 }
5059
5060 struct mlx5_ib_counter {
5061         const char *name;
5062         size_t offset;
5063 };
5064
5065 #define INIT_Q_COUNTER(_name)           \
5066         { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
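/*
 * For example, INIT_Q_COUNTER(out_of_buffer) expands to
 * { .name = "out_of_buffer",
 *   .offset = MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer) }
 */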
5067
5068 static const struct mlx5_ib_counter basic_q_cnts[] = {
5069         INIT_Q_COUNTER(rx_write_requests),
5070         INIT_Q_COUNTER(rx_read_requests),
5071         INIT_Q_COUNTER(rx_atomic_requests),
5072         INIT_Q_COUNTER(out_of_buffer),
5073 };
5074
5075 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
5076         INIT_Q_COUNTER(out_of_sequence),
5077 };
5078
5079 static const struct mlx5_ib_counter retrans_q_cnts[] = {
5080         INIT_Q_COUNTER(duplicate_request),
5081         INIT_Q_COUNTER(rnr_nak_retry_err),
5082         INIT_Q_COUNTER(packet_seq_err),
5083         INIT_Q_COUNTER(implied_nak_seq_err),
5084         INIT_Q_COUNTER(local_ack_timeout_err),
5085 };
5086
5087 #define INIT_CONG_COUNTER(_name)                \
5088         { .name = #_name, .offset =     \
5089                 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
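/*
 * Congestion counters are 64-bit values split into _high/_low halves in the
 * firmware layout; recording the offset of the _high half lets the reader
 * fetch the full big-endian 64-bit value starting at that address.
 */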
5090
5091 static const struct mlx5_ib_counter cong_cnts[] = {
5092         INIT_CONG_COUNTER(rp_cnp_ignored),
5093         INIT_CONG_COUNTER(rp_cnp_handled),
5094         INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
5095         INIT_CONG_COUNTER(np_cnp_sent),
5096 };
5097
5098 static const struct mlx5_ib_counter extended_err_cnts[] = {
5099         INIT_Q_COUNTER(resp_local_length_error),
5100         INIT_Q_COUNTER(resp_cqe_error),
5101         INIT_Q_COUNTER(req_cqe_error),
5102         INIT_Q_COUNTER(req_remote_invalid_request),
5103         INIT_Q_COUNTER(req_remote_access_errors),
5104         INIT_Q_COUNTER(resp_remote_access_errors),
5105         INIT_Q_COUNTER(resp_cqe_flush_error),
5106         INIT_Q_COUNTER(req_cqe_flush_error),
5107 };
5108
5109 #define INIT_EXT_PPCNT_COUNTER(_name)           \
5110         { .name = #_name, .offset =     \
5111         MLX5_BYTE_OFF(ppcnt_reg, \
5112                       counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
5113
5114 static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
5115         INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
5116 };
5117
5118 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
5119 {
5120         int i;
5121
5122         for (i = 0; i < dev->num_ports; i++) {
5123                 if (dev->port[i].cnts.set_id_valid)
5124                         mlx5_core_dealloc_q_counter(dev->mdev,
5125                                                     dev->port[i].cnts.set_id);
5126                 kfree(dev->port[i].cnts.names);
5127                 kfree(dev->port[i].cnts.offsets);
5128         }
5129 }
5130
5131 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
5132                                     struct mlx5_ib_counters *cnts)
5133 {
5134         u32 num_counters;
5135
5136         num_counters = ARRAY_SIZE(basic_q_cnts);
5137
5138         if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
5139                 num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
5140
5141         if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
5142                 num_counters += ARRAY_SIZE(retrans_q_cnts);
5143
5144         if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
5145                 num_counters += ARRAY_SIZE(extended_err_cnts);
5146
5147         cnts->num_q_counters = num_counters;
5148
5149         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5150                 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
5151                 num_counters += ARRAY_SIZE(cong_cnts);
5152         }
5153         if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5154                 cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
5155                 num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
5156         }
5157         cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
5158         if (!cnts->names)
5159                 return -ENOMEM;
5160
5161         cnts->offsets = kcalloc(num_counters,
5162                                 sizeof(*cnts->offsets), GFP_KERNEL);
5163         if (!cnts->offsets)
5164                 goto err_names;
5165
5166         return 0;
5167
5168 err_names:
5169         kfree(cnts->names);
5170         cnts->names = NULL;
5171         return -ENOMEM;
5172 }
5173
5174 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
5175                                   const char **names,
5176                                   size_t *offsets)
5177 {
5178         int i;
5179         int j = 0;
5180
5181         for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
5182                 names[j] = basic_q_cnts[i].name;
5183                 offsets[j] = basic_q_cnts[i].offset;
5184         }
5185
5186         if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
5187                 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
5188                         names[j] = out_of_seq_q_cnts[i].name;
5189                         offsets[j] = out_of_seq_q_cnts[i].offset;
5190                 }
5191         }
5192
5193         if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
5194                 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
5195                         names[j] = retrans_q_cnts[i].name;
5196                         offsets[j] = retrans_q_cnts[i].offset;
5197                 }
5198         }
5199
5200         if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
5201                 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
5202                         names[j] = extended_err_cnts[i].name;
5203                         offsets[j] = extended_err_cnts[i].offset;
5204                 }
5205         }
5206
5207         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5208                 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
5209                         names[j] = cong_cnts[i].name;
5210                         offsets[j] = cong_cnts[i].offset;
5211                 }
5212         }
5213
5214         if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5215                 for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
5216                         names[j] = ext_ppcnt_cnts[i].name;
5217                         offsets[j] = ext_ppcnt_cnts[i].offset;
5218                 }
5219         }
5220 }
5221
5222 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
5223 {
5224         int err = 0;
5225         int i;
5226         bool is_shared;
5227
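        /* When the device exposes user contexts (log_max_uctx != 0), allocate
         * the Q counters with the shared UID, presumably so they can also be
         * referenced from user (DEVX) contexts; otherwise use UID 0.
         */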
5228         is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
5229
5230         for (i = 0; i < dev->num_ports; i++) {
5231                 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
5232                 if (err)
5233                         goto err_alloc;
5234
5235                 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
5236                                       dev->port[i].cnts.offsets);
5237
5238                 err = mlx5_cmd_alloc_q_counter(dev->mdev,
5239                                                &dev->port[i].cnts.set_id,
5240                                                is_shared ?
5241                                                MLX5_SHARED_RESOURCE_UID : 0);
5242                 if (err) {
5243                         mlx5_ib_warn(dev,
5244                                      "couldn't allocate queue counter for port %d, err %d\n",
5245                                      i + 1, err);
5246                         goto err_alloc;
5247                 }
5248                 dev->port[i].cnts.set_id_valid = true;
5249         }
5250
5251         return 0;
5252
5253 err_alloc:
5254         mlx5_ib_dealloc_counters(dev);
5255         return err;
5256 }
5257
5258 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
5259                                                     u8 port_num)
5260 {
5261         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5262         struct mlx5_ib_port *port = &dev->port[port_num - 1];
5263
5264         /* We only support per-port stats */
5265         if (port_num == 0)
5266                 return NULL;
5267
5268         return rdma_alloc_hw_stats_struct(port->cnts.names,
5269                                           port->cnts.num_q_counters +
5270                                           port->cnts.num_cong_counters +
5271                                           port->cnts.num_ext_ppcnt_counters,
5272                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
5273 }
5274
5275 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
5276                                     struct mlx5_ib_port *port,
5277                                     struct rdma_hw_stats *stats)
5278 {
5279         int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
5280         void *out;
5281         __be32 val;
5282         int ret, i;
5283
5284         out = kvzalloc(outlen, GFP_KERNEL);
5285         if (!out)
5286                 return -ENOMEM;
5287
5288         ret = mlx5_core_query_q_counter(mdev,
5289                                         port->cnts.set_id, 0,
5290                                         out, outlen);
5291         if (ret)
5292                 goto free;
5293
5294         for (i = 0; i < port->cnts.num_q_counters; i++) {
5295                 val = *(__be32 *)(out + port->cnts.offsets[i]);
5296                 stats->value[i] = (u64)be32_to_cpu(val);
5297         }
5298
5299 free:
5300         kvfree(out);
5301         return ret;
5302 }
5303
5304 static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
5305                                           struct mlx5_ib_port *port,
5306                                           struct rdma_hw_stats *stats)
5307 {
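        /* PPCNT values live after the Q and congestion counters in
         * stats->value, hence the offset computed below.
         */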
5308         int offset = port->cnts.num_q_counters + port->cnts.num_cong_counters;
5309         int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
5310         int ret, i;
5311         void *out;
5312
5313         out = kvzalloc(sz, GFP_KERNEL);
5314         if (!out)
5315                 return -ENOMEM;
5316
5317         ret = mlx5_cmd_query_ext_ppcnt_counters(dev->mdev, out);
5318         if (ret)
5319                 goto free;
5320
5321         for (i = 0; i < port->cnts.num_ext_ppcnt_counters; i++) {
5322                 stats->value[i + offset] =
5323                         be64_to_cpup((__be64 *)(out +
5324                                     port->cnts.offsets[i + offset]));
5325         }
5326
5327 free:
5328         kvfree(out);
5329         return ret;
5330 }
5331
5332 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
5333                                 struct rdma_hw_stats *stats,
5334                                 u8 port_num, int index)
5335 {
5336         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5337         struct mlx5_ib_port *port = &dev->port[port_num - 1];
5338         struct mlx5_core_dev *mdev;
5339         int ret, num_counters;
5340         u8 mdev_port_num;
5341
5342         if (!stats)
5343                 return -EINVAL;
5344
5345         num_counters = port->cnts.num_q_counters +
5346                        port->cnts.num_cong_counters +
5347                        port->cnts.num_ext_ppcnt_counters;
5348
5349         /* q_counters are per IB device, query the master mdev */
5350         ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
5351         if (ret)
5352                 return ret;
5353
5354         if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
5355                 ret = mlx5_ib_query_ext_ppcnt_counters(dev, port, stats);
5356                 if (ret)
5357                         return ret;
5358         }
5359
5360         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
5361                 mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
5362                                                     &mdev_port_num);
5363                 if (!mdev) {
5364                         /* If the port is not affiliated yet, it is in the
5365                          * down state, which has no counters, so they would
5366                          * read as zero; no need to query the HCA.
5367                          */
5368                         goto done;
5369                 }
5370                 ret = mlx5_lag_query_cong_counters(dev->mdev,
5371                                                    stats->value +
5372                                                    port->cnts.num_q_counters,
5373                                                    port->cnts.num_cong_counters,
5374                                                    port->cnts.offsets +
5375                                                    port->cnts.num_q_counters);
5376
5377                 mlx5_ib_put_native_port_mdev(dev, port_num);
5378                 if (ret)
5379                         return ret;
5380         }
5381
5382 done:
5383         return num_counters;
5384 }
5385
5386 static int mlx5_ib_rn_get_params(struct ib_device *device, u8 port_num,
5387                                  enum rdma_netdev_t type,
5388                                  struct rdma_netdev_alloc_params *params)
5389 {
5390         if (type != RDMA_NETDEV_IPOIB)
5391                 return -EOPNOTSUPP;
5392
5393         return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
5394 }
5395
5396 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
5397 {
5398         if (!dev->delay_drop.dbg)
5399                 return;
5400         debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
5401         kfree(dev->delay_drop.dbg);
5402         dev->delay_drop.dbg = NULL;
5403 }
5404
5405 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
5406 {
5407         if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5408                 return;
5409
5410         cancel_work_sync(&dev->delay_drop.delay_drop_work);
5411         delay_drop_debugfs_cleanup(dev);
5412 }
5413
5414 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
5415                                        size_t count, loff_t *pos)
5416 {
5417         struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5418         char lbuf[20];
5419         int len;
5420
5421         len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
5422         return simple_read_from_buffer(buf, count, pos, lbuf, len);
5423 }
5424
5425 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
5426                                         size_t count, loff_t *pos)
5427 {
5428         struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
5429         u32 timeout;
5430         u32 var;
5431
5432         if (kstrtouint_from_user(buf, count, 0, &var))
5433                 return -EFAULT;
5434
5435         timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
5436                         1000);
5437         if (timeout != var)
5438                 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
5439                             timeout);
5440
5441         delay_drop->timeout = timeout;
5442
5443         return count;
5444 }
5445
5446 static const struct file_operations fops_delay_drop_timeout = {
5447         .owner  = THIS_MODULE,
5448         .open   = simple_open,
5449         .write  = delay_drop_timeout_write,
5450         .read   = delay_drop_timeout_read,
5451 };
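/*
 * Usage sketch (the exact path is illustrative and depends on
 * mlx5_debugfs_root, typically /sys/kernel/debug/mlx5/<device>):
 *
 *   cat .../delay_drop/timeout          # current timeout, in usec
 *   echo 10000 > .../delay_drop/timeout
 *
 * Written values are rounded up to a multiple of 100 usec and clamped to
 * MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000.
 */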
5452
5453 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
5454 {
5455         struct mlx5_ib_dbg_delay_drop *dbg;
5456
5457         if (!mlx5_debugfs_root)
5458                 return 0;
5459
5460         dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
5461         if (!dbg)
5462                 return -ENOMEM;
5463
5464         dev->delay_drop.dbg = dbg;
5465
5466         dbg->dir_debugfs =
5467                 debugfs_create_dir("delay_drop",
5468                                    dev->mdev->priv.dbg_root);
5469         if (!dbg->dir_debugfs)
5470                 goto out_debugfs;
5471
5472         dbg->events_cnt_debugfs =
5473                 debugfs_create_atomic_t("num_timeout_events", 0400,
5474                                         dbg->dir_debugfs,
5475                                         &dev->delay_drop.events_cnt);
5476         if (!dbg->events_cnt_debugfs)
5477                 goto out_debugfs;
5478
5479         dbg->rqs_cnt_debugfs =
5480                 debugfs_create_atomic_t("num_rqs", 0400,
5481                                         dbg->dir_debugfs,
5482                                         &dev->delay_drop.rqs_cnt);
5483         if (!dbg->rqs_cnt_debugfs)
5484                 goto out_debugfs;
5485
5486         dbg->timeout_debugfs =
5487                 debugfs_create_file("timeout", 0600,
5488                                     dbg->dir_debugfs,
5489                                     &dev->delay_drop,
5490                                     &fops_delay_drop_timeout);
5491         if (!dbg->timeout_debugfs)
5492                 goto out_debugfs;
5493
5494         return 0;
5495
5496 out_debugfs:
5497         delay_drop_debugfs_cleanup(dev);
5498         return -ENOMEM;
5499 }
5500
5501 static void init_delay_drop(struct mlx5_ib_dev *dev)
5502 {
5503         if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5504                 return;
5505
5506         mutex_init(&dev->delay_drop.lock);
5507         dev->delay_drop.dev = dev;
5508         dev->delay_drop.activate = false;
5509         dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5510         INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5511         atomic_set(&dev->delay_drop.rqs_cnt, 0);
5512         atomic_set(&dev->delay_drop.events_cnt, 0);
5513
5514         if (delay_drop_debugfs_init(dev))
5515                 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
5516 }
5517
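     /*
      * Unaffiliating a slave port is a two step dance: clear mpi->ibdev
      * under mpi_lock so no new reference can be taken, then wait for
      * every outstanding mlx5_ib_get_native_port_mdev() user to signal
      * unref_comp before handing the mpi back to the unaffiliated list.
      */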
5518 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5519 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5520                                       struct mlx5_ib_multiport_info *mpi)
5521 {
5522         u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5523         struct mlx5_ib_port *port = &ibdev->port[port_num];
5524         int comps;
5525         int err;
5526         int i;
5527
5528         mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5529
5530         spin_lock(&port->mp.mpi_lock);
5531         if (!mpi->ibdev) {
5532                 spin_unlock(&port->mp.mpi_lock);
5533                 return;
5534         }
5535
5536         if (mpi->mdev_events.notifier_call)
5537                 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
5538         mpi->mdev_events.notifier_call = NULL;
5539
5540         mpi->ibdev = NULL;
5541
5542         spin_unlock(&port->mp.mpi_lock);
5543         mlx5_remove_netdev_notifier(ibdev, port_num);
5544         spin_lock(&port->mp.mpi_lock);
5545
5546         comps = mpi->mdev_refcnt;
5547         if (comps) {
5548                 mpi->unaffiliate = true;
5549                 init_completion(&mpi->unref_comp);
5550                 spin_unlock(&port->mp.mpi_lock);
5551
5552                 for (i = 0; i < comps; i++)
5553                         wait_for_completion(&mpi->unref_comp);
5554
5555                 spin_lock(&port->mp.mpi_lock);
5556                 mpi->unaffiliate = false;
5557         }
5558
5559         port->mp.mpi = NULL;
5560
5561         list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5562
5563         spin_unlock(&port->mp.mpi_lock);
5564
5565         err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5566
5567         mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
5568         /* Log an error; the pointers still need to be cleaned up and
5569          * the mpi added back to the unaffiliated list.
5570          */
5571         if (err)
5572                 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5573                             port_num + 1);
5574
5575         ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
5576 }
5577
5578 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5579 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5580                                     struct mlx5_ib_multiport_info *mpi)
5581 {
5582         u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5583         int err;
5584
5585         spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5586         if (ibdev->port[port_num].mp.mpi) {
5587                 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n",
5588                             port_num + 1);
5589                 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5590                 return false;
5591         }
5592
5593         ibdev->port[port_num].mp.mpi = mpi;
5594         mpi->ibdev = ibdev;
5595         mpi->mdev_events.notifier_call = NULL;
5596         spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5597
5598         err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5599         if (err)
5600                 goto unbind;
5601
5602         err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5603         if (err)
5604                 goto unbind;
5605
5606         err = mlx5_add_netdev_notifier(ibdev, port_num);
5607         if (err) {
5608                 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5609                             port_num + 1);
5610                 goto unbind;
5611         }
5612
5613         mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
5614         mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
5615
5616         mlx5_ib_init_cong_debugfs(ibdev, port_num);
5617
5618         return true;
5619
5620 unbind:
5621         mlx5_ib_unbind_slave_port(ibdev, mpi);
5622         return false;
5623 }
5624
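     /*
      * A multiport master owns one mlx5_ib_multiport_info per port: a
      * locally allocated stub for its own native port and, for every
      * other port, an mpi donated by a slave device whose
      * sys_image_guid matches.  Slaves that have not shown up yet are
      * picked up later from mlx5_ib_unaffiliated_port_list.
      */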
5625 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5626 {
5627         int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5628         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5629                                                           port_num + 1);
5630         struct mlx5_ib_multiport_info *mpi;
5631         int err;
5632         int i;
5633
5634         if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5635                 return 0;
5636
5637         err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5638                                                      &dev->sys_image_guid);
5639         if (err)
5640                 return err;
5641
5642         err = mlx5_nic_vport_enable_roce(dev->mdev);
5643         if (err)
5644                 return err;
5645
5646         mutex_lock(&mlx5_ib_multiport_mutex);
5647         for (i = 0; i < dev->num_ports; i++) {
5648                 bool bound = false;
5649
5650                 /* build a stub multiport info struct for the native port. */
5651                 if (i == port_num) {
5652                         mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5653                         if (!mpi) {
5654                                 mutex_unlock(&mlx5_ib_multiport_mutex);
5655                                 mlx5_nic_vport_disable_roce(dev->mdev);
5656                                 return -ENOMEM;
5657                         }
5658
5659                         mpi->is_master = true;
5660                         mpi->mdev = dev->mdev;
5661                         mpi->sys_image_guid = dev->sys_image_guid;
5662                         dev->port[i].mp.mpi = mpi;
5663                         mpi->ibdev = dev;
5664                         mpi = NULL;
5665                         continue;
5666                 }
5667
5668                 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5669                                     list) {
5670                         if (dev->sys_image_guid == mpi->sys_image_guid &&
5671                             (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5672                                 bound = mlx5_ib_bind_slave_port(dev, mpi);
5673                         }
5674
5675                         if (bound) {
5676                                 dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
5677                                 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5678                                 list_del(&mpi->list);
5679                                 break;
5680                         }
5681                 }
5682                 if (!bound) {
5683                         get_port_caps(dev, i + 1);
5684                         mlx5_ib_dbg(dev, "no free port found for port %d\n",
5685                                     i + 1);
5686                 }
5687         }
5688
5689         list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5690         mutex_unlock(&mlx5_ib_multiport_mutex);
5691         return err;
5692 }
5693
5694 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
5695 {
5696         int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5697         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5698                                                           port_num + 1);
5699         int i;
5700
5701         if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5702                 return;
5703
5704         mutex_lock(&mlx5_ib_multiport_mutex);
5705         for (i = 0; i < dev->num_ports; i++) {
5706                 if (dev->port[i].mp.mpi) {
5707                         /* Destroy the native port stub */
5708                         if (i == port_num) {
5709                                 kfree(dev->port[i].mp.mpi);
5710                                 dev->port[i].mp.mpi = NULL;
5711                         } else {
5712                                 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
5713                                 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
5714                         }
5715                 }
5716         }
5717
5718         mlx5_ib_dbg(dev, "removing from devlist\n");
5719         list_del(&dev->ib_dev_list);
5720         mutex_unlock(&mlx5_ib_multiport_mutex);
5721
5722         mlx5_nic_vport_disable_roce(dev->mdev);
5723 }
5724
5725 ADD_UVERBS_ATTRIBUTES_SIMPLE(
5726         mlx5_ib_dm,
5727         UVERBS_OBJECT_DM,
5728         UVERBS_METHOD_DM_ALLOC,
5729         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
5730                             UVERBS_ATTR_TYPE(u64),
5731                             UA_MANDATORY),
5732         UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
5733                             UVERBS_ATTR_TYPE(u16),
5734                             UA_MANDATORY));
5735
5736 ADD_UVERBS_ATTRIBUTES_SIMPLE(
5737         mlx5_ib_flow_action,
5738         UVERBS_OBJECT_FLOW_ACTION,
5739         UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
5740         UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
5741                              enum mlx5_ib_uapi_flow_action_flags));
5742
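     /*
      * Chain the driver specific uverbs extensions into a single uapi
      * definition list; the DEVX and flow chains are only present when
      * user access is compiled in.
      */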
5743 static const struct uapi_definition mlx5_ib_defs[] = {
5744 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
5745         UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
5746         UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
5747 #endif
5748
5749         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
5750                                 &mlx5_ib_flow_action),
5751         UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
5752         {}
5753 };
5754
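     /*
      * Read back the hardware flow counters bound to an ib_counters
      * object.  Values are accumulated rather than assigned: several
      * hardware counters may feed one user slot, and each descriptor
      * maps hw value out[desc[i].description] into
      * counters_buff[desc[i].index].
      */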
5755 static int mlx5_ib_read_counters(struct ib_counters *counters,
5756                                  struct ib_counters_read_attr *read_attr,
5757                                  struct uverbs_attr_bundle *attrs)
5758 {
5759         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5760         struct mlx5_read_counters_attr mread_attr = {};
5761         struct mlx5_ib_flow_counters_desc *desc;
5762         int ret, i;
5763
5764         mutex_lock(&mcounters->mcntrs_mutex);
5765         if (mcounters->cntrs_max_index > read_attr->ncounters) {
5766                 ret = -EINVAL;
5767                 goto err_bound;
5768         }
5769
5770         mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
5771                                  GFP_KERNEL);
5772         if (!mread_attr.out) {
5773                 ret = -ENOMEM;
5774                 goto err_bound;
5775         }
5776
5777         mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
5778         mread_attr.flags = read_attr->flags;
5779         ret = mcounters->read_counters(counters->device, &mread_attr);
5780         if (ret)
5781                 goto err_read;
5782
5783         /* Walk the counter descriptions and accumulate each hardware value
5784          * into the user buffer slot given by its index/description pair.
5785          */
5786         desc = mcounters->counters_data;
5787         for (i = 0; i < mcounters->ncounters; i++)
5788                 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
5789
5790 err_read:
5791         kfree(mread_attr.out);
5792 err_bound:
5793         mutex_unlock(&mcounters->mcntrs_mutex);
5794         return ret;
5795 }
5796
5797 static int mlx5_ib_destroy_counters(struct ib_counters *counters)
5798 {
5799         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5800
5801         counters_clear_description(counters);
5802         if (mcounters->hw_cntrs_hndl)
5803                 mlx5_fc_destroy(to_mdev(counters->device)->mdev,
5804                                 mcounters->hw_cntrs_hndl);
5805
5806         kfree(mcounters);
5807
5808         return 0;
5809 }
5810
5811 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
5812                                                    struct uverbs_attr_bundle *attrs)
5813 {
5814         struct mlx5_ib_mcounters *mcounters;
5815
5816         mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
5817         if (!mcounters)
5818                 return ERR_PTR(-ENOMEM);
5819
5820         mutex_init(&mcounters->mcntrs_mutex);
5821
5822         return &mcounters->ibcntrs;
5823 }
5824
5825 void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
5826 {
5827         mlx5_ib_cleanup_multiport_master(dev);
5828         if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
5829                 srcu_barrier(&dev->mr_srcu);
5830                 cleanup_srcu_struct(&dev->mr_srcu);
5831         }
5832         kfree(dev->port);
5833 }
5834
5835 int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
5836 {
5837         struct mlx5_core_dev *mdev = dev->mdev;
5838         int err;
5839         int i;
5840
5841         dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
5842                             GFP_KERNEL);
5843         if (!dev->port)
5844                 return -ENOMEM;
5845
5846         for (i = 0; i < dev->num_ports; i++) {
5847                 spin_lock_init(&dev->port[i].mp.mpi_lock);
5848                 rwlock_init(&dev->roce[i].netdev_lock);
5849         }
5850
5851         err = mlx5_ib_init_multiport_master(dev);
5852         if (err)
5853                 goto err_free_port;
5854
5855         if (!mlx5_core_mp_enabled(mdev)) {
5856                 for (i = 1; i <= dev->num_ports; i++) {
5857                         err = get_port_caps(dev, i);
5858                         if (err)
5859                                 break;
5860                 }
5861         } else {
5862                 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
5863         }
5864         if (err)
5865                 goto err_mp;
5866
5867         if (mlx5_use_mad_ifc(dev))
5868                 get_ext_port_caps(dev);
5869
5870         dev->ib_dev.owner               = THIS_MODULE;
5871         dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
5872         dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
5873         dev->ib_dev.phys_port_cnt       = dev->num_ports;
5874         dev->ib_dev.num_comp_vectors    = mlx5_comp_vectors_count(mdev);
5875         dev->ib_dev.dev.parent          = &mdev->pdev->dev;
5876
5877         mutex_init(&dev->cap_mask_mutex);
5878         INIT_LIST_HEAD(&dev->qp_list);
5879         spin_lock_init(&dev->reset_flow_resource_lock);
5880
5881         spin_lock_init(&dev->memic.memic_lock);
5882         dev->memic.dev = mdev;
5883
5884         if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
5885                 err = init_srcu_struct(&dev->mr_srcu);
5886                 if (err)
5887                         goto err_mp;
5888         }
5889
5890         return 0;
5891 err_mp:
5892         mlx5_ib_cleanup_multiport_master(dev);
5893
5894 err_free_port:
5895         kfree(dev->port);
5896
5897         return err;
5898 }
5899
5900 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
5901 {
5902         dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
5903
5904         if (!dev->flow_db)
5905                 return -ENOMEM;
5906
5907         mutex_init(&dev->flow_db->lock);
5908
5909         return 0;
5910 }
5911
5912 int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
5913 {
5914         struct mlx5_ib_dev *nic_dev;
5915
5916         nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
5917
5918         if (!nic_dev)
5919                 return -EINVAL;
5920
5921         dev->flow_db = nic_dev->flow_db;
5922
5923         return 0;
5924 }
5925
5926 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
5927 {
5928         kfree(dev->flow_db);
5929 }
5930
5931 static const struct ib_device_ops mlx5_ib_dev_ops = {
5932         .add_gid = mlx5_ib_add_gid,
5933         .alloc_mr = mlx5_ib_alloc_mr,
5934         .alloc_pd = mlx5_ib_alloc_pd,
5935         .alloc_ucontext = mlx5_ib_alloc_ucontext,
5936         .attach_mcast = mlx5_ib_mcg_attach,
5937         .check_mr_status = mlx5_ib_check_mr_status,
5938         .create_ah = mlx5_ib_create_ah,
5939         .create_counters = mlx5_ib_create_counters,
5940         .create_cq = mlx5_ib_create_cq,
5941         .create_flow = mlx5_ib_create_flow,
5942         .create_qp = mlx5_ib_create_qp,
5943         .create_srq = mlx5_ib_create_srq,
5944         .dealloc_pd = mlx5_ib_dealloc_pd,
5945         .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
5946         .del_gid = mlx5_ib_del_gid,
5947         .dereg_mr = mlx5_ib_dereg_mr,
5948         .destroy_ah = mlx5_ib_destroy_ah,
5949         .destroy_counters = mlx5_ib_destroy_counters,
5950         .destroy_cq = mlx5_ib_destroy_cq,
5951         .destroy_flow = mlx5_ib_destroy_flow,
5952         .destroy_flow_action = mlx5_ib_destroy_flow_action,
5953         .destroy_qp = mlx5_ib_destroy_qp,
5954         .destroy_srq = mlx5_ib_destroy_srq,
5955         .detach_mcast = mlx5_ib_mcg_detach,
5956         .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
5957         .drain_rq = mlx5_ib_drain_rq,
5958         .drain_sq = mlx5_ib_drain_sq,
5959         .get_dev_fw_str = get_dev_fw_str,
5960         .get_dma_mr = mlx5_ib_get_dma_mr,
5961         .get_link_layer = mlx5_ib_port_link_layer,
5962         .map_mr_sg = mlx5_ib_map_mr_sg,
5963         .mmap = mlx5_ib_mmap,
5964         .modify_cq = mlx5_ib_modify_cq,
5965         .modify_device = mlx5_ib_modify_device,
5966         .modify_port = mlx5_ib_modify_port,
5967         .modify_qp = mlx5_ib_modify_qp,
5968         .modify_srq = mlx5_ib_modify_srq,
5969         .poll_cq = mlx5_ib_poll_cq,
5970         .post_recv = mlx5_ib_post_recv,
5971         .post_send = mlx5_ib_post_send,
5972         .post_srq_recv = mlx5_ib_post_srq_recv,
5973         .process_mad = mlx5_ib_process_mad,
5974         .query_ah = mlx5_ib_query_ah,
5975         .query_device = mlx5_ib_query_device,
5976         .query_gid = mlx5_ib_query_gid,
5977         .query_pkey = mlx5_ib_query_pkey,
5978         .query_qp = mlx5_ib_query_qp,
5979         .query_srq = mlx5_ib_query_srq,
5980         .read_counters = mlx5_ib_read_counters,
5981         .reg_user_mr = mlx5_ib_reg_user_mr,
5982         .req_notify_cq = mlx5_ib_arm_cq,
5983         .rereg_user_mr = mlx5_ib_rereg_user_mr,
5984         .resize_cq = mlx5_ib_resize_cq,
5985         INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
5986         INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
5987 };
5988
5989 static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
5990         .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
5991         .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
5992 };
5993
5994 static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
5995         .rdma_netdev_get_params = mlx5_ib_rn_get_params,
5996 };
5997
5998 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
5999         .get_vf_config = mlx5_ib_get_vf_config,
6000         .get_vf_stats = mlx5_ib_get_vf_stats,
6001         .set_vf_guid = mlx5_ib_set_vf_guid,
6002         .set_vf_link_state = mlx5_ib_set_vf_link_state,
6003 };
6004
6005 static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
6006         .alloc_mw = mlx5_ib_alloc_mw,
6007         .dealloc_mw = mlx5_ib_dealloc_mw,
6008 };
6009
6010 static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
6011         .alloc_xrcd = mlx5_ib_alloc_xrcd,
6012         .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
6013 };
6014
6015 static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
6016         .alloc_dm = mlx5_ib_alloc_dm,
6017         .dealloc_dm = mlx5_ib_dealloc_dm,
6018         .reg_dm_mr = mlx5_ib_reg_dm_mr,
6019 };
6020
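     /*
      * Capability dependent ib_device_ops groups are layered on top of
      * the base mlx5_ib_dev_ops below: each MLX5_CAP_* test registers
      * only the handlers the device can honour (enhanced IPoIB, SR-IOV,
      * memory windows, XRC, device memory and IPsec offload).
      */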
6021 int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
6022 {
6023         struct mlx5_core_dev *mdev = dev->mdev;
6024         int err;
6025
6026         dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
6027         dev->ib_dev.uverbs_cmd_mask     =
6028                 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
6029                 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
6030                 (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
6031                 (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
6032                 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
6033                 (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
6034                 (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
6035                 (1ull << IB_USER_VERBS_CMD_REG_MR)              |
6036                 (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
6037                 (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
6038                 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
6039                 (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
6040                 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
6041                 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
6042                 (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
6043                 (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
6044                 (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
6045                 (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
6046                 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
6047                 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
6048                 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
6049                 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
6050                 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
6051                 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
6052                 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
6053                 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
6054         dev->ib_dev.uverbs_ex_cmd_mask =
6055                 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)     |
6056                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
6057                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)        |
6058                 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)        |
6059                 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ)        |
6060                 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW)      |
6061                 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
6062
6063         if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
6064             IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
6065                 ib_set_device_ops(&dev->ib_dev,
6066                                   &mlx5_ib_dev_ipoib_enhanced_ops);
6067
6068         if (mlx5_core_is_pf(mdev))
6069                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
6070
6071         dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
6072
6073         if (MLX5_CAP_GEN(mdev, imaicl)) {
6074                 dev->ib_dev.uverbs_cmd_mask |=
6075                         (1ull << IB_USER_VERBS_CMD_ALLOC_MW)    |
6076                         (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
6077                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
6078         }
6079
6080         if (MLX5_CAP_GEN(mdev, xrc)) {
6081                 dev->ib_dev.uverbs_cmd_mask |=
6082                         (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
6083                         (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
6084                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
6085         }
6086
6087         if (MLX5_CAP_DEV_MEM(mdev, memic))
6088                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
6089
6090         if (mlx5_accel_ipsec_device_caps(dev->mdev) &
6091             MLX5_ACCEL_IPSEC_CAP_DEVICE)
6092                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
6093         dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
6094         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
6095
6096         if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
6097                 dev->ib_dev.driver_def = mlx5_ib_defs;
6098
6099         err = init_node_data(dev);
6100         if (err)
6101                 return err;
6102
6103         if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
6104             (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
6105              MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
6106                 mutex_init(&dev->lb.mutex);
6107
6108         return 0;
6109 }
6110
6111 static const struct ib_device_ops mlx5_ib_dev_port_ops = {
6112         .get_port_immutable = mlx5_port_immutable,
6113         .query_port = mlx5_ib_query_port,
6114 };
6115
6116 static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
6117 {
6118         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
6119         return 0;
6120 }
6121
6122 static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
6123         .get_port_immutable = mlx5_port_rep_immutable,
6124         .query_port = mlx5_ib_rep_query_port,
6125 };
6126
6127 int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
6128 {
6129         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
6130         return 0;
6131 }
6132
6133 static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
6134         .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
6135         .create_wq = mlx5_ib_create_wq,
6136         .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
6137         .destroy_wq = mlx5_ib_destroy_wq,
6138         .get_netdev = mlx5_ib_get_netdev,
6139         .modify_wq = mlx5_ib_modify_wq,
6140 };
6141
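     /*
      * Common RoCE setup shared by the PF and representor profiles:
      * initialize the per port RoCE state, advertise the WQ and RWQ
      * indirection table commands, and register a netdev notifier on
      * the native port.
      */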
6142 static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
6143 {
6144         u8 port_num;
6145         int i;
6146
6147         for (i = 0; i < dev->num_ports; i++) {
6148                 dev->roce[i].dev = dev;
6149                 dev->roce[i].native_port_num = i + 1;
6150                 dev->roce[i].last_port_state = IB_PORT_DOWN;
6151         }
6152
6153         dev->ib_dev.uverbs_ex_cmd_mask |=
6154                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
6155                         (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
6156                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
6157                         (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
6158                         (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
6159         ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
6160
6161         port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6162
6163         return mlx5_add_netdev_notifier(dev, port_num);
6164 }
6165
6166 static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
6167 {
6168         u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
6169
6170         mlx5_remove_netdev_notifier(dev, port_num);
6171 }
6172
6173 int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
6174 {
6175         struct mlx5_core_dev *mdev = dev->mdev;
6176         enum rdma_link_layer ll;
6177         int port_type_cap;
6178         int err = 0;
6179
6180         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6181         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6182
6183         if (ll == IB_LINK_LAYER_ETHERNET)
6184                 err = mlx5_ib_stage_common_roce_init(dev);
6185
6186         return err;
6187 }
6188
6189 void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
6190 {
6191         mlx5_ib_stage_common_roce_cleanup(dev);
6192 }
6193
6194 static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
6195 {
6196         struct mlx5_core_dev *mdev = dev->mdev;
6197         enum rdma_link_layer ll;
6198         int port_type_cap;
6199         int err;
6200
6201         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6202         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6203
6204         if (ll == IB_LINK_LAYER_ETHERNET) {
6205                 err = mlx5_ib_stage_common_roce_init(dev);
6206                 if (err)
6207                         return err;
6208
6209                 err = mlx5_enable_eth(dev);
6210                 if (err)
6211                         goto cleanup;
6212         }
6213
6214         return 0;
6215 cleanup:
6216         mlx5_ib_stage_common_roce_cleanup(dev);
6217
6218         return err;
6219 }
6220
6221 static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
6222 {
6223         struct mlx5_core_dev *mdev = dev->mdev;
6224         enum rdma_link_layer ll;
6225         int port_type_cap;
6226
6227         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6228         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6229
6230         if (ll == IB_LINK_LAYER_ETHERNET) {
6231                 mlx5_disable_eth(dev);
6232                 mlx5_ib_stage_common_roce_cleanup(dev);
6233         }
6234 }
6235
6236 int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
6237 {
6238         return create_dev_resources(&dev->devr);
6239 }
6240
6241 void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
6242 {
6243         destroy_dev_resources(&dev->devr);
6244 }
6245
6246 static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
6247 {
6248         mlx5_ib_internal_fill_odp_caps(dev);
6249
6250         return mlx5_ib_odp_init_one(dev);
6251 }
6252
6253 static void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
6254 {
6255         mlx5_ib_odp_cleanup_one(dev);
6256 }
6257
6258 static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
6259         .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
6260         .get_hw_stats = mlx5_ib_get_hw_stats,
6261 };
6262
6263 int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
6264 {
6265         if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
6266                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
6267
6268                 return mlx5_ib_alloc_counters(dev);
6269         }
6270
6271         return 0;
6272 }
6273
6274 void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
6275 {
6276         if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
6277                 mlx5_ib_dealloc_counters(dev);
6278 }
6279
6280 static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
6281 {
6282         mlx5_ib_init_cong_debugfs(dev,
6283                                   mlx5_core_native_port_num(dev->mdev) - 1);
6284         return 0;
6285 }
6286
6287 static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
6288 {
6289         mlx5_ib_cleanup_cong_debugfs(dev,
6290                                      mlx5_core_native_port_num(dev->mdev) - 1);
6291 }
6292
6293 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
6294 {
6295         dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
6296         return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
6297 }
6298
6299 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
6300 {
6301         mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
6302 }
6303
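     /*
      * Reserve two blue flame registers at init time: a regular one
      * (dev->bfreg) and a fast path one (dev->fp_bfreg, selected by the
      * last mlx5_alloc_bfreg() argument) for low latency doorbells.
      */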
6304 int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
6305 {
6306         int err;
6307
6308         err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
6309         if (err)
6310                 return err;
6311
6312         err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
6313         if (err)
6314                 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6315
6316         return err;
6317 }
6318
6319 void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
6320 {
6321         mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
6322         mlx5_free_bfreg(dev->mdev, &dev->bfreg);
6323 }
6324
6325 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
6326 {
6327         const char *name;
6328
6329         rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
6330         if (!mlx5_lag_is_roce(dev->mdev))
6331                 name = "mlx5_%d";
6332         else
6333                 name = "mlx5_bond_%d";
6334         return ib_register_device(&dev->ib_dev, name);
6335 }
6336
6337 void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
6338 {
6339         destroy_umrc_res(dev);
6340 }
6341
6342 void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
6343 {
6344         ib_unregister_device(&dev->ib_dev);
6345 }
6346
6347 int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
6348 {
6349         return create_umr_res(dev);
6350 }
6351
6352 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
6353 {
6354         init_delay_drop(dev);
6355
6356         return 0;
6357 }
6358
6359 static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
6360 {
6361         cancel_delay_drop(dev);
6362 }
6363
6364 static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
6365 {
6366         dev->mdev_events.notifier_call = mlx5_ib_event;
6367         mlx5_notifier_register(dev->mdev, &dev->mdev_events);
6368         return 0;
6369 }
6370
6371 static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
6372 {
6373         mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
6374 }
6375
6376 static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
6377 {
6378         int uid;
6379
6380         uid = mlx5_ib_devx_create(dev, false);
6381         if (uid > 0)
6382                 dev->devx_whitelist_uid = uid;
6383
6384         return 0;
6385 }
6386 static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
6387 {
6388         if (dev->devx_whitelist_uid)
6389                 mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
6390 }
6391
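     /*
      * Tear down the first @stage stages of @profile in reverse order.
      * A full teardown passes MLX5_IB_STAGE_MAX; __mlx5_ib_add() passes
      * the index of the stage that failed, so only the stages that
      * actually completed are unwound.
      */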
6392 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
6393                       const struct mlx5_ib_profile *profile,
6394                       int stage)
6395 {
6396         /* Number of stages to clean up */
6397         while (stage) {
6398                 stage--;
6399                 if (profile->stage[stage].cleanup)
6400                         profile->stage[stage].cleanup(dev);
6401         }
6402 }
6403
6404 void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
6405                     const struct mlx5_ib_profile *profile)
6406 {
6407         int err;
6408         int i;
6409
6410         for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
6411                 if (profile->stage[i].init) {
6412                         err = profile->stage[i].init(dev);
6413                         if (err)
6414                                 goto err_out;
6415                 }
6416         }
6417
6418         dev->profile = profile;
6419         dev->ib_active = true;
6420
6421         return dev;
6422
6423 err_out:
6424         __mlx5_ib_remove(dev, profile, i);
6425
6426         return NULL;
6427 }
6428
6429 static const struct mlx5_ib_profile pf_profile = {
6430         STAGE_CREATE(MLX5_IB_STAGE_INIT,
6431                      mlx5_ib_stage_init_init,
6432                      mlx5_ib_stage_init_cleanup),
6433         STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6434                      mlx5_ib_stage_flow_db_init,
6435                      mlx5_ib_stage_flow_db_cleanup),
6436         STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6437                      mlx5_ib_stage_caps_init,
6438                      NULL),
6439         STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6440                      mlx5_ib_stage_non_default_cb,
6441                      NULL),
6442         STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6443                      mlx5_ib_stage_roce_init,
6444                      mlx5_ib_stage_roce_cleanup),
6445         STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6446                      mlx5_init_srq_table,
6447                      mlx5_cleanup_srq_table),
6448         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6449                      mlx5_ib_stage_dev_res_init,
6450                      mlx5_ib_stage_dev_res_cleanup),
6451         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6452                      mlx5_ib_stage_dev_notifier_init,
6453                      mlx5_ib_stage_dev_notifier_cleanup),
6454         STAGE_CREATE(MLX5_IB_STAGE_ODP,
6455                      mlx5_ib_stage_odp_init,
6456                      mlx5_ib_stage_odp_cleanup),
6457         STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6458                      mlx5_ib_stage_counters_init,
6459                      mlx5_ib_stage_counters_cleanup),
6460         STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
6461                      mlx5_ib_stage_cong_debugfs_init,
6462                      mlx5_ib_stage_cong_debugfs_cleanup),
6463         STAGE_CREATE(MLX5_IB_STAGE_UAR,
6464                      mlx5_ib_stage_uar_init,
6465                      mlx5_ib_stage_uar_cleanup),
6466         STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6467                      mlx5_ib_stage_bfrag_init,
6468                      mlx5_ib_stage_bfrag_cleanup),
6469         STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6470                      NULL,
6471                      mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6472         STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
6473                      mlx5_ib_stage_devx_init,
6474                      mlx5_ib_stage_devx_cleanup),
6475         STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6476                      mlx5_ib_stage_ib_reg_init,
6477                      mlx5_ib_stage_ib_reg_cleanup),
6478         STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6479                      mlx5_ib_stage_post_ib_reg_umr_init,
6480                      NULL),
6481         STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
6482                      mlx5_ib_stage_delay_drop_init,
6483                      mlx5_ib_stage_delay_drop_cleanup),
6484 };
6485
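     /*
      * The uplink representor profile mirrors pf_profile but swaps in
      * the representor port callbacks and drops the stages a
      * representor does not need: ODP, congestion debugfs, the DEVX
      * whitelist uid and delay drop.
      */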
6486 const struct mlx5_ib_profile uplink_rep_profile = {
6487         STAGE_CREATE(MLX5_IB_STAGE_INIT,
6488                      mlx5_ib_stage_init_init,
6489                      mlx5_ib_stage_init_cleanup),
6490         STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
6491                      mlx5_ib_stage_flow_db_init,
6492                      mlx5_ib_stage_flow_db_cleanup),
6493         STAGE_CREATE(MLX5_IB_STAGE_CAPS,
6494                      mlx5_ib_stage_caps_init,
6495                      NULL),
6496         STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
6497                      mlx5_ib_stage_rep_non_default_cb,
6498                      NULL),
6499         STAGE_CREATE(MLX5_IB_STAGE_ROCE,
6500                      mlx5_ib_stage_rep_roce_init,
6501                      mlx5_ib_stage_rep_roce_cleanup),
6502         STAGE_CREATE(MLX5_IB_STAGE_SRQ,
6503                      mlx5_init_srq_table,
6504                      mlx5_cleanup_srq_table),
6505         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
6506                      mlx5_ib_stage_dev_res_init,
6507                      mlx5_ib_stage_dev_res_cleanup),
6508         STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
6509                      mlx5_ib_stage_dev_notifier_init,
6510                      mlx5_ib_stage_dev_notifier_cleanup),
6511         STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
6512                      mlx5_ib_stage_counters_init,
6513                      mlx5_ib_stage_counters_cleanup),
6514         STAGE_CREATE(MLX5_IB_STAGE_UAR,
6515                      mlx5_ib_stage_uar_init,
6516                      mlx5_ib_stage_uar_cleanup),
6517         STAGE_CREATE(MLX5_IB_STAGE_BFREG,
6518                      mlx5_ib_stage_bfrag_init,
6519                      mlx5_ib_stage_bfrag_cleanup),
6520         STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
6521                      NULL,
6522                      mlx5_ib_stage_pre_ib_reg_umr_cleanup),
6523         STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
6524                      mlx5_ib_stage_ib_reg_init,
6525                      mlx5_ib_stage_ib_reg_cleanup),
6526         STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
6527                      mlx5_ib_stage_post_ib_reg_umr_init,
6528                      NULL),
6529 };
6530
6531 static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
6532 {
6533         struct mlx5_ib_multiport_info *mpi;
6534         struct mlx5_ib_dev *dev;
6535         bool bound = false;
6536         int err;
6537
6538         mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
6539         if (!mpi)
6540                 return NULL;
6541
6542         mpi->mdev = mdev;
6543
6544         err = mlx5_query_nic_vport_system_image_guid(mdev,
6545                                                      &mpi->sys_image_guid);
6546         if (err) {
6547                 kfree(mpi);
6548                 return NULL;
6549         }
6550
6551         mutex_lock(&mlx5_ib_multiport_mutex);
6552         list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
6553                 if (dev->sys_image_guid == mpi->sys_image_guid)
6554                         bound = mlx5_ib_bind_slave_port(dev, mpi);
6555
6556                 if (bound) {
6557                         rdma_roce_rescan_device(&dev->ib_dev);
6558                         break;
6559                 }
6560         }
6561
6562         if (!bound) {
6563                 list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
6564                 dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
6565         }
6566         mutex_unlock(&mlx5_ib_multiport_mutex);
6567
6568         return mpi;
6569 }
6570
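     /*
      * Entry point from mlx5_core.  An eswitch manager in offloads
      * (switchdev) mode registers vport representors instead of a
      * regular IB device, and a multiport Ethernet slave only
      * contributes an mpi to its master; everything else gets the full
      * pf_profile.
      */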
6571 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
6572 {
6573         enum rdma_link_layer ll;
6574         struct mlx5_ib_dev *dev;
6575         int port_type_cap;
6576
6577         printk_once(KERN_INFO "%s", mlx5_version);
6578
6579         if (MLX5_ESWITCH_MANAGER(mdev) &&
6580             mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
6581                 mlx5_ib_register_vport_reps(mdev);
6582                 return mdev;
6583         }
6584
6585         port_type_cap = MLX5_CAP_GEN(mdev, port_type);
6586         ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
6587
6588         if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
6589                 return mlx5_ib_add_slave_port(mdev);
6590
6591         dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
6592         if (!dev)
6593                 return NULL;
6594
6595         dev->mdev = mdev;
6596         dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
6597                              MLX5_CAP_GEN(mdev, num_vhca_ports));
6598
6599         return __mlx5_ib_add(dev, &pf_profile);
6600 }
6601
6602 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
6603 {
6604         struct mlx5_ib_multiport_info *mpi;
6605         struct mlx5_ib_dev *dev;
6606
6607         if (MLX5_ESWITCH_MANAGER(mdev) && context == mdev) {
6608                 mlx5_ib_unregister_vport_reps(mdev);
6609                 return;
6610         }
6611
6612         if (mlx5_core_is_mp_slave(mdev)) {
6613                 mpi = context;
6614                 mutex_lock(&mlx5_ib_multiport_mutex);
6615                 if (mpi->ibdev)
6616                         mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
6617                 list_del(&mpi->list);
6618                 mutex_unlock(&mlx5_ib_multiport_mutex);
6619                 return;
6620         }
6621
6622         dev = context;
6623         __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
6624
6625         ib_dealloc_device(&dev->ib_dev);
6626 }
6627
6628 static struct mlx5_interface mlx5_ib_interface = {
6629         .add            = mlx5_ib_add,
6630         .remove         = mlx5_ib_remove,
6631         .protocol       = MLX5_INTERFACE_PROTOCOL_IB,
6632 };
6633
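     /*
      * The XLT emergency page is a single pre-allocated page handed out
      * under a mutex, so only one caller may hold it at a time and every
      * get must be paired with a put, along the lines of:
      *
      *   unsigned long page = mlx5_ib_get_xlt_emergency_page();
      *   ... use the page as a bounce buffer for the XLT ...
      *   mlx5_ib_put_xlt_emergency_page();
      */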
6634 unsigned long mlx5_ib_get_xlt_emergency_page(void)
6635 {
6636         mutex_lock(&xlt_emergency_page_mutex);
6637         return xlt_emergency_page;
6638 }
6639
6640 void mlx5_ib_put_xlt_emergency_page(void)
6641 {
6642         mutex_unlock(&xlt_emergency_page_mutex);
6643 }
6644
6645 static int __init mlx5_ib_init(void)
6646 {
6647         int err;
6648
6649         xlt_emergency_page = __get_free_page(GFP_KERNEL);
6650         if (!xlt_emergency_page)
6651                 return -ENOMEM;
6652
6653         mutex_init(&xlt_emergency_page_mutex);
6654
6655         mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
6656         if (!mlx5_ib_event_wq) {
6657                 free_page(xlt_emergency_page);
6658                 return -ENOMEM;
6659         }
6660
6661         mlx5_ib_odp_init();
6662
6663         err = mlx5_register_interface(&mlx5_ib_interface);
             if (err) {
                     /* Unwind on registration failure so the event
                      * workqueue and the emergency page are not leaked.
                      */
                     destroy_workqueue(mlx5_ib_event_wq);
                     free_page(xlt_emergency_page);
             }
6664
6665         return err;
6666 }
6667
6668 static void __exit mlx5_ib_cleanup(void)
6669 {
6670         mlx5_unregister_interface(&mlx5_ib_interface);
6671         destroy_workqueue(mlx5_ib_event_wq);
6672         mutex_destroy(&xlt_emergency_page_mutex);
6673         free_page(xlt_emergency_page);
6674 }
6675
6676 module_init(mlx5_ib_init);
6677 module_exit(mlx5_ib_cleanup);