net/mlx4: Adapt code for N-Port VF
drivers/infiniband/hw/mlx4/mad.c
1/*
2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <rdma/ib_mad.h>
34#include <rdma/ib_smi.h>
35#include <rdma/ib_sa.h>
36#include <rdma/ib_cache.h>
37
38#include <linux/random.h>
39#include <linux/mlx4/cmd.h>
40#include <linux/gfp.h>
41#include <rdma/ib_pma.h>
42
43#include "mlx4_ib.h"
44
45enum {
46 MLX4_IB_VENDOR_CLASS1 = 0x9,
47 MLX4_IB_VENDOR_CLASS2 = 0xa
48};
49
50#define MLX4_TUN_SEND_WRID_SHIFT 34
51#define MLX4_TUN_QPN_SHIFT 32
52#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
53#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
54
55#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
56#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
57
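/*
 * Reader's note on the wr_id encoding used by the tunnel/proxy QPs, as I
 * read the macros above: bits 0..31 carry the ring-buffer index, bits
 * 32..33 carry the proxy QP type (0 = SMI, 1 = GSI), and bit 34 marks a
 * receive work request.  For example, a receive posted on the GSI tunnel
 * QP with buffer index 5 would use
 * wr_id = 5 | MLX4_TUN_SET_WRID_QPN(1) | MLX4_TUN_WRID_RECV.
 */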
58 /* Port mgmt change event handling */
59
60#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
61#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
62#define NUM_IDX_IN_PKEY_TBL_BLK 32
63#define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */
64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66
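/*
 * Rough geometry implied by the constants above: a P_Key table change EQE
 * describes one block of 32 pkey indexes, while GUID info is handled in
 * blocks of 8 GUIDs of 8 bytes each (64 bytes, i.e. one GuidInfo SMP data
 * block).
 */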
67struct mlx4_mad_rcv_buf {
68 struct ib_grh grh;
69 u8 payload[256];
70} __packed;
71
72struct mlx4_mad_snd_buf {
73 u8 payload[256];
74} __packed;
75
76struct mlx4_tunnel_mad {
77 struct ib_grh grh;
78 struct mlx4_ib_tunnel_header hdr;
79 struct ib_mad mad;
80} __packed;
81
82struct mlx4_rcv_tunnel_mad {
83 struct mlx4_rcv_tunnel_hdr hdr;
84 struct ib_grh grh;
85 struct ib_mad mad;
86} __packed;
87
88static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
89static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
90static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
91 int block, u32 change_bitmap);
92
93__be64 mlx4_ib_gen_node_guid(void)
94{
95#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
96 return cpu_to_be64(NODE_GUID_HI | prandom_u32());
97}
98
99__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
100{
101 return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
102 cpu_to_be64(0xff00000000000000LL);
103}
104
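/*
 * Note (my understanding of the TID scheme): TIDs generated here for
 * master-initiated MADs get 0xff forced into the most significant byte,
 * while MADs tunneled on behalf of a slave carry that slave's id in the
 * same byte (see mlx4_ib_multiplex_mad() / mlx4_ib_demux_mad()), so
 * responses can be routed back to the right function.
 */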
105int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
106 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
107 void *in_mad, void *response_mad)
108{
109 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
110 void *inbox;
111 int err;
112 u32 in_modifier = port;
113 u8 op_modifier = 0;
114
115 inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
116 if (IS_ERR(inmailbox))
117 return PTR_ERR(inmailbox);
118 inbox = inmailbox->buf;
119
120 outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
121 if (IS_ERR(outmailbox)) {
122 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
123 return PTR_ERR(outmailbox);
124 }
125
126 memcpy(inbox, in_mad, 256);
127
128 /*
129 * Key check traps can't be generated unless we have in_wc to
130 * tell us where to send the trap.
131 */
132 if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
133 op_modifier |= 0x1;
134 if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
135 op_modifier |= 0x2;
136 if (mlx4_is_mfunc(dev->dev) &&
137 (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
138 op_modifier |= 0x8;
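/*
 * op_modifier bits used with MAD_IFC, as far as I can tell from the
 * checks in this function: 0x1 = ignore M_Key, 0x2 = ignore B_Key,
 * 0x4 = WC info supplied in the extended mailbox area (set below),
 * 0x8 = run the command in network view (native, unwrapped) on
 * multi-function devices.
 */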
139
140 if (in_wc) {
141 struct {
142 __be32 my_qpn;
143 u32 reserved1;
144 __be32 rqpn;
145 u8 sl;
146 u8 g_path;
147 u16 reserved2[2];
148 __be16 pkey;
149 u32 reserved3[11];
150 u8 grh[40];
151 } *ext_info;
152
153 memset(inbox + 256, 0, 256);
154 ext_info = inbox + 256;
155
156 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
157 ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
158 ext_info->sl = in_wc->sl << 4;
159 ext_info->g_path = in_wc->dlid_path_bits |
160 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
161 ext_info->pkey = cpu_to_be16(in_wc->pkey_index);
162
163 if (in_grh)
164 memcpy(ext_info->grh, in_grh, 40);
165
166 op_modifier |= 0x4;
167
168 in_modifier |= in_wc->slid << 16;
169 }
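/*
 * Note: in_modifier passes the port number in its low 16 bits; when WC
 * info is present the source LID is folded into bits 16..31 as well,
 * presumably so the firmware knows where to send a key-violation trap
 * (see the comment about key check traps above).
 */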
170
171 err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
172 mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
173 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
174 (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
175
176 if (!err)
177 memcpy(response_mad, outmailbox->buf, 256);
178
179 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
180 mlx4_free_cmd_mailbox(dev->dev, outmailbox);
181
182 return err;
183}
184
185static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
186{
187 struct ib_ah *new_ah;
188 struct ib_ah_attr ah_attr;
189 unsigned long flags;
190
191 if (!dev->send_agent[port_num - 1][0])
192 return;
193
194 memset(&ah_attr, 0, sizeof ah_attr);
195 ah_attr.dlid = lid;
196 ah_attr.sl = sl;
197 ah_attr.port_num = port_num;
198
199 new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
200 &ah_attr);
201 if (IS_ERR(new_ah))
202 return;
203
204 spin_lock_irqsave(&dev->sm_lock, flags);
205 if (dev->sm_ah[port_num - 1])
206 ib_destroy_ah(dev->sm_ah[port_num - 1]);
207 dev->sm_ah[port_num - 1] = new_ah;
208 spin_unlock_irqrestore(&dev->sm_lock, flags);
209}
210
211/*
212 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
213 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
214 */
215static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
216 u16 prev_lid)
217{
218 struct ib_port_info *pinfo;
219 u16 lid;
220 __be16 *base;
221 u32 bn, pkey_change_bitmap;
222 int i;
223
224
225 struct mlx4_ib_dev *dev = to_mdev(ibdev);
226 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
227 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
228 mad->mad_hdr.method == IB_MGMT_METHOD_SET)
229 switch (mad->mad_hdr.attr_id) {
230 case IB_SMP_ATTR_PORT_INFO:
231 pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
232 lid = be16_to_cpu(pinfo->lid);
233
234 update_sm_ah(dev, port_num,
235 be16_to_cpu(pinfo->sm_lid),
236 pinfo->neighbormtu_mastersmsl & 0xf);
237
238 if (pinfo->clientrereg_resv_subnetto & 0x80)
239 handle_client_rereg_event(dev, port_num);
240
241 if (prev_lid != lid)
242 handle_lid_change_event(dev, port_num);
243 break;
244
245 case IB_SMP_ATTR_PKEY_TABLE:
246 if (!mlx4_is_mfunc(dev->dev)) {
247 mlx4_ib_dispatch_event(dev, port_num,
248 IB_EVENT_PKEY_CHANGE);
249 break;
250 }
251
252 /* at this point, we are running in the master.
253 * Slaves do not receive SMPs.
254 */
255 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
256 base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
257 pkey_change_bitmap = 0;
258 for (i = 0; i < 32; i++) {
259 pr_debug("PKEY[%d] = x%x\n",
260 i + bn*32, be16_to_cpu(base[i]));
261 if (be16_to_cpu(base[i]) !=
262 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
263 pkey_change_bitmap |= (1 << i);
264 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
265 be16_to_cpu(base[i]);
266 }
267 }
268 pr_debug("PKEY Change event: port=%d, "
269 "block=0x%x, change_bitmap=0x%x\n",
270 port_num, bn, pkey_change_bitmap);
271
272 if (pkey_change_bitmap) {
273 mlx4_ib_dispatch_event(dev, port_num,
274 IB_EVENT_PKEY_CHANGE);
275 if (!dev->sriov.is_going_down)
276 __propagate_pkey_ev(dev, port_num, bn,
277 pkey_change_bitmap);
278 }
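/*
 * (The attr_mod low 16 bits above select one 32-entry block of the
 * physical P_Key table; pkey_change_bitmap has one bit per entry of that
 * block, set when the snooped SMP differs from the cached value.)
 */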
279 break;
280
281 case IB_SMP_ATTR_GUID_INFO:
282 /* paravirtualized master's guid is guid 0 -- does not change */
283 if (!mlx4_is_master(dev->dev))
284 mlx4_ib_dispatch_event(dev, port_num,
285 IB_EVENT_GID_CHANGE);
286 /*if master, notify relevant slaves*/
287 if (mlx4_is_master(dev->dev) &&
288 !dev->sriov.is_going_down) {
289 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
290 mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
291 (u8 *)(&((struct ib_smp *)mad)->data));
292 mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
293 (u8 *)(&((struct ib_smp *)mad)->data));
294 }
295 break;
296
297 default:
298 break;
299 }
300}
301
302static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
303 int block, u32 change_bitmap)
304{
305 int i, ix, slave, err;
306 int have_event = 0;
307
308 for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
309 if (slave == mlx4_master_func_num(dev->dev))
310 continue;
311 if (!mlx4_is_slave_active(dev->dev, slave))
312 continue;
313
314 have_event = 0;
315 for (i = 0; i < 32; i++) {
316 if (!(change_bitmap & (1 << i)))
317 continue;
318 for (ix = 0;
319 ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
320 if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
321 [ix] == i + 32 * block) {
322 err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
323 pr_debug("propagate_pkey_ev: slave %d,"
324 " port %d, ix %d (%d)\n",
325 slave, port_num, ix, err);
326 have_event = 1;
327 break;
328 }
329 }
330 if (have_event)
331 break;
332 }
333 }
334}
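/*
 * Reader's note on __propagate_pkey_ev() above: a slave is notified only
 * if at least one entry of its virtual-to-physical pkey mapping for this
 * port points at a physical index (i + 32 * block) whose value changed,
 * and one PKEY_CHANGE EQE is generated per affected slave, not per entry.
 */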
335
336static void node_desc_override(struct ib_device *dev,
337 struct ib_mad *mad)
338{
339 unsigned long flags;
340
341 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
342 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
343 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
344 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
345 spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
346 memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
347 spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
348 }
349}
350
351static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
352{
353 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
354 struct ib_mad_send_buf *send_buf;
355 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
356 int ret;
357 unsigned long flags;
358
359 if (agent) {
360 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
361 IB_MGMT_MAD_DATA, GFP_ATOMIC);
362 if (IS_ERR(send_buf))
363 return;
364 /*
365 * We rely here on the fact that MLX QPs don't use the
366 * address handle after the send is posted (this is
367 * wrong following the IB spec strictly, but we know
368 * it's OK for our devices).
369 */
370 spin_lock_irqsave(&dev->sm_lock, flags);
371 memcpy(send_buf->mad, mad, sizeof *mad);
372 if ((send_buf->ah = dev->sm_ah[port_num - 1]))
373 ret = ib_post_send_mad(send_buf, NULL);
374 else
375 ret = -EINVAL;
376 spin_unlock_irqrestore(&dev->sm_lock, flags);
377
378 if (ret)
379 ib_free_send_mad(send_buf);
380 }
381}
382
383static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
384 struct ib_sa_mad *sa_mad)
385{
386 int ret = 0;
387
388 /* dispatch to different sa handlers */
389 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
390 case IB_SA_ATTR_MC_MEMBER_REC:
391 ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
392 break;
393 default:
394 break;
395 }
396 return ret;
397}
398
399int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
400{
401 struct mlx4_ib_dev *dev = to_mdev(ibdev);
402 int i;
403
404 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
405 if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
406 return i;
407 }
408 return -1;
409}
410
411
412static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
413 u8 port, u16 pkey, u16 *ix)
414{
415 int i, ret;
416 u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
417 u16 slot_pkey;
418
419 if (slave == mlx4_master_func_num(dev->dev))
420 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
421
422 unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
423
424 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
425 if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
426 continue;
427
428 pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
429
430 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
431 if (ret)
432 continue;
433 if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
434 if (slot_pkey & 0x8000) {
435 *ix = (u16) pkey_ix;
436 return 0;
437 } else {
438 /* take first partial pkey index found */
439 if (partial_ix == 0xFF)
440 partial_ix = pkey_ix;
441 }
442 }
443 }
444
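/*
 * Membership note (my reading of the 0x8000 test above): bit 15 of a
 * P_Key marks full membership.  A full-member match is returned
 * immediately; otherwise the first limited-member slot whose low 15 bits
 * match is remembered in partial_ix and used as a fallback below.
 */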
445 if (partial_ix < 0xFF) {
446 *ix = (u16) partial_ix;
447 return 0;
448 }
449
450 return -EINVAL;
451}
452
453int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
454 enum ib_qp_type dest_qpt, struct ib_wc *wc,
455 struct ib_grh *grh, struct ib_mad *mad)
456{
457 struct ib_sge list;
458 struct ib_send_wr wr, *bad_wr;
459 struct mlx4_ib_demux_pv_ctx *tun_ctx;
460 struct mlx4_ib_demux_pv_qp *tun_qp;
461 struct mlx4_rcv_tunnel_mad *tun_mad;
462 struct ib_ah_attr attr;
463 struct ib_ah *ah;
464 struct ib_qp *src_qp = NULL;
465 unsigned tun_tx_ix = 0;
466 int dqpn;
467 int ret = 0;
468 u16 tun_pkey_ix;
469 u16 cached_pkey;
470 u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
471
472 if (dest_qpt > IB_QPT_GSI)
473 return -EINVAL;
474
475 tun_ctx = dev->sriov.demux[port-1].tun[slave];
476
477 /* check if proxy qp created */
478 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
479 return -EAGAIN;
480
481 /* QP0 forwarding only for Dom0 */
482 if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
483 return -EINVAL;
484
485 if (!dest_qpt)
486 tun_qp = &tun_ctx->qp[0];
487 else
488 tun_qp = &tun_ctx->qp[1];
489
490 /* compute P_Key index to put in tunnel header for slave */
491 if (dest_qpt) {
492 u16 pkey_ix;
493 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
494 if (ret)
495 return -EINVAL;
496
497 ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
498 if (ret)
499 return -EINVAL;
500 tun_pkey_ix = pkey_ix;
501 } else
502 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
503
504 dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
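/* Proxy QP numbering, as I read the arithmetic above: each function owns
 * a block of eight proxy QPs starting at base_proxy_sqpn + 8 * slave.
 * Slots 0..1 are the QP0 proxies for ports 1 and 2 and slots 2..3 the
 * QP1 proxies, which is what "port + dest_qpt * 2 - 1" selects.
 */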
505
506 /* get tunnel tx data buf for slave */
507 src_qp = tun_qp->qp;
508
509 /* create ah. Just need an empty one with the port num for the post send.
510 * The driver will set the force loopback bit in post_send */
511 memset(&attr, 0, sizeof attr);
512 attr.port_num = port;
513 if (is_eth) {
514 memcpy(&attr.grh.dgid.raw[0], &grh->dgid.raw[0], 16);
515 attr.ah_flags = IB_AH_GRH;
516 }
517 ah = ib_create_ah(tun_ctx->pd, &attr);
518 if (IS_ERR(ah))
519 return -ENOMEM;
520
521 /* allocate tunnel tx buf after pass failure returns */
522 spin_lock(&tun_qp->tx_lock);
523 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
524 (MLX4_NUM_TUNNEL_BUFS - 1))
525 ret = -EAGAIN;
526 else
527 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
528 spin_unlock(&tun_qp->tx_lock);
529 if (ret)
530 goto out;
531
532 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
533 if (tun_qp->tx_ring[tun_tx_ix].ah)
534 ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
535 tun_qp->tx_ring[tun_tx_ix].ah = ah;
536 ib_dma_sync_single_for_cpu(&dev->ib_dev,
537 tun_qp->tx_ring[tun_tx_ix].buf.map,
538 sizeof (struct mlx4_rcv_tunnel_mad),
539 DMA_TO_DEVICE);
540
541 /* copy over to tunnel buffer */
542 if (grh)
543 memcpy(&tun_mad->grh, grh, sizeof *grh);
544 memcpy(&tun_mad->mad, mad, sizeof *mad);
545
546 /* adjust tunnel data */
547 tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
548 tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
549 tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
550
551 if (is_eth) {
552 u16 vlan = 0;
553 if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
554 NULL)) {
555 /* VST mode */
556 if (vlan != wc->vlan_id)
557 /* Packet vlan is not the VST-assigned vlan.
558 * Drop the packet.
559 */
560 goto out;
561 else
562 /* Remove the vlan tag before forwarding
563 * the packet to the VF.
564 */
565 vlan = 0xffff;
566 } else {
567 vlan = wc->vlan_id;
568 }
569
570 tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
571 memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
572 memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
573 } else {
574 tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
575 tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
576 }
577
578 ib_dma_sync_single_for_device(&dev->ib_dev,
579 tun_qp->tx_ring[tun_tx_ix].buf.map,
580 sizeof (struct mlx4_rcv_tunnel_mad),
581 DMA_TO_DEVICE);
582
583 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
584 list.length = sizeof (struct mlx4_rcv_tunnel_mad);
585 list.lkey = tun_ctx->mr->lkey;
586
587 wr.wr.ud.ah = ah;
588 wr.wr.ud.port_num = port;
589 wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
590 wr.wr.ud.remote_qpn = dqpn;
591 wr.next = NULL;
592 wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
593 wr.sg_list = &list;
594 wr.num_sge = 1;
595 wr.opcode = IB_WR_SEND;
596 wr.send_flags = IB_SEND_SIGNALED;
597
598 ret = ib_post_send(src_qp, &wr, &bad_wr);
599out:
600 if (ret)
601 ib_destroy_ah(ah);
602 return ret;
603}
604
605static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
606 struct ib_wc *wc, struct ib_grh *grh,
607 struct ib_mad *mad)
608{
609 struct mlx4_ib_dev *dev = to_mdev(ibdev);
610 int err;
611 int slave;
612 u8 *slave_id;
613 int is_eth = 0;
614
615 if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
616 is_eth = 0;
617 else
618 is_eth = 1;
619
620 if (is_eth) {
621 if (!(wc->wc_flags & IB_WC_GRH)) {
622 mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
623 return -EINVAL;
624 }
625 if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
626 mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
627 return -EINVAL;
628 }
629 if (mlx4_get_slave_from_roce_gid(dev->dev, port, grh->dgid.raw, &slave)) {
630 mlx4_ib_warn(ibdev, "failed matching grh\n");
631 return -ENOENT;
632 }
633 if (slave >= dev->dev->caps.sqp_demux) {
634 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
635 slave, dev->dev->caps.sqp_demux);
636 return -ENOENT;
637 }
638
639 if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
640 return 0;
641
642 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
643 if (err)
644 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
645 slave, err);
646 return 0;
647 }
648
649 /* Initially assume that this mad is for us */
650 slave = mlx4_master_func_num(dev->dev);
651
652 /* See if the slave id is encoded in a response mad */
653 if (mad->mad_hdr.method & 0x80) {
654 slave_id = (u8 *) &mad->mad_hdr.tid;
655 slave = *slave_id;
656 if (slave != 255) /*255 indicates the dom0*/
657 *slave_id = 0; /* remap tid */
658 }
659
660 /* If a grh is present, we demux according to it */
661 if (wc->wc_flags & IB_WC_GRH) {
662 slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
663 if (slave < 0) {
664 mlx4_ib_warn(ibdev, "failed matching grh\n");
665 return -ENOENT;
666 }
667 }
668 /* Class-specific handling */
669 switch (mad->mad_hdr.mgmt_class) {
670 case IB_MGMT_CLASS_SUBN_ADM:
671 if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
672 (struct ib_sa_mad *) mad))
673 return 0;
674 break;
675 case IB_MGMT_CLASS_CM:
676 if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
677 return 0;
678 break;
679 case IB_MGMT_CLASS_DEVICE_MGMT:
680 if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
681 return 0;
682 break;
683 default:
684 /* Drop unsupported classes for slaves in tunnel mode */
685 if (slave != mlx4_master_func_num(dev->dev)) {
686 pr_debug("dropping unsupported ingress mad from class:%d "
687 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
688 return 0;
689 }
690 }
691 /* make sure a slave value of 255, or any other out-of-range slave id, was not left unhandled */
692 if (slave >= dev->dev->caps.sqp_demux) {
693 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
694 slave, dev->dev->caps.sqp_demux);
695 return -ENOENT;
696 }
697
698 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
699 if (err)
700 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
701 slave, err);
702 return 0;
703}
704
705static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
706 struct ib_wc *in_wc, struct ib_grh *in_grh,
707 struct ib_mad *in_mad, struct ib_mad *out_mad)
708{
709 u16 slid, prev_lid = 0;
710 int err;
711 struct ib_port_attr pattr;
712
713 if (in_wc && in_wc->qp->qp_num) {
714 pr_debug("received MAD: slid:%d sqpn:%d "
715 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
716 in_wc->slid, in_wc->src_qp,
717 in_wc->dlid_path_bits,
718 in_wc->qp->qp_num,
719 in_wc->wc_flags,
720 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
721 be16_to_cpu(in_mad->mad_hdr.attr_id));
722 if (in_wc->wc_flags & IB_WC_GRH) {
723 pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
724 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
725 be64_to_cpu(in_grh->sgid.global.interface_id));
726 pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
727 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
728 be64_to_cpu(in_grh->dgid.global.interface_id));
729 }
730 }
731
732 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
733
734 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
735 forward_trap(to_mdev(ibdev), port_num, in_mad);
736 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
737 }
738
739 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
740 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
741 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
742 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
743 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
744 return IB_MAD_RESULT_SUCCESS;
745
746 /*
747 * Don't process SMInfo queries -- the SMA can't handle them.
748 */
749 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
750 return IB_MAD_RESULT_SUCCESS;
751 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
752 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
753 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
754 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
755 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
756 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
757 return IB_MAD_RESULT_SUCCESS;
758 } else
759 return IB_MAD_RESULT_SUCCESS;
760
761 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
762 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
763 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
764 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
765 !ib_query_port(ibdev, port_num, &pattr))
766 prev_lid = pattr.lid;
767
768 err = mlx4_MAD_IFC(to_mdev(ibdev),
769 (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
770 (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
771 MLX4_MAD_IFC_NET_VIEW,
772 port_num, in_wc, in_grh, in_mad, out_mad);
773 if (err)
774 return IB_MAD_RESULT_FAILURE;
775
776 if (!out_mad->mad_hdr.status) {
777 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
778 smp_snoop(ibdev, port_num, in_mad, prev_lid);
779 /* slaves get node desc from FW */
780 if (!mlx4_is_slave(to_mdev(ibdev)->dev))
781 node_desc_override(ibdev, out_mad);
782 }
783
784 /* set return bit in status of directed route responses */
785 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
786 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
787
788 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
789 /* no response for trap repress */
790 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
791
792 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
793}
794
795static void edit_counter(struct mlx4_counter *cnt,
796 struct ib_pma_portcounters *pma_cnt)
797{
798 pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
799 pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
800 pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
801 pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
802}
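/*
 * Note: PortXmitData/PortRcvData in the PMA PortCounters attribute are
 * specified in units of 32-bit words, which is why the byte counters
 * above are shifted right by two before being reported.
 */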
803
804static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
805 struct ib_wc *in_wc, struct ib_grh *in_grh,
806 struct ib_mad *in_mad, struct ib_mad *out_mad)
807{
808 struct mlx4_cmd_mailbox *mailbox;
809 struct mlx4_ib_dev *dev = to_mdev(ibdev);
810 int err;
811 u32 inmod = dev->counters[port_num - 1] & 0xffff;
812 u8 mode;
813
814 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
815 return -EINVAL;
816
817 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
818 if (IS_ERR(mailbox))
819 return IB_MAD_RESULT_FAILURE;
820
821 err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
822 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
823 MLX4_CMD_WRAPPED);
824 if (err)
825 err = IB_MAD_RESULT_FAILURE;
826 else {
827 memset(out_mad->data, 0, sizeof out_mad->data);
828 mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
829 switch (mode & 0xf) {
830 case 0:
831 edit_counter(mailbox->buf,
832 (void *)(out_mad->data + 40));
833 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
834 break;
835 default:
836 err = IB_MAD_RESULT_FAILURE;
837 }
838 }
839
840 mlx4_free_cmd_mailbox(dev->dev, mailbox);
841
842 return err;
843}
844
845int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
846 struct ib_wc *in_wc, struct ib_grh *in_grh,
847 struct ib_mad *in_mad, struct ib_mad *out_mad)
848{
849 switch (rdma_port_get_link_layer(ibdev, port_num)) {
850 case IB_LINK_LAYER_INFINIBAND:
851 return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
852 in_grh, in_mad, out_mad);
853 case IB_LINK_LAYER_ETHERNET:
854 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
855 in_grh, in_mad, out_mad);
856 default:
857 return -EINVAL;
858 }
859}
860
861static void send_handler(struct ib_mad_agent *agent,
862 struct ib_mad_send_wc *mad_send_wc)
863{
864 if (mad_send_wc->send_buf->context[0])
865 ib_destroy_ah(mad_send_wc->send_buf->context[0]);
866 ib_free_send_mad(mad_send_wc->send_buf);
867}
868
869int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
870{
871 struct ib_mad_agent *agent;
872 int p, q;
873 int ret;
874 enum rdma_link_layer ll;
875
876 for (p = 0; p < dev->num_ports; ++p) {
877 ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
878 for (q = 0; q <= 1; ++q) {
879 if (ll == IB_LINK_LAYER_INFINIBAND) {
880 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
881 q ? IB_QPT_GSI : IB_QPT_SMI,
882 NULL, 0, send_handler,
883 NULL, NULL);
884 if (IS_ERR(agent)) {
885 ret = PTR_ERR(agent);
886 goto err;
887 }
888 dev->send_agent[p][q] = agent;
889 } else
890 dev->send_agent[p][q] = NULL;
891 }
892 }
893
894 return 0;
895
896err:
897 for (p = 0; p < dev->num_ports; ++p)
898 for (q = 0; q <= 1; ++q)
899 if (dev->send_agent[p][q])
900 ib_unregister_mad_agent(dev->send_agent[p][q]);
901
902 return ret;
903}
904
905void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
906{
907 struct ib_mad_agent *agent;
908 int p, q;
909
910 for (p = 0; p < dev->num_ports; ++p) {
911 for (q = 0; q <= 1; ++q) {
912 agent = dev->send_agent[p][q];
913 if (agent) {
914 dev->send_agent[p][q] = NULL;
915 ib_unregister_mad_agent(agent);
916 }
917 }
918
919 if (dev->sm_ah[p])
920 ib_destroy_ah(dev->sm_ah[p]);
921 }
922}
923
924static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
925{
926 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
927
928 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
929 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
930 MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
931}
932
933static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
934{
935 /* re-configure the alias-guid and mcg's */
936 if (mlx4_is_master(dev->dev)) {
937 mlx4_ib_invalidate_all_guid_record(dev, port_num);
938
939 if (!dev->sriov.is_going_down) {
940 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
941 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
942 MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
943 }
944 }
945 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
946}
947
948static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
949 struct mlx4_eqe *eqe)
950{
951 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
952 GET_MASK_FROM_EQE(eqe));
953}
954
955static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
956 u32 guid_tbl_blk_num, u32 change_bitmap)
957{
958 struct ib_smp *in_mad = NULL;
959 struct ib_smp *out_mad = NULL;
960 u16 i;
961
962 if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
963 return;
964
965 in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
966 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
967 if (!in_mad || !out_mad) {
968 mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
969 goto out;
970 }
971
972 guid_tbl_blk_num *= 4;
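/*
 * (The block number from the EQE counts 32-entry blocks, while a GuidInfo
 * MAD carries only GUID_TBL_BLK_NUM_ENTRIES (8) GUIDs, so one EQE block is
 * fetched as four consecutive MAD blocks below -- hence the multiply by
 * four and the change_bitmap being consumed 8 bits at a time.  This is my
 * reading of the loop that follows.)
 */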
973
974 for (i = 0; i < 4; i++) {
975 if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
976 continue;
977 memset(in_mad, 0, sizeof *in_mad);
978 memset(out_mad, 0, sizeof *out_mad);
979
980 in_mad->base_version = 1;
981 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
982 in_mad->class_version = 1;
983 in_mad->method = IB_MGMT_METHOD_GET;
984 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
985 in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);
986
987 if (mlx4_MAD_IFC(dev,
988 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
989 port_num, NULL, NULL, in_mad, out_mad)) {
990 mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
991 goto out;
992 }
993
994 mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
995 port_num,
996 (u8 *)(&((struct ib_smp *)out_mad)->data));
997 mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
998 port_num,
999 (u8 *)(&((struct ib_smp *)out_mad)->data));
1000 }
1001
1002out:
1003 kfree(in_mad);
1004 kfree(out_mad);
1005 return;
1006}
1007
1008void handle_port_mgmt_change_event(struct work_struct *work)
1009{
1010 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1011 struct mlx4_ib_dev *dev = ew->ib_dev;
1012 struct mlx4_eqe *eqe = &(ew->ib_eqe);
1013 u8 port = eqe->event.port_mgmt_change.port;
1014 u32 changed_attr;
1015 u32 tbl_block;
1016 u32 change_bitmap;
1017
1018 switch (eqe->subtype) {
1019 case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1020 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
1021
1022 /* Update the SM ah - This should be done before handling
1023 the other changed attributes so that MADs can be sent to the SM */
1024 if (changed_attr & MSTR_SM_CHANGE_MASK) {
1025 u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1026 u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1027 update_sm_ah(dev, port, lid, sl);
1028 }
1029
1030 /* Check if it is a lid change event */
1031 if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1032 handle_lid_change_event(dev, port);
1033
1034 /* Generate GUID changed event */
1035 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1036 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1037 /*if master, notify all slaves*/
1038 if (mlx4_is_master(dev->dev))
1039 mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1040 MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
1041 }
1042
1043 if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1044 handle_client_rereg_event(dev, port);
1045 break;
1046
1047 case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1048 mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1049 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1050 propagate_pkey_ev(dev, port, eqe);
1051 break;
1052 case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1053 /* paravirtualized master's guid is guid 0 -- does not change */
1054 if (!mlx4_is_master(dev->dev))
1055 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1056 /*if master, notify relevant slaves*/
1057 else if (!dev->sriov.is_going_down) {
1058 tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1059 change_bitmap = GET_MASK_FROM_EQE(eqe);
1060 handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1061 }
1062 break;
1063 default:
1064 pr_warn("Unsupported subtype 0x%x for "
1065 "Port Management Change event\n", eqe->subtype);
1066 }
1067
1068 kfree(ew);
1069}
1070
1071void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1072 enum ib_event_type type)
1073{
1074 struct ib_event event;
1075
1076 event.device = &dev->ib_dev;
1077 event.element.port_num = port_num;
1078 event.event = type;
1079
1080 ib_dispatch_event(&event);
1081}
1082
1083static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1084{
1085 unsigned long flags;
1086 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1087 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1088 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1089 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1090 queue_work(ctx->wq, &ctx->work);
1091 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1092}
1093
1094static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1095 struct mlx4_ib_demux_pv_qp *tun_qp,
1096 int index)
1097{
1098 struct ib_sge sg_list;
1099 struct ib_recv_wr recv_wr, *bad_recv_wr;
1100 int size;
1101
1102 size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1103 sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1104
1105 sg_list.addr = tun_qp->ring[index].map;
1106 sg_list.length = size;
1107 sg_list.lkey = ctx->mr->lkey;
1108
1109 recv_wr.next = NULL;
1110 recv_wr.sg_list = &sg_list;
1111 recv_wr.num_sge = 1;
1112 recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1113 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1114 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1115 size, DMA_FROM_DEVICE);
1116 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1117}
1118
1119static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1120 int slave, struct ib_sa_mad *sa_mad)
1121{
1122 int ret = 0;
1123
1124 /* dispatch to different sa handlers */
1125 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1126 case IB_SA_ATTR_MC_MEMBER_REC:
1127 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1128 break;
1129 default:
1130 break;
1131 }
1132 return ret;
1133}
1134
1135static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1136{
1137 int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1138
1139 return (qpn >= proxy_start && qpn <= proxy_start + 1);
1140}
1141
1142
1143int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1144 enum ib_qp_type dest_qpt, u16 pkey_index,
1145 u32 remote_qpn, u32 qkey, struct ib_ah_attr *attr,
1146 u8 *s_mac, struct ib_mad *mad)
1147{
1148 struct ib_sge list;
1149 struct ib_send_wr wr, *bad_wr;
1150 struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1151 struct mlx4_ib_demux_pv_qp *sqp;
1152 struct mlx4_mad_snd_buf *sqp_mad;
1153 struct ib_ah *ah;
1154 struct ib_qp *send_qp = NULL;
1155 unsigned wire_tx_ix = 0;
1156 int ret = 0;
1157 u16 wire_pkey_ix;
1158 int src_qpnum;
1159 u8 sgid_index;
1160
1161
1162 sqp_ctx = dev->sriov.sqps[port-1];
1163
1164 /* check if proxy qp created */
1165 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1166 return -EAGAIN;
1167
1168 /* QP0 forwarding only for Dom0 */
1169 if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
1170 return -EINVAL;
1171
1172 if (dest_qpt == IB_QPT_SMI) {
1173 src_qpnum = 0;
1174 sqp = &sqp_ctx->qp[0];
1175 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1176 } else {
1177 src_qpnum = 1;
1178 sqp = &sqp_ctx->qp[1];
1179 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1180 }
1181
1182 send_qp = sqp->qp;
1183
1184 /* create ah */
1185 sgid_index = attr->grh.sgid_index;
1186 attr->grh.sgid_index = 0;
1187 ah = ib_create_ah(sqp_ctx->pd, attr);
1188 if (IS_ERR(ah))
1189 return -ENOMEM;
1190 attr->grh.sgid_index = sgid_index;
1191 to_mah(ah)->av.ib.gid_index = sgid_index;
1192 /* get rid of force-loopback bit */
1193 to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
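/*
 * (Above: the AH is created with sgid_index 0 -- an index that always
 * exists in the PF's GID table -- and the slave's real GID index is then
 * patched directly into the mlx4 address vector; the top bit of port_pd,
 * the force-loopback flag, is cleared so the send really goes out on the
 * wire.  This is my reading of the two fixups above.)
 */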
1194 spin_lock(&sqp->tx_lock);
1195 if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1196 (MLX4_NUM_TUNNEL_BUFS - 1))
1197 ret = -EAGAIN;
1198 else
1199 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1200 spin_unlock(&sqp->tx_lock);
1201 if (ret)
1202 goto out;
1203
1204 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1205 if (sqp->tx_ring[wire_tx_ix].ah)
1206 ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1207 sqp->tx_ring[wire_tx_ix].ah = ah;
1208 ib_dma_sync_single_for_cpu(&dev->ib_dev,
1209 sqp->tx_ring[wire_tx_ix].buf.map,
1210 sizeof (struct mlx4_mad_snd_buf),
1211 DMA_TO_DEVICE);
1212
1213 memcpy(&sqp_mad->payload, mad, sizeof *mad);
1214
1215 ib_dma_sync_single_for_device(&dev->ib_dev,
1216 sqp->tx_ring[wire_tx_ix].buf.map,
1217 sizeof (struct mlx4_mad_snd_buf),
1218 DMA_TO_DEVICE);
1219
1220 list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1221 list.length = sizeof (struct mlx4_mad_snd_buf);
1222 list.lkey = sqp_ctx->mr->lkey;
1223
1224 wr.wr.ud.ah = ah;
1225 wr.wr.ud.port_num = port;
1226 wr.wr.ud.pkey_index = wire_pkey_ix;
1227 wr.wr.ud.remote_qkey = qkey;
1228 wr.wr.ud.remote_qpn = remote_qpn;
1229 wr.next = NULL;
1230 wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1231 wr.sg_list = &list;
1232 wr.num_sge = 1;
1233 wr.opcode = IB_WR_SEND;
1234 wr.send_flags = IB_SEND_SIGNALED;
1235 if (s_mac)
1236 memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
1237
1238
1239 ret = ib_post_send(send_qp, &wr, &bad_wr);
1240out:
1241 if (ret)
1242 ib_destroy_ah(ah);
1243 return ret;
1244}
1245
1246static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1247{
1248 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1249 return slave;
1250 return mlx4_get_base_gid_ix(dev->dev, slave, port);
1251}
1252
1253static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1254 struct ib_ah_attr *ah_attr)
1255{
1256 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1257 ah_attr->grh.sgid_index = slave;
1258 else
1259 ah_attr->grh.sgid_index += get_slave_base_gid_ix(dev, slave, port);
1260}
1261
1262static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1263{
1264 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1265 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1266 int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1267 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1268 struct mlx4_ib_ah ah;
1269 struct ib_ah_attr ah_attr;
1270 u8 *slave_id;
1271 int slave;
1272 int port;
1273
1274 /* Get slave that sent this packet */
1275 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1276 wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1277 (wc->src_qp & 0x1) != ctx->port - 1 ||
1278 wc->src_qp & 0x4) {
1279 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1280 return;
1281 }
1282 slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
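/* Decoding of the source proxy QP number, as I understand the checks
 * above and below: bits 3 and up give the owning function
 * ((qpn - base_proxy_sqpn) / 8), bit 0 must match port - 1, bit 1
 * selects QP0 (0) vs QP1 (1) proxies, and bit 2 must be clear for a
 * valid source on this path.
 */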
1283 if (slave != ctx->slave) {
1284 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1285 "belongs to another slave\n", wc->src_qp);
1286 return;
1287 }
1288 if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
1289 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1290 "non-master trying to send QP0 packets\n", wc->src_qp);
1291 return;
1292 }
1293
1294 /* Map transaction ID */
1295 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1296 sizeof (struct mlx4_tunnel_mad),
1297 DMA_FROM_DEVICE);
1298 switch (tunnel->mad.mad_hdr.method) {
1299 case IB_MGMT_METHOD_SET:
1300 case IB_MGMT_METHOD_GET:
1301 case IB_MGMT_METHOD_REPORT:
1302 case IB_SA_METHOD_GET_TABLE:
1303 case IB_SA_METHOD_DELETE:
1304 case IB_SA_METHOD_GET_MULTI:
1305 case IB_SA_METHOD_GET_TRACE_TBL:
1306 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1307 if (*slave_id) {
1308 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
1309 "class:%d slave:%d\n", *slave_id,
1310 tunnel->mad.mad_hdr.mgmt_class, slave);
1311 return;
1312 } else
1313 *slave_id = slave;
1314 default:
1315 /* nothing */;
1316 }
1317
1318 /* Class-specific handling */
1319 switch (tunnel->mad.mad_hdr.mgmt_class) {
1320 case IB_MGMT_CLASS_SUBN_ADM:
1321 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1322 (struct ib_sa_mad *) &tunnel->mad))
1323 return;
1324 break;
1325 case IB_MGMT_CLASS_CM:
1326 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1327 (struct ib_mad *) &tunnel->mad))
1328 return;
1329 break;
1330 case IB_MGMT_CLASS_DEVICE_MGMT:
1331 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1332 tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1333 return;
1334 break;
1335 default:
1336 /* Drop unsupported classes for slaves in tunnel mode */
1337 if (slave != mlx4_master_func_num(dev->dev)) {
1338 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
1339 "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
1340 return;
1341 }
1342 }
1343
1344 /* We are using standard ib_core services to send the mad, so generate a
1345 * standard address handle by decoding the tunnelled mlx4_ah fields */
1346 memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1347 ah.ibah.device = ctx->ib_dev;
1348 mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1349 if (ah_attr.ah_flags & IB_AH_GRH)
1350 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
1351
1352 port = mlx4_slave_convert_port(dev->dev, slave, ah_attr.port_num);
1353 if (port < 0)
1354 return;
1355 ah_attr.port_num = port;
1356 memcpy(ah_attr.dmac, tunnel->hdr.mac, 6);
1357 ah_attr.vlan_id = be16_to_cpu(tunnel->hdr.vlan);
1358 /* if the slave has a default vlan, use it */
1359 mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
1360 &ah_attr.vlan_id, &ah_attr.sl);
1361
1362 mlx4_ib_send_to_wire(dev, slave, ctx->port,
1363 is_proxy_qp0(dev, wc->src_qp, slave) ?
1364 IB_QPT_SMI : IB_QPT_GSI,
1365 be16_to_cpu(tunnel->hdr.pkey_index),
1366 be32_to_cpu(tunnel->hdr.remote_qpn),
1367 be32_to_cpu(tunnel->hdr.qkey),
1368 &ah_attr, wc->smac, &tunnel->mad);
1369}
1370
1371static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1372 enum ib_qp_type qp_type, int is_tun)
1373{
1374 int i;
1375 struct mlx4_ib_demux_pv_qp *tun_qp;
1376 int rx_buf_size, tx_buf_size;
1377
1378 if (qp_type > IB_QPT_GSI)
1379 return -EINVAL;
1380
1381 tun_qp = &ctx->qp[qp_type];
1382
1383 tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
1384 GFP_KERNEL);
1385 if (!tun_qp->ring)
1386 return -ENOMEM;
1387
1388 tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1389 sizeof (struct mlx4_ib_tun_tx_buf),
1390 GFP_KERNEL);
1391 if (!tun_qp->tx_ring) {
1392 kfree(tun_qp->ring);
1393 tun_qp->ring = NULL;
1394 return -ENOMEM;
1395 }
1396
1397 if (is_tun) {
1398 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1399 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1400 } else {
1401 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1402 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1403 }
1404
1405 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1406 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1407 if (!tun_qp->ring[i].addr)
1408 goto err;
1409 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1410 tun_qp->ring[i].addr,
1411 rx_buf_size,
1412 DMA_FROM_DEVICE);
1413 }
1414
1415 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1416 tun_qp->tx_ring[i].buf.addr =
1417 kmalloc(tx_buf_size, GFP_KERNEL);
1418 if (!tun_qp->tx_ring[i].buf.addr)
1419 goto tx_err;
1420 tun_qp->tx_ring[i].buf.map =
1421 ib_dma_map_single(ctx->ib_dev,
1422 tun_qp->tx_ring[i].buf.addr,
1423 tx_buf_size,
1424 DMA_TO_DEVICE);
1425 tun_qp->tx_ring[i].ah = NULL;
1426 }
1427 spin_lock_init(&tun_qp->tx_lock);
1428 tun_qp->tx_ix_head = 0;
1429 tun_qp->tx_ix_tail = 0;
1430 tun_qp->proxy_qpt = qp_type;
1431
1432 return 0;
1433
1434tx_err:
1435 while (i > 0) {
1436 --i;
1437 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1438 tx_buf_size, DMA_TO_DEVICE);
1439 kfree(tun_qp->tx_ring[i].buf.addr);
1440 }
1441 kfree(tun_qp->tx_ring);
1442 tun_qp->tx_ring = NULL;
1443 i = MLX4_NUM_TUNNEL_BUFS;
1444err:
1445 while (i > 0) {
1446 --i;
1447 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1448 rx_buf_size, DMA_FROM_DEVICE);
1449 kfree(tun_qp->ring[i].addr);
1450 }
1451 kfree(tun_qp->ring);
1452 tun_qp->ring = NULL;
1453 return -ENOMEM;
1454}
1455
1456static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1457 enum ib_qp_type qp_type, int is_tun)
1458{
1459 int i;
1460 struct mlx4_ib_demux_pv_qp *tun_qp;
1461 int rx_buf_size, tx_buf_size;
1462
1463 if (qp_type > IB_QPT_GSI)
1464 return;
1465
1466 tun_qp = &ctx->qp[qp_type];
1467 if (is_tun) {
1468 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1469 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1470 } else {
1471 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1472 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1473 }
1474
1475
1476 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1477 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1478 rx_buf_size, DMA_FROM_DEVICE);
1479 kfree(tun_qp->ring[i].addr);
1480 }
1481
1482 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1483 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1484 tx_buf_size, DMA_TO_DEVICE);
1485 kfree(tun_qp->tx_ring[i].buf.addr);
1486 if (tun_qp->tx_ring[i].ah)
1487 ib_destroy_ah(tun_qp->tx_ring[i].ah);
1488 }
1489 kfree(tun_qp->tx_ring);
1490 kfree(tun_qp->ring);
1491}
1492
1493static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1494{
1495 struct mlx4_ib_demux_pv_ctx *ctx;
1496 struct mlx4_ib_demux_pv_qp *tun_qp;
1497 struct ib_wc wc;
1498 int ret;
1499 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1500 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1501
1502 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1503 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1504 if (wc.status == IB_WC_SUCCESS) {
1505 switch (wc.opcode) {
1506 case IB_WC_RECV:
1507 mlx4_ib_multiplex_mad(ctx, &wc);
1508 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1509 wc.wr_id &
1510 (MLX4_NUM_TUNNEL_BUFS - 1));
1511 if (ret)
1512 pr_err("Failed reposting tunnel "
1513 "buf:%lld\n", wc.wr_id);
1514 break;
1515 case IB_WC_SEND:
1516 pr_debug("received tunnel send completion:"
1517 "wrid=0x%llx, status=0x%x\n",
1518 wc.wr_id, wc.status);
1519 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1520 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1521 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1522 = NULL;
1523 spin_lock(&tun_qp->tx_lock);
1524 tun_qp->tx_ix_tail++;
1525 spin_unlock(&tun_qp->tx_lock);
1526
1527 break;
1528 default:
1529 break;
1530 }
1531 } else {
1532 pr_debug("mlx4_ib: completion error in tunnel: %d."
1533 " status = %d, wrid = 0x%llx\n",
1534 ctx->slave, wc.status, wc.wr_id);
1535 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1536 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1537 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1538 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1539 = NULL;
1540 spin_lock(&tun_qp->tx_lock);
1541 tun_qp->tx_ix_tail++;
1542 spin_unlock(&tun_qp->tx_lock);
1543 }
1544 }
1545 }
1546}
1547
1548static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1549{
1550 struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1551
1552 /* It's worse than that! He's dead, Jim! */
1553 pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1554 event->event, sqp->port);
1555}
1556
1557static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1558 enum ib_qp_type qp_type, int create_tun)
1559{
1560 int i, ret;
1561 struct mlx4_ib_demux_pv_qp *tun_qp;
1562 struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1563 struct ib_qp_attr attr;
1564 int qp_attr_mask_INIT;
1565
1566 if (qp_type > IB_QPT_GSI)
1567 return -EINVAL;
1568
1569 tun_qp = &ctx->qp[qp_type];
1570
1571 memset(&qp_init_attr, 0, sizeof qp_init_attr);
1572 qp_init_attr.init_attr.send_cq = ctx->cq;
1573 qp_init_attr.init_attr.recv_cq = ctx->cq;
1574 qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1575 qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1576 qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1577 qp_init_attr.init_attr.cap.max_send_sge = 1;
1578 qp_init_attr.init_attr.cap.max_recv_sge = 1;
1579 if (create_tun) {
1580 qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1581 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
1582 qp_init_attr.port = ctx->port;
1583 qp_init_attr.slave = ctx->slave;
1584 qp_init_attr.proxy_qp_type = qp_type;
1585 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1586 IB_QP_QKEY | IB_QP_PORT;
1587 } else {
1588 qp_init_attr.init_attr.qp_type = qp_type;
1589 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
1590 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1591 }
1592 qp_init_attr.init_attr.port_num = ctx->port;
1593 qp_init_attr.init_attr.qp_context = ctx;
1594 qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1595 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1596 if (IS_ERR(tun_qp->qp)) {
1597 ret = PTR_ERR(tun_qp->qp);
1598 tun_qp->qp = NULL;
1599 pr_err("Couldn't create %s QP (%d)\n",
1600 create_tun ? "tunnel" : "special", ret);
1601 return ret;
1602 }
1603
1604 memset(&attr, 0, sizeof attr);
1605 attr.qp_state = IB_QPS_INIT;
1606 ret = 0;
1607 if (create_tun)
1608 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1609 ctx->port, IB_DEFAULT_PKEY_FULL,
1610 &attr.pkey_index);
1611 if (ret || !create_tun)
1612 attr.pkey_index =
1613 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1614 attr.qkey = IB_QP1_QKEY;
1615 attr.port_num = ctx->port;
1616 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1617 if (ret) {
1618 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1619 create_tun ? "tunnel" : "special", ret);
1620 goto err_qp;
1621 }
1622 attr.qp_state = IB_QPS_RTR;
1623 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1624 if (ret) {
1625 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1626 create_tun ? "tunnel" : "special", ret);
1627 goto err_qp;
1628 }
1629 attr.qp_state = IB_QPS_RTS;
1630 attr.sq_psn = 0;
1631 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1632 if (ret) {
1633 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1634 create_tun ? "tunnel" : "special", ret);
1635 goto err_qp;
1636 }
1637
1638 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1639 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1640 if (ret) {
1641 pr_err(" mlx4_ib_post_pv_buf error"
1642 " (err = %d, i = %d)\n", ret, i);
1643 goto err_qp;
1644 }
1645 }
1646 return 0;
1647
1648err_qp:
1649 ib_destroy_qp(tun_qp->qp);
1650 tun_qp->qp = NULL;
1651 return ret;
1652}
1653
1654/*
1655 * IB MAD completion callback for real SQPs
1656 */
1657static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1658{
1659 struct mlx4_ib_demux_pv_ctx *ctx;
1660 struct mlx4_ib_demux_pv_qp *sqp;
1661 struct ib_wc wc;
1662 struct ib_grh *grh;
1663 struct ib_mad *mad;
1664
1665 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1666 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1667
1668 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1669 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1670 if (wc.status == IB_WC_SUCCESS) {
1671 switch (wc.opcode) {
1672 case IB_WC_SEND:
1673 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1674 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1675 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1676 = NULL;
1677 spin_lock(&sqp->tx_lock);
1678 sqp->tx_ix_tail++;
1679 spin_unlock(&sqp->tx_lock);
1680 break;
1681 case IB_WC_RECV:
1682 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1683 (sqp->ring[wc.wr_id &
1684 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1685 grh = &(((struct mlx4_mad_rcv_buf *)
1686 (sqp->ring[wc.wr_id &
1687 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1688 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1689 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1690 (MLX4_NUM_TUNNEL_BUFS - 1)))
1691 pr_err("Failed reposting SQP "
1692 "buf:%lld\n", wc.wr_id);
1693 break;
1694 default:
1695 BUG_ON(1);
1696 break;
1697 }
1698 } else {
1699 pr_debug("mlx4_ib: completion error in tunnel: %d."
1700 " status = %d, wrid = 0x%llx\n",
1701 ctx->slave, wc.status, wc.wr_id);
1702 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1703 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1704 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1705 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1706 = NULL;
1707 spin_lock(&sqp->tx_lock);
1708 sqp->tx_ix_tail++;
1709 spin_unlock(&sqp->tx_lock);
1710 }
1711 }
1712 }
1713}
1714
1715static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1716 struct mlx4_ib_demux_pv_ctx **ret_ctx)
1717{
1718 struct mlx4_ib_demux_pv_ctx *ctx;
1719
1720 *ret_ctx = NULL;
1721 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1722 if (!ctx) {
1723 pr_err("failed allocating pv resource context "
1724 "for port %d, slave %d\n", port, slave);
1725 return -ENOMEM;
1726 }
1727
1728 ctx->ib_dev = &dev->ib_dev;
1729 ctx->port = port;
1730 ctx->slave = slave;
1731 *ret_ctx = ctx;
1732 return 0;
1733}
1734
1735static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1736{
1737 if (dev->sriov.demux[port - 1].tun[slave]) {
1738 kfree(dev->sriov.demux[port - 1].tun[slave]);
1739 dev->sriov.demux[port - 1].tun[slave] = NULL;
1740 }
1741}
1742
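/* Create the paravirt MAD resources for one context: buffers, CQ, PD, DMA MR
 * and QP1 (plus QP0 when the context owns the SMI).  With create_tun these
 * are tunnel QPs for a slave; otherwise they are the master's real special
 * QPs.
 */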
1743static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1744 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1745{
1746 int ret, cq_size;
1747
1748 if (ctx->state != DEMUX_PV_STATE_DOWN)
1749 return -EEXIST;
1750
1751 ctx->state = DEMUX_PV_STATE_STARTING;
1752 /* have QP0 only on port owner, and only if link layer is IB */
1753 if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
1754 rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
1755 ctx->has_smi = 1;
1756
1757 if (ctx->has_smi) {
1758 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1759 if (ret) {
1760 pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
1761 goto err_out;
1762 }
1763 }
1764
1765 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
1766 if (ret) {
1767 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
1768 goto err_out_qp0;
1769 }
1770
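	/* The CQ must hold both send and receive completions for QP1, and
	 * twice that when a QP0 is present as well. */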
1771 cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1772 if (ctx->has_smi)
1773 cq_size *= 2;
1774
1775 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1776 NULL, ctx, cq_size, 0);
1777 if (IS_ERR(ctx->cq)) {
1778 ret = PTR_ERR(ctx->cq);
1779 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1780 goto err_buf;
1781 }
1782
1783 ctx->pd = ib_alloc_pd(ctx->ib_dev);
1784 if (IS_ERR(ctx->pd)) {
1785 ret = PTR_ERR(ctx->pd);
1786 pr_err("Couldn't create tunnel PD (%d)\n", ret);
1787 goto err_cq;
1788 }
1789
1790 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
1791 if (IS_ERR(ctx->mr)) {
1792 ret = PTR_ERR(ctx->mr);
1793 pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
1794 goto err_pd;
1795 }
1796
1797 if (ctx->has_smi) {
1798 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
1799 if (ret) {
1800 pr_err("Couldn't create %s QP0 (%d)\n",
1801 create_tun ? "tunnel for" : "", ret);
1802 goto err_mr;
1803 }
1804 }
1805
1806 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
1807 if (ret) {
1808 pr_err("Couldn't create %s QP1 (%d)\n",
1809 create_tun ? "tunnel for" : "", ret);
1810 goto err_qp0;
1811 }
1812
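	/* Tunnel contexts are serviced by the tunnel completion worker;
	 * the master's real special QPs use mlx4_ib_sqp_comp_worker(). */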
1813 if (create_tun)
1814 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
1815 else
1816 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
1817
1818 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
1819
1820 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1821 if (ret) {
1822 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
1823 goto err_wq;
1824 }
1825 ctx->state = DEMUX_PV_STATE_ACTIVE;
1826 return 0;
1827
1828err_wq:
1829 ctx->wq = NULL;
1830 ib_destroy_qp(ctx->qp[1].qp);
1831 ctx->qp[1].qp = NULL;
1832
1833
1834err_qp0:
1835 if (ctx->has_smi)
1836 ib_destroy_qp(ctx->qp[0].qp);
1837 ctx->qp[0].qp = NULL;
1838
1839err_mr:
1840 ib_dereg_mr(ctx->mr);
1841 ctx->mr = NULL;
1842
1843err_pd:
1844 ib_dealloc_pd(ctx->pd);
1845 ctx->pd = NULL;
1846
1847err_cq:
1848 ib_destroy_cq(ctx->cq);
1849 ctx->cq = NULL;
1850
1851err_buf:
1852 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
1853
1854err_out_qp0:
1855 if (ctx->has_smi)
1856 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
1857err_out:
1858 ctx->state = DEMUX_PV_STATE_DOWN;
1859 return ret;
1860}
1861
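/* Undo create_pv_resources() in reverse order, optionally flushing the
 * context's workqueue first so no completion work is still in flight.
 */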
1862static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
1863 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
1864{
1865 if (!ctx)
1866 return;
1867 if (ctx->state > DEMUX_PV_STATE_DOWN) {
1868 ctx->state = DEMUX_PV_STATE_DOWNING;
1869 if (flush)
1870 flush_workqueue(ctx->wq);
1871 if (ctx->has_smi) {
1872 ib_destroy_qp(ctx->qp[0].qp);
1873 ctx->qp[0].qp = NULL;
1874 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
1875 }
1876 ib_destroy_qp(ctx->qp[1].qp);
1877 ctx->qp[1].qp = NULL;
1878 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
1879 ib_dereg_mr(ctx->mr);
1880 ctx->mr = NULL;
1881 ib_dealloc_pd(ctx->pd);
1882 ctx->pd = NULL;
1883 ib_destroy_cq(ctx->cq);
1884 ctx->cq = NULL;
1885 ctx->state = DEMUX_PV_STATE_DOWN;
1886 }
1887}
1888
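/* Bring the tunnel QP resources for one slave/port up or down; for the
 * master function the real special QP resources are handled as well.
 */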
1889static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
1890 int port, int do_init)
1891{
1892 int ret = 0;
1893
1894 if (!do_init) {
1895 clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
1896 /* for master, destroy real sqp resources */
1897 if (slave == mlx4_master_func_num(dev->dev))
1898 destroy_pv_resources(dev, slave, port,
1899 dev->sriov.sqps[port - 1], 1);
1900 /* destroy the tunnel qp resources */
1901 destroy_pv_resources(dev, slave, port,
1902 dev->sriov.demux[port - 1].tun[slave], 1);
1903 return 0;
1904 }
1905
1906 /* create the tunnel qp resources */
1907 ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
1908 dev->sriov.demux[port - 1].tun[slave]);
1909
1910 /* for master, create the real sqp resources */
1911 if (!ret && slave == mlx4_master_func_num(dev->dev))
1912 ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
1913 dev->sriov.sqps[port - 1]);
1914 return ret;
1915}
1916
1917void mlx4_ib_tunnels_update_work(struct work_struct *work)
1918{
1919 struct mlx4_ib_demux_work *dmxw;
1920
1921 dmxw = container_of(work, struct mlx4_ib_demux_work, work);
1922 mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
1923 dmxw->do_init);
1924 kfree(dmxw);
1925 return;
1926}
1927
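/* Build the per-port demux context: one tunnel context per active function,
 * the multicast paravirtualization state, and the tunnelling and up/down
 * workqueues.
 */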
1928static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
1929 struct mlx4_ib_demux_ctx *ctx,
1930 int port)
1931{
1932 char name[12];
1933 int ret = 0;
1934 int i;
1935
1936 ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
1937 sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
1938 if (!ctx->tun)
1939 return -ENOMEM;
1940
1941 ctx->dev = dev;
1942 ctx->port = port;
1943 ctx->ib_dev = &dev->ib_dev;
1944
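	/* Only allocate tunnel contexts for functions that are actually
	 * active on this port; a VF may own just a subset of the device's
	 * ports. */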
1945 for (i = 0;
1946 i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
1947 i++) {
1948 struct mlx4_active_ports actv_ports =
1949 mlx4_get_active_ports(dev->dev, i);
1950
1951 if (!test_bit(port - 1, actv_ports.ports))
1952 continue;
1953
1954 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
1955 if (ret) {
1956 ret = -ENOMEM;
1957 goto err_mcg;
1958 }
1959 }
1960
1961 ret = mlx4_ib_mcg_port_init(ctx);
1962 if (ret) {
1963 pr_err("Failed initializing mcg para-virt (%d)\n", ret);
1964 goto err_mcg;
1965 }
1966
1967 snprintf(name, sizeof name, "mlx4_ibt%d", port);
1968 ctx->wq = create_singlethread_workqueue(name);
1969 if (!ctx->wq) {
1970 pr_err("Failed to create tunnelling WQ for port %d\n", port);
1971 ret = -ENOMEM;
1972 goto err_wq;
1973 }
1974
1975 snprintf(name, sizeof name, "mlx4_ibud%d", port);
1976 ctx->ud_wq = create_singlethread_workqueue(name);
1977 if (!ctx->ud_wq) {
1978 pr_err("Failed to create up/down WQ for port %d\n", port);
1979 ret = -ENOMEM;
1980 goto err_udwq;
1981 }
1982
1983 return 0;
1984
1985err_udwq:
1986 destroy_workqueue(ctx->wq);
1987 ctx->wq = NULL;
1988
1989err_wq:
1990 mlx4_ib_mcg_port_cleanup(ctx, 1);
1991err_mcg:
1992 for (i = 0; i < dev->dev->caps.sqp_demux; i++)
1993 free_pv_object(dev, i, port);
1994 kfree(ctx->tun);
1995 ctx->tun = NULL;
1996 return ret;
1997}
1998
1999static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2000{
2001 if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2002 sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2003 flush_workqueue(sqp_ctx->wq);
2004 if (sqp_ctx->has_smi) {
2005 ib_destroy_qp(sqp_ctx->qp[0].qp);
2006 sqp_ctx->qp[0].qp = NULL;
2007 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2008 }
2009 ib_destroy_qp(sqp_ctx->qp[1].qp);
2010 sqp_ctx->qp[1].qp = NULL;
2011 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2012 ib_dereg_mr(sqp_ctx->mr);
2013 sqp_ctx->mr = NULL;
2014 ib_dealloc_pd(sqp_ctx->pd);
2015 sqp_ctx->pd = NULL;
2016 ib_destroy_cq(sqp_ctx->cq);
2017 sqp_ctx->cq = NULL;
2018 sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2019 }
2020}
2021
2022static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2023{
2024 int i;
2025 if (ctx) {
2026 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2027 mlx4_ib_mcg_port_cleanup(ctx, 1);
2028 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2029 if (!ctx->tun[i])
2030 continue;
2031 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2032 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2033 }
2034 flush_workqueue(ctx->wq);
2035 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2036 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2037 free_pv_object(dev, i, ctx->port);
2038 }
2039 kfree(ctx->tun);
2040 destroy_workqueue(ctx->ud_wq);
2041 destroy_workqueue(ctx->wq);
2042 }
2043}
2044
2045static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2046{
2047 int i;
2048
2049 if (!mlx4_is_master(dev->dev))
2050 return;
2051 /* initialize or tear down tunnel QPs for the master */
2052 for (i = 0; i < dev->dev->caps.num_ports; i++)
2053 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
2054 return;
2055}
2056
2057int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2058{
2059 int i = 0;
2060 int err;
2061
2062 if (!mlx4_is_mfunc(dev->dev))
2063 return 0;
2064
2065 dev->sriov.is_going_down = 0;
2066 spin_lock_init(&dev->sriov.going_down_lock);
2067 mlx4_ib_cm_paravirt_init(dev);
2068
2069 mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2070
2071 if (mlx4_is_slave(dev->dev)) {
2072 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2073 return 0;
2074 }
2075
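	/* Assign a node GUID to every function: the master keeps the
	 * device's own node GUID, every other slave gets a freshly
	 * generated one. */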
2076 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2077 if (i == mlx4_master_func_num(dev->dev))
2078 mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2079 else
2080 mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2081 }
2082
2083 err = mlx4_ib_init_alias_guid_service(dev);
2084 if (err) {
2085 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2086 goto paravirt_err;
2087 }
2088 err = mlx4_ib_device_register_sysfs(dev);
2089 if (err) {
2090 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2091 goto sysfs_err;
2092 }
2093
2094 mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2095 dev->dev->caps.sqp_demux);
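	/* Per port: cache the port GUID (GID index 0), allocate the
	 * master's real special QP context, and build the demux context
	 * with its tunnel contexts and workqueues. */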
2096 for (i = 0; i < dev->num_ports; i++) {
2097 union ib_gid gid;
2098 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2099 if (err)
2100 goto demux_err;
2101 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2102 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2103 &dev->sriov.sqps[i]);
2104 if (err)
2105 goto demux_err;
2106 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2107 if (err)
2108 goto free_pv;
2109 }
2110 mlx4_ib_master_tunnels(dev, 1);
2111 return 0;
2112
2113free_pv:
2114 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2115demux_err:
2116 while (--i >= 0) {
2117 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2118 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2119 }
2120 mlx4_ib_device_unregister_sysfs(dev);
2121
2122sysfs_err:
2123 mlx4_ib_destroy_alias_guid_service(dev);
2124
2125paravirt_err:
2126 mlx4_ib_cm_paravirt_clean(dev, -1);
2127
2128 return err;
2129}
2130
2131void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2132{
2133 int i;
2134 unsigned long flags;
2135
2136 if (!mlx4_is_mfunc(dev->dev))
2137 return;
2138
2139 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2140 dev->sriov.is_going_down = 1;
2141 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
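	/* Only the master owns the per-port demux/SQP contexts, the CM
	 * paravirt state, the alias GUID service and the sysfs entries,
	 * so only the master has anything further to tear down. */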
2142 if (mlx4_is_master(dev->dev)) {
2143 for (i = 0; i < dev->num_ports; i++) {
2144 flush_workqueue(dev->sriov.demux[i].ud_wq);
2145 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2146 kfree(dev->sriov.sqps[i]);
2147 dev->sriov.sqps[i] = NULL;
2148 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2149 }
2150
2151 mlx4_ib_cm_paravirt_clean(dev, -1);
2152 mlx4_ib_destroy_alias_guid_service(dev);
2153 mlx4_ib_device_unregister_sysfs(dev);
2154 }
2155}