/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

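/*
 * Tunnel/proxy work request IDs pack three fields into one 64-bit wr_id:
 * bit 34 flags a receive completion, bits 32-33 carry the proxy QP type
 * (0 = SMI, 1 = GSI), and the low bits hold the ring buffer index.
 */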
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

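/*
 * Issue a MAD_IFC mailbox command to the firmware: the input MAD (plus,
 * for received MADs, an extended-info block built from the work
 * completion) is passed down, and the firmware's response MAD is copied
 * back into response_mad on success.
 */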
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	if (in_wc) {
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
			   in_modifier, op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

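/*
 * Cache an address handle for the subnet manager so that traps and other
 * SM-bound MADs can be posted later; any previously cached AH for the
 * port is destroyed under sm_lock.
 */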
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_CLIENT_REREGISTER);

			if (prev_lid != lid)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_LID_CHANGE);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);

			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			break;
		default:
			break;
		}
}

static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

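/*
 * Forward a trap MAD received from the firmware to the subnet manager,
 * using the cached SM address handle for the port.
 */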
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	return 0;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}

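/*
 * Translate a physical pkey index into the indices of the matching
 * full-member and limited-member pkey entries, and report whether the
 * original entry was a full member (high bit of the pkey set).
 */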
static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
				 u8 *full_pk_ix, u8 *partial_pk_ix,
				 int *is_full_member)
{
	u16 search_pkey;
	int fm;
	int err = 0;
	u16 pk;

	err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
	if (err)
		return err;

	fm = (search_pkey & 0x8000) ? 1 : 0;
	if (fm) {
		*full_pk_ix = ph_pkey_ix;
		search_pkey &= 0x7FFF;
	} else {
		*partial_pk_ix = ph_pkey_ix;
		search_pkey |= 0x8000;
	}

	if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
		pk = 0xFFFF;

	if (fm)
		*partial_pk_ix = (pk & 0xFF);
	else
		*full_pk_ix = (pk & 0xFF);

	*is_full_member = fm;
	return err;
}

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct ib_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	int i;
	int is_full_member = 0;
	u16 tun_pkey_ix;
	u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;

	if (dest_qpt > IB_QPT_GSI)
		return -EINVAL;

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if the proxy qp has been created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute the pkey index for the slave: first translate the
	 * virtualized Dom0 pkey index to a physical one */
	if (dest_qpt) {
		ph_pkey_ix =
			dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];

		/* now, translate this to the slave pkey index */
		ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
					    &partial_pk_ix, &is_full_member);
		if (ret)
			return -EINVAL;

		for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
			if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
			    (is_full_member &&
			     (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
				break;
		}
		if (i == dev->dev->caps.pkey_table_len[port])
			return -EINVAL;
		tun_pkey_ix = i;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->caps.sqp_start + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get the tunnel tx data buf for the slave */
	src_qp = tun_qp->qp;

	/* create an address handle. Just need an empty one with the port num
	 * for the post send. The driver will set the force loopback bit in
	 * post_send */
	memset(&attr, 0, sizeof attr);
	attr.port_num = port;
	ah = ib_create_ah(tun_ctx->pd, &attr);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate the tunnel tx buf only after the early-failure paths
	 * above have returned */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto out;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to the tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
	tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
	wr.wr.ud.remote_qpn = dqpn;
	wr.next = NULL;
	wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

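/*
 * Decide which slave an incoming wire MAD belongs to (from the TID for
 * responses, or from the destination GID when a GRH is present) and
 * tunnel it to that slave's proxy QP.
 */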
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	int slave;
	u8 *slave_id;

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
		if (slave < 0) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* Make sure the slave id is in range (this also catches a
	 * slave==255 dom0 indicator that was not remapped above). */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
			 slave, err);
	return 0;
}

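/*
 * Process an incoming MAD on an IB (non-RoCE) port: filter out methods
 * the SMA/GSA cannot handle, pass the MAD to firmware via MAD_IFC, and
 * snoop successful SM sets to synthesize port events.
 */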
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

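/*
 * Translate a raw mlx4 hardware counter set into the standard PMA
 * PortCounters layout (the data counters are reported in units of four
 * bytes, hence the shift by two).
 */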
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes) >> 2));
	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes) >> 2));
	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}

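/*
 * Work handler for PORT_MGMT_CHANGE events from the device EQ: decode
 * the event subtype and changed-attribute mask and dispatch the
 * corresponding IB events to consumers.
 */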
void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - This should be done before handling
		 * the other changed attributes so that MADs can be sent to
		 * the SM */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			mlx4_ib_dispatch_event(dev, port,
					       IB_EVENT_CLIENT_REREGISTER);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device		= &dev->ib_dev;
	event.element.port_num	= port_num;
	event.event		= type;

	ib_dispatch_event(&event);
}

static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

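/*
 * Repost a receive buffer on a proxy/tunnel QP; the buffer size depends
 * on whether the QP carries tunneled MADs (UD tunnel QP) or raw MADs
 * (real special QP).
 */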
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->mr->lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	return 0;
}

static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int slave_start = dev->dev->caps.sqp_start + 8 * slave;

	return (qpn >= slave_start && qpn <= slave_start + 1);
}

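/*
 * Send a MAD out on the wire on behalf of a slave, via the real special
 * QP owned by the master: look up the physical pkey index for the
 * slave's virtual index, rebuild the address handle, and post the send.
 */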
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	int ret = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	u8 sgid_index;

	sqp_ctx = dev->sriov.sqps[port-1];

	/* check if the proxy qp has been created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	/* QP0 forwarding only for Dom0 */
	if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
		return -EINVAL;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	/* create ah */
	sgid_index = attr->grh.sgid_index;
	attr->grh.sgid_index = 0;
	ah = ib_create_ah(sqp_ctx->pd, attr);
	if (IS_ERR(ah))
		return -ENOMEM;
	attr->grh.sgid_index = sgid_index;
	to_mah(ah)->av.ib.gid_index = sgid_index;
	/* get rid of force-loopback bit */
	to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	if (sqp->tx_ring[wire_tx_ix].ah)
		ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->mr->lkey;

	wr.wr.ud.ah = ah;
	wr.wr.ud.port_num = port;
	wr.wr.ud.pkey_index = wire_pkey_ix;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.remote_qpn = remote_qpn;
	wr.next = NULL;
	wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.sg_list = &list;
	wr.num_sge = 1;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(send_qp, &wr, &bad_wr);
out:
	if (ret)
		ib_destroy_ah(ah);
	return ret;
}

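/*
 * Handle a MAD arriving on a tunnel QP from a slave: validate that the
 * source proxy QP really belongs to that slave, stamp the slave id into
 * the TID, and forward the MAD to the wire via the real special QP.
 */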
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct ib_ah_attr ah_attr;
	u8 *slave_id;
	int slave;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->caps.sqp_start ||
	    wc->src_qp >= dev->dev->caps.base_tunnel_sqpn ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	slave = ((wc->src_qp & ~0x7) - dev->dev->caps.sqp_start) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "belongs to another slave\n", wc->src_qp);
		return;
	}
	if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
			     "non-master trying to send QP0 packets\n", wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
				     "class:%d slave:%d\n", *slave_id,
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		} else
			*slave_id = slave;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
				     "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate
	 * a standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;
	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if ((ah_attr.ah_flags & IB_AH_GRH) &&
	    (ah_attr.grh.sgid_index != slave)) {
		mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
			     slave, ah_attr.grh.sgid_index);
		return;
	}

	mlx4_ib_send_to_wire(dev, slave, ctx->port,
			     is_proxy_qp0(dev, wc->src_qp, slave) ?
			     IB_QPT_SMI : IB_QPT_GSI,
			     be16_to_cpu(tunnel->hdr.pkey_index),
			     be32_to_cpu(tunnel->hdr.remote_qpn),
			     be32_to_cpu(tunnel->hdr.qkey),
			     &ah_attr, &tunnel->mad);
}

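/*
 * Allocate and DMA-map the rx and tx rings for one proxy QP; buffer
 * sizes differ between tunnel QPs and real special QPs.
 */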
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			ib_destroy_ah(tun_qp->tx_ring[i].ah);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

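/*
 * Work handler for tunnel QP completions: multiplex received MADs to
 * the wire, repost receive buffers, and recycle tx ring slots (and
 * their address handles) on send completions or errors.
 */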
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel "
					       "buf:%lld\n", wc.wr_id);
				break;
			case IB_WC_SEND:
				pr_debug("received tunnel send completion:"
					 "wrid=0x%llx, status=0x%x\n",
					 wc.wr_id, wc.status);
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);

				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}

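/*
 * Create one proxy QP (a UD tunnel QP for a slave, or the real special
 * QP on the master), drive it through INIT/RTR/RTS, and prime its
 * receive ring.
 */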
static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	attr.pkey_index =
		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err("mlx4_ib_post_pv_qp_buf error"
			       " (err = %d, i = %d)\n", ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}

/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_TUNNEL_BUFS - 1)))
					pr_err("Failed reposting SQP "
					       "buf:%lld\n", wc.wr_id);
				break;
			default:
				BUG_ON(1);
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error on SQP: %d."
				 " status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				ib_destroy_ah(sqp->tx_ring[wc.wr_id &
					      (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}

static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx) {
		pr_err("failed allocating pv resource context "
		       "for port %d, slave %d\n", port, slave);
		return -ENOMEM;
	}

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}

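/*
 * Allocate everything a paravirt MAD context needs: rx/tx buffers, CQ,
 * PD, DMA MR, and the QP0 (port owner with IB link layer only) and QP1
 * proxy QPs, then arm the CQ and mark the context active.
 */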
1440 | static int create_pv_resources(struct ib_device *ibdev, int slave, int port, | |
1441 | int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) | |
1442 | { | |
1443 | int ret, cq_size; | |
1444 | ||
1445 | ctx->state = DEMUX_PV_STATE_STARTING; | |
1446 | /* have QP0 only on port owner, and only if link layer is IB */ | |
1447 | if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) && | |
1448 | rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND) | |
1449 | ctx->has_smi = 1; | |
1450 | ||
1451 | if (ctx->has_smi) { | |
1452 | ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); | |
1453 | if (ret) { | |
1454 | pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret); | |
1455 | goto err_out; | |
1456 | } | |
1457 | } | |
1458 | ||
1459 | ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); | |
1460 | if (ret) { | |
1461 | pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret); | |
1462 | goto err_out_qp0; | |
1463 | } | |
1464 | ||
1465 | cq_size = 2 * MLX4_NUM_TUNNEL_BUFS; | |
1466 | if (ctx->has_smi) | |
1467 | cq_size *= 2; | |
1468 | ||
1469 | ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, | |
1470 | NULL, ctx, cq_size, 0); | |
1471 | if (IS_ERR(ctx->cq)) { | |
1472 | ret = PTR_ERR(ctx->cq); | |
1473 | pr_err("Couldn't create tunnel CQ (%d)\n", ret); | |
1474 | goto err_buf; | |
1475 | } | |
1476 | ||
1477 | ctx->pd = ib_alloc_pd(ctx->ib_dev); | |
1478 | if (IS_ERR(ctx->pd)) { | |
1479 | ret = PTR_ERR(ctx->pd); | |
1480 | pr_err("Couldn't create tunnel PD (%d)\n", ret); | |
1481 | goto err_cq; | |
1482 | } | |
1483 | ||
1484 | ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE); | |
1485 | if (IS_ERR(ctx->mr)) { | |
1486 | ret = PTR_ERR(ctx->mr); | |
1487 | pr_err("Couldn't get tunnel DMA MR (%d)\n", ret); | |
1488 | goto err_pd; | |
1489 | } | |
1490 | ||
1491 | if (ctx->has_smi) { | |
1492 | ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); | |
1493 | if (ret) { | |
1494 | pr_err("Couldn't create %s QP0 (%d)\n", | |
1495 | create_tun ? "tunnel for" : "", ret); | |
1496 | goto err_mr; | |
1497 | } | |
1498 | } | |
1499 | ||
1500 | ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); | |
1501 | if (ret) { | |
1502 | pr_err("Couldn't create %s QP1 (%d)\n", | |
1503 | create_tun ? "tunnel for" : "", ret); | |
1504 | goto err_qp0; | |
1505 | } | |
1506 | ||
1507 | if (create_tun) | |
1508 | INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); | |
1509 | else | |
1510 | INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); | |
1511 | ||
1512 | ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; | |
1513 | ||
1514 | ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); | |
1515 | if (ret) { | |
1516 | pr_err("Couldn't arm tunnel cq (%d)\n", ret); | |
1517 | goto err_wq; | |
1518 | } | |
1519 | ctx->state = DEMUX_PV_STATE_ACTIVE; | |
1520 | return 0; | |
1521 | ||
1522 | err_wq: | |
1523 | ctx->wq = NULL; | |
1524 | ib_destroy_qp(ctx->qp[1].qp); | |
1525 | ctx->qp[1].qp = NULL; | |
1526 | ||
1528 | err_qp0: | |
1529 | if (ctx->has_smi) | |
1530 | ib_destroy_qp(ctx->qp[0].qp); | |
1531 | ctx->qp[0].qp = NULL; | |
1532 | ||
1533 | err_mr: | |
1534 | ib_dereg_mr(ctx->mr); | |
1535 | ctx->mr = NULL; | |
1536 | ||
1537 | err_pd: | |
1538 | ib_dealloc_pd(ctx->pd); | |
1539 | ctx->pd = NULL; | |
1540 | ||
1541 | err_cq: | |
1542 | ib_destroy_cq(ctx->cq); | |
1543 | ctx->cq = NULL; | |
1544 | ||
1545 | err_buf: | |
1546 | mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); | |
1547 | ||
1548 | err_out_qp0: | |
1549 | if (ctx->has_smi) | |
1550 | mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); | |
1551 | err_out: | |
1552 | ctx->state = DEMUX_PV_STATE_DOWN; | |
1553 | return ret; | |
1554 | } | |
1555 | ||
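| /* | |
| * Undo create_pv_resources() in reverse order.  The context is moved | |
| * through DOWNING first so the completion worker sees it is dying; | |
| * callers that have already drained (or are running on) the demux | |
| * workqueue pass flush = 0. | |
| */ | |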
1556 | static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, | |
1557 | struct mlx4_ib_demux_pv_ctx *ctx, int flush) | |
1558 | { | |
1559 | if (!ctx) | |
1560 | return; | |
1561 | if (ctx->state > DEMUX_PV_STATE_DOWN) { | |
1562 | ctx->state = DEMUX_PV_STATE_DOWNING; | |
1563 | if (flush) | |
1564 | flush_workqueue(ctx->wq); | |
1565 | if (ctx->has_smi) { | |
1566 | ib_destroy_qp(ctx->qp[0].qp); | |
1567 | ctx->qp[0].qp = NULL; | |
1568 | mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); | |
1569 | } | |
1570 | ib_destroy_qp(ctx->qp[1].qp); | |
1571 | ctx->qp[1].qp = NULL; | |
1572 | mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); | |
1573 | ib_dereg_mr(ctx->mr); | |
1574 | ctx->mr = NULL; | |
1575 | ib_dealloc_pd(ctx->pd); | |
1576 | ctx->pd = NULL; | |
1577 | ib_destroy_cq(ctx->cq); | |
1578 | ctx->cq = NULL; | |
1579 | ctx->state = DEMUX_PV_STATE_DOWN; | |
1580 | } | |
1581 | } | |
1582 | ||
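| /* | |
| * Create or destroy PV resources for one slave on one port.  Every | |
| * slave gets tunnel QPs; the master additionally owns the real special | |
| * QPs that face the wire. | |
| */ | |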
1583 | static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave, | |
1584 | int port, int do_init) | |
1585 | { | |
1586 | int ret = 0; | |
1587 | ||
1588 | if (!do_init) { | |
1589 | /* for master, destroy real sqp resources */ | |
1590 | if (slave == mlx4_master_func_num(dev->dev)) | |
1591 | destroy_pv_resources(dev, slave, port, | |
1592 | dev->sriov.sqps[port - 1], 1); | |
1593 | /* destroy the tunnel qp resources */ | |
1594 | destroy_pv_resources(dev, slave, port, | |
1595 | dev->sriov.demux[port - 1].tun[slave], 1); | |
1596 | return 0; | |
1597 | } | |
1598 | ||
1599 | /* create the tunnel qp resources */ | |
1600 | ret = create_pv_resources(&dev->ib_dev, slave, port, 1, | |
1601 | dev->sriov.demux[port - 1].tun[slave]); | |
1602 | ||
1603 | /* for master, create the real sqp resources */ | |
1604 | if (!ret && slave == mlx4_master_func_num(dev->dev)) | |
1605 | ret = create_pv_resources(&dev->ib_dev, slave, port, 0, | |
1606 | dev->sriov.sqps[port - 1]); | |
1607 | return ret; | |
1608 | } | |
1609 | ||
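| /* | |
| * Deferred-work wrapper around mlx4_ib_tunnels_update().  A caller | |
| * fills in a struct mlx4_ib_demux_work and queues it, roughly (an | |
| * illustrative sketch, not copied from a specific call site): | |
| * | |
| *	dmxw = kmalloc(sizeof(*dmxw), GFP_ATOMIC); | |
| *	dmxw->dev = dev; dmxw->slave = slave; | |
| *	dmxw->port = port; dmxw->do_init = do_init; | |
| *	INIT_WORK(&dmxw->work, mlx4_ib_tunnels_update_work); | |
| *	queue_work(dev->sriov.demux[port - 1].ud_wq, &dmxw->work); | |
| * | |
| * The handler consumes (kfrees) the request. | |
| */ | |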
1610 | void mlx4_ib_tunnels_update_work(struct work_struct *work) | |
1611 | { | |
1612 | struct mlx4_ib_demux_work *dmxw; | |
1613 | ||
1614 | dmxw = container_of(work, struct mlx4_ib_demux_work, work); | |
1615 | mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port, | |
1616 | dmxw->do_init); | |
1617 | kfree(dmxw); | |
1619 | } | |
1620 | ||
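| /* | |
| * Allocate the per-port demux context: one PV context slot per possible | |
| * QP1 client (sqp_demux), plus the tunnel and up/down workqueues for | |
| * the port. | |
| */ | |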
1621 | static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, | |
1622 | struct mlx4_ib_demux_ctx *ctx, | |
1623 | int port) | |
1624 | { | |
1625 | char name[12]; | |
1626 | int ret = 0; | |
1627 | int i; | |
1628 | ||
1629 | ctx->tun = kcalloc(dev->dev->caps.sqp_demux, | |
1630 | sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL); | |
1631 | if (!ctx->tun) | |
1632 | return -ENOMEM; | |
1633 | ||
1634 | ctx->dev = dev; | |
1635 | ctx->port = port; | |
1636 | ctx->ib_dev = &dev->ib_dev; | |
1637 | ||
1638 | for (i = 0; i < dev->dev->caps.sqp_demux; i++) { | |
1639 | ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); | |
1640 | if (ret) | |
1641 | goto err_wq; | |
1644 | } | |
1645 | ||
1646 | snprintf(name, sizeof name, "mlx4_ibt%d", port); | |
1647 | ctx->wq = create_singlethread_workqueue(name); | |
1648 | if (!ctx->wq) { | |
1649 | pr_err("Failed to create tunnelling WQ for port %d\n", port); | |
1650 | ret = -ENOMEM; | |
1651 | goto err_wq; | |
1652 | } | |
1653 | ||
1654 | snprintf(name, sizeof name, "mlx4_ibud%d", port); | |
1655 | ctx->ud_wq = create_singlethread_workqueue(name); | |
1656 | if (!ctx->ud_wq) { | |
1657 | pr_err("Failed to create up/down WQ for port %d\n", port); | |
1658 | ret = -ENOMEM; | |
1659 | goto err_udwq; | |
1660 | } | |
1661 | ||
1662 | return 0; | |
1663 | ||
1664 | err_udwq: | |
1665 | destroy_workqueue(ctx->wq); | |
1666 | ctx->wq = NULL; | |
1667 | ||
1668 | err_wq: | |
1669 | for (i = 0; i < dev->dev->caps.sqp_demux; i++) | |
1670 | free_pv_object(dev, i, port); | |
1671 | kfree(ctx->tun); | |
1672 | ctx->tun = NULL; | |
1673 | return ret; | |
1674 | } | |
1675 | ||
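| /* | |
| * Tear down the master's real special QP resources; the non-tunnel | |
| * counterpart of destroy_pv_resources(). | |
| */ | |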
1676 | static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx) | |
1677 | { | |
1678 | if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) { | |
1679 | sqp_ctx->state = DEMUX_PV_STATE_DOWNING; | |
1680 | flush_workqueue(sqp_ctx->wq); | |
1681 | if (sqp_ctx->has_smi) { | |
1682 | ib_destroy_qp(sqp_ctx->qp[0].qp); | |
1683 | sqp_ctx->qp[0].qp = NULL; | |
1684 | mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0); | |
1685 | } | |
1686 | ib_destroy_qp(sqp_ctx->qp[1].qp); | |
1687 | sqp_ctx->qp[1].qp = NULL; | |
1688 | mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0); | |
1689 | ib_dereg_mr(sqp_ctx->mr); | |
1690 | sqp_ctx->mr = NULL; | |
1691 | ib_dealloc_pd(sqp_ctx->pd); | |
1692 | sqp_ctx->pd = NULL; | |
1693 | ib_destroy_cq(sqp_ctx->cq); | |
1694 | sqp_ctx->cq = NULL; | |
1695 | sqp_ctx->state = DEMUX_PV_STATE_DOWN; | |
1696 | } | |
1697 | } | |
1698 | ||
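| /* | |
| * Destroy a per-port demux context: mark every PV context as going | |
| * down, drain in-flight work once, then free each slave's tunnel | |
| * resources and the port's workqueues. | |
| */ | |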
1699 | static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) | |
1700 | { | |
1701 | int i; | |
1702 | if (ctx) { | |
1703 | struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); | |
1704 | for (i = 0; i < dev->dev->caps.sqp_demux; i++) { | |
1705 | if (!ctx->tun[i]) | |
1706 | continue; | |
1707 | if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) | |
1708 | ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; | |
1709 | } | |
1710 | flush_workqueue(ctx->wq); | |
1711 | for (i = 0; i < dev->dev->caps.sqp_demux; i++) { | |
1712 | destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); | |
1713 | free_pv_object(dev, i, ctx->port); | |
1714 | } | |
1715 | kfree(ctx->tun); | |
1716 | destroy_workqueue(ctx->ud_wq); | |
1717 | destroy_workqueue(ctx->wq); | |
1718 | } | |
1719 | } | |
1720 | ||
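| /* Bring the master's tunnel QPs up or down on every port. */ | |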
1721 | static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init) | |
1722 | { | |
1723 | int i; | |
1724 | ||
1725 | if (!mlx4_is_master(dev->dev)) | |
1726 | return; | |
1727 | /* initialize or tear down tunnel QPs for the master */ | |
1728 | for (i = 0; i < dev->dev->caps.num_ports; i++) | |
1729 | mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init); | |
1731 | } | |
1732 | ||
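| /* | |
| * Entry point for SR-IOV MAD demultiplexing.  A no-op on single- | |
| * function devices; a slave only notes that it runs in tunnel mode, | |
| * while the master allocates the per-port demux machinery and brings | |
| * up its own tunnel QPs. | |
| */ | |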
1733 | int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) | |
1734 | { | |
1735 | int i = 0; | |
1736 | int err; | |
1737 | ||
1738 | if (!mlx4_is_mfunc(dev->dev)) | |
1739 | return 0; | |
1740 | ||
1741 | dev->sriov.is_going_down = 0; | |
1742 | spin_lock_init(&dev->sriov.going_down_lock); | |
1743 | ||
1744 | mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n"); | |
1745 | ||
1746 | if (mlx4_is_slave(dev->dev)) { | |
1747 | mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n"); | |
1748 | return 0; | |
1749 | } | |
1750 | ||
1751 | mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n", | |
1752 | dev->dev->caps.sqp_demux); | |
1753 | for (i = 0; i < dev->num_ports; i++) { | |
1754 | err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1, | |
1755 | &dev->sriov.sqps[i]); | |
1756 | if (err) | |
1757 | goto demux_err; | |
1758 | err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1); | |
1759 | if (err) | |
1760 | goto demux_err; | |
1761 | } | |
1762 | mlx4_ib_master_tunnels(dev, 1); | |
1763 | return 0; | |
1764 | ||
1765 | demux_err: | |
1766 | while (i > 0) { | |
1767 | free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1); | |
1768 | mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); | |
1769 | --i; | |
1770 | } | |
1771 | ||
1772 | return err; | |
1773 | } | |
1774 | ||
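| /* | |
| * Shut down SR-IOV MAD demultiplexing: raise is_going_down under the | |
| * lock so no new demux work is accepted, then, on the master, drain | |
| * and free each port's SQP and demux contexts. | |
| */ | |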
1775 | void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev) | |
1776 | { | |
1777 | int i; | |
1778 | unsigned long flags; | |
1779 | ||
1780 | if (!mlx4_is_mfunc(dev->dev)) | |
1781 | return; | |
1782 | ||
1783 | spin_lock_irqsave(&dev->sriov.going_down_lock, flags); | |
1784 | dev->sriov.is_going_down = 1; | |
1785 | spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); | |
1786 | if (mlx4_is_master(dev->dev)) | |
1787 | for (i = 0; i < dev->num_ports; i++) { | |
1788 | flush_workqueue(dev->sriov.demux[i].ud_wq); | |
1789 | mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]); | |
1790 | kfree(dev->sriov.sqps[i]); | |
1791 | dev->sriov.sqps[i] = NULL; | |
1792 | mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); | |
1793 | } | |
1794 | } |