Commit | Line | Data |
---|---|---|
225c7b1f RD |
1 | /* |
2 | * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved. | |
51a379d0 | 3 | * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. |
225c7b1f RD |
4 | * |
5 | * This software is available to you under a choice of one of two | |
6 | * licenses. You may choose to be licensed under the terms of the GNU | |
7 | * General Public License (GPL) Version 2, available from the file | |
8 | * COPYING in the main directory of this source tree, or the | |
9 | * OpenIB.org BSD license below: | |
10 | * | |
11 | * Redistribution and use in source and binary forms, with or | |
12 | * without modification, are permitted provided that the following | |
13 | * conditions are met: | |
14 | * | |
15 | * - Redistributions of source code must retain the above | |
16 | * copyright notice, this list of conditions and the following | |
17 | * disclaimer. | |
18 | * | |
19 | * - Redistributions in binary form must reproduce the above | |
20 | * copyright notice, this list of conditions and the following | |
21 | * disclaimer in the documentation and/or other materials | |
22 | * provided with the distribution. | |
23 | * | |
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
31 | * SOFTWARE. | |
32 | */ | |
33 | ||
ea54b10c | 34 | #include <linux/log2.h> |
1049f138 | 35 | #include <linux/etherdevice.h> |
3ef967a4 | 36 | #include <net/ip.h> |
5a0e3ad6 | 37 | #include <linux/slab.h> |
fa417f7b | 38 | #include <linux/netdevice.h> |
ea54b10c | 39 | |
225c7b1f RD |
40 | #include <rdma/ib_cache.h> |
41 | #include <rdma/ib_pack.h> | |
4c3eb3ca | 42 | #include <rdma/ib_addr.h> |
1ffeb2eb | 43 | #include <rdma/ib_mad.h> |
89944450 | 44 | #include <rdma/uverbs_ioctl.h> |
225c7b1f | 45 | |
2f48485d | 46 | #include <linux/mlx4/driver.h> |
225c7b1f RD |
47 | #include <linux/mlx4/qp.h> |
48 | ||
49 | #include "mlx4_ib.h" | |
9ce28a20 | 50 | #include <rdma/mlx4-abi.h> |
225c7b1f | 51 | |
35f05dab YH |
52 | static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, |
53 | struct mlx4_ib_cq *recv_cq); | |
54 | static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, | |
55 | struct mlx4_ib_cq *recv_cq); | |
89944450 SR |
56 | static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state, |
57 | struct ib_udata *udata); | |
35f05dab | 58 | |
225c7b1f RD |
59 | enum { |
60 | MLX4_IB_ACK_REQ_FREQ = 8, | |
61 | }; | |
62 | ||
63 | enum { | |
64 | MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, | |
fa417f7b EC |
65 | MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, |
66 | MLX4_IB_LINK_TYPE_IB = 0, | |
67 | MLX4_IB_LINK_TYPE_ETH = 1 | |
225c7b1f RD |
68 | }; |
69 | ||
83904132 | 70 | enum { |
417608c2 EC |
71 | MLX4_IB_MIN_SQ_STRIDE = 6, |
72 | MLX4_IB_CACHE_LINE_SIZE = 64, | |
83904132 JM |
73 | }; |
74 | ||
3987a2d3 OG |
75 | enum { |
76 | MLX4_RAW_QP_MTU = 7, | |
77 | MLX4_RAW_QP_MSGMAX = 31, | |
78 | }; | |
79 | ||
297e0dad MS |
80 | #ifndef ETH_ALEN |
81 | #define ETH_ALEN 6 | |
82 | #endif | |
297e0dad | 83 | |
225c7b1f | 84 | static const __be32 mlx4_ib_opcode[] = { |
6fa8f719 VS |
85 | [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), |
86 | [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), | |
87 | [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), | |
88 | [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), | |
89 | [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), | |
90 | [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), | |
91 | [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), | |
92 | [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), | |
93 | [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), | |
94 | [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), | |
1b2cd0fc | 95 | [IB_WR_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), |
6fa8f719 VS |
96 | [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), |
97 | [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), | |
225c7b1f RD |
98 | }; |
99 | ||
400b1ebc GL |
100 | enum mlx4_ib_source_type { |
101 | MLX4_IB_QP_SRC = 0, | |
102 | MLX4_IB_RWQ_SRC = 1, | |
103 | }; | |
104 | ||
1ffeb2eb JM |
105 | static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) |
106 | { | |
107 | if (!mlx4_is_master(dev->dev)) | |
108 | return 0; | |
109 | ||
47605df9 JM |
110 | return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && |
111 | qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + | |
112 | 8 * MLX4_MFUNC_MAX; | |
1ffeb2eb JM |
113 | } |
114 | ||
225c7b1f RD |
115 | static int is_sqp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) |
116 | { | |
47605df9 JM |
117 | int proxy_sqp = 0; |
118 | int real_sqp = 0; | |
119 | int i; | |
120 | /* PPF or Native -- real SQP */ | |
121 | real_sqp = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && | |
122 | qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && | |
123 | qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); | |
124 | if (real_sqp) | |
125 | return 1; | |
126 | /* VF or PF -- proxy SQP */ | |
127 | if (mlx4_is_mfunc(dev->dev)) { | |
128 | for (i = 0; i < dev->dev->caps.num_ports; i++) { | |
c73c8b1e EBE |
129 | if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy || |
130 | qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) { | |
47605df9 JM |
131 | proxy_sqp = 1; |
132 | break; | |
133 | } | |
134 | } | |
135 | } | |
e1b866c6 MS |
136 | if (proxy_sqp) |
137 | return 1; | |
138 | ||
139 | return !!(qp->flags & MLX4_IB_ROCE_V2_GSI_QP); | |
225c7b1f RD |
140 | } |
141 | ||
1ffeb2eb | 142 | /* used for INIT/CLOSE port logic */ |
225c7b1f RD |
143 | static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) |
144 | { | |
47605df9 JM |
145 | int proxy_qp0 = 0; |
146 | int real_qp0 = 0; | |
147 | int i; | |
148 | /* PPF or Native -- real QP0 */ | |
149 | real_qp0 = ((mlx4_is_master(dev->dev) || !mlx4_is_mfunc(dev->dev)) && | |
150 | qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && | |
151 | qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); | |
152 | if (real_qp0) | |
153 | return 1; | |
154 | /* VF or PF -- proxy QP0 */ | |
155 | if (mlx4_is_mfunc(dev->dev)) { | |
156 | for (i = 0; i < dev->dev->caps.num_ports; i++) { | |
c73c8b1e | 157 | if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) { |
47605df9 JM |
158 | proxy_qp0 = 1; |
159 | break; | |
160 | } | |
161 | } | |
162 | } | |
163 | return proxy_qp0; | |
225c7b1f RD |
164 | } |
165 | ||
166 | static void *get_wqe(struct mlx4_ib_qp *qp, int offset) | |
167 | { | |
1c69fc2a | 168 | return mlx4_buf_offset(&qp->buf, offset); |
225c7b1f RD |
169 | } |
170 | ||
171 | static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) | |
172 | { | |
173 | return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); | |
174 | } | |
175 | ||
176 | static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) | |
177 | { | |
178 | return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); | |
179 | } | |
180 | ||
0e6e7416 RD |
181 | /* |
182 | * Stamp a SQ WQE so that it is invalid if prefetched by marking the | |
f95ccffc JM |
183 | * first four bytes of every 64 byte chunk with 0xffffffff, except for |
184 | * the very first chunk of the WQE. | |
0e6e7416 | 185 | */ |
f95ccffc | 186 | static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) |
0e6e7416 | 187 | { |
d2ae16d5 | 188 | __be32 *wqe; |
0e6e7416 | 189 | int i; |
ea54b10c | 190 | int s; |
ea54b10c | 191 | void *buf; |
ea54b10c | 192 | struct mlx4_wqe_ctrl_seg *ctrl; |
ea54b10c | 193 | |
f95ccffc JM |
194 | buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); |
195 | ctrl = (struct mlx4_wqe_ctrl_seg *)buf; | |
196 | s = (ctrl->qpn_vlan.fence_size & 0x3f) << 4; | |
197 | for (i = 64; i < s; i += 64) { | |
198 | wqe = buf + i; | |
199 | *wqe = cpu_to_be32(0xffffffff); | |
ea54b10c | 200 | } |
0e6e7416 RD |
201 | } |
202 | ||
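To make the loop bounds above concrete (a standalone model, not driver code): for a WQE whose control segment reports fence_size = 12, i.e. 12 * 16 = 192 bytes, the chunks at offsets 64 and 128 get their first dword overwritten with 0xffffffff while the first 64-byte chunk is left untouched.

```c
#include <stdio.h>
#include <string.h>

/* Standalone model of stamp_send_wqe(): overwrite the first four bytes of
 * every 64-byte chunk except the first one.  fence_size is the low 6 bits
 * of ctrl->qpn_vlan.fence_size, i.e. the WQE size in 16-byte units.
 */
static void stamp_model(unsigned char *wqe, unsigned int fence_size)
{
	unsigned int s = (fence_size & 0x3f) << 4;	/* WQE size in bytes */
	unsigned int i;

	for (i = 64; i < s; i += 64)
		memset(wqe + i, 0xff, 4);		/* 0xffffffff marker */
}

int main(void)
{
	unsigned char wqe[256] = { 0 };
	unsigned int off;

	stamp_model(wqe, 12);				/* 192-byte WQE */
	for (off = 0; off < 192; off += 64)
		printf("chunk at offset %3u: %s\n", off,
		       wqe[off] == 0xff ? "stamped" : "untouched");
	return 0;
}
```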
225c7b1f RD |
203 | static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) |
204 | { | |
205 | struct ib_event event; | |
206 | struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; | |
207 | ||
208 | if (type == MLX4_EVENT_TYPE_PATH_MIG) | |
209 | to_mibqp(qp)->port = to_mibqp(qp)->alt_port; | |
210 | ||
211 | if (ibqp->event_handler) { | |
212 | event.device = ibqp->device; | |
213 | event.element.qp = ibqp; | |
214 | switch (type) { | |
215 | case MLX4_EVENT_TYPE_PATH_MIG: | |
216 | event.event = IB_EVENT_PATH_MIG; | |
217 | break; | |
218 | case MLX4_EVENT_TYPE_COMM_EST: | |
219 | event.event = IB_EVENT_COMM_EST; | |
220 | break; | |
221 | case MLX4_EVENT_TYPE_SQ_DRAINED: | |
222 | event.event = IB_EVENT_SQ_DRAINED; | |
223 | break; | |
224 | case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: | |
225 | event.event = IB_EVENT_QP_LAST_WQE_REACHED; | |
226 | break; | |
227 | case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: | |
228 | event.event = IB_EVENT_QP_FATAL; | |
229 | break; | |
230 | case MLX4_EVENT_TYPE_PATH_MIG_FAILED: | |
231 | event.event = IB_EVENT_PATH_MIG_ERR; | |
232 | break; | |
233 | case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: | |
234 | event.event = IB_EVENT_QP_REQ_ERR; | |
235 | break; | |
236 | case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: | |
237 | event.event = IB_EVENT_QP_ACCESS_ERR; | |
238 | break; | |
239 | default: | |
987c8f8f | 240 | pr_warn("Unexpected event type %d " |
225c7b1f RD |
241 | "on QP %06x\n", type, qp->qpn); |
242 | return; | |
243 | } | |
244 | ||
245 | ibqp->event_handler(&event, ibqp->qp_context); | |
246 | } | |
247 | } | |
248 | ||
400b1ebc GL |
249 | static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type) |
250 | { | |
251 | pr_warn_ratelimited("Unexpected event type %d on WQ 0x%06x. Events are not supported for WQs\n", | |
252 | type, qp->qpn); | |
253 | } | |
254 | ||
1ffeb2eb | 255 | static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) |
225c7b1f RD |
256 | { |
257 | /* | |
258 | * UD WQEs must have a datagram segment. | |
259 | * RC and UC WQEs might have a remote address segment. | |
260 | * MLX WQEs need two extra inline data segments (for the UD | |
261 | * header and space for the ICRC). | |
262 | */ | |
263 | switch (type) { | |
1ffeb2eb | 264 | case MLX4_IB_QPT_UD: |
225c7b1f | 265 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
b832be1e | 266 | sizeof (struct mlx4_wqe_datagram_seg) + |
417608c2 | 267 | ((flags & MLX4_IB_QP_LSO) ? MLX4_IB_LSO_HEADER_SPARE : 0); |
1ffeb2eb JM |
268 | case MLX4_IB_QPT_PROXY_SMI_OWNER: |
269 | case MLX4_IB_QPT_PROXY_SMI: | |
270 | case MLX4_IB_QPT_PROXY_GSI: | |
271 | return sizeof (struct mlx4_wqe_ctrl_seg) + | |
272 | sizeof (struct mlx4_wqe_datagram_seg) + 64; | |
273 | case MLX4_IB_QPT_TUN_SMI_OWNER: | |
274 | case MLX4_IB_QPT_TUN_GSI: | |
275 | return sizeof (struct mlx4_wqe_ctrl_seg) + | |
276 | sizeof (struct mlx4_wqe_datagram_seg); | |
277 | ||
278 | case MLX4_IB_QPT_UC: | |
225c7b1f RD |
279 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
280 | sizeof (struct mlx4_wqe_raddr_seg); | |
1ffeb2eb | 281 | case MLX4_IB_QPT_RC: |
225c7b1f | 282 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
f2940e2c | 283 | sizeof (struct mlx4_wqe_masked_atomic_seg) + |
225c7b1f | 284 | sizeof (struct mlx4_wqe_raddr_seg); |
1ffeb2eb JM |
285 | case MLX4_IB_QPT_SMI: |
286 | case MLX4_IB_QPT_GSI: | |
225c7b1f RD |
287 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
288 | ALIGN(MLX4_IB_UD_HEADER_SIZE + | |
e61ef241 RD |
289 | DIV_ROUND_UP(MLX4_IB_UD_HEADER_SIZE, |
290 | MLX4_INLINE_ALIGN) * | |
225c7b1f RD |
291 | sizeof (struct mlx4_wqe_inline_seg), |
292 | sizeof (struct mlx4_wqe_data_seg)) + | |
293 | ALIGN(4 + | |
294 | sizeof (struct mlx4_wqe_inline_seg), | |
295 | sizeof (struct mlx4_wqe_data_seg)); | |
296 | default: | |
297 | return sizeof (struct mlx4_wqe_ctrl_seg); | |
298 | } | |
299 | } | |
300 | ||
2446304d | 301 | static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, |
913df8c3 | 302 | bool is_user, bool has_rq, struct mlx4_ib_qp *qp, |
ea30b966 | 303 | u32 inl_recv_sz) |
225c7b1f | 304 | { |
2446304d | 305 | /* Sanity check RQ size before proceeding */ |
fc2d0044 SG |
306 | if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE || |
307 | cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg)) | |
2446304d EC |
308 | return -EINVAL; |
309 | ||
0a1405da | 310 | if (!has_rq) { |
ea30b966 | 311 | if (cap->max_recv_wr || inl_recv_sz) |
a4cd7ed8 | 312 | return -EINVAL; |
2446304d | 313 | |
0e6e7416 | 314 | qp->rq.wqe_cnt = qp->rq.max_gs = 0; |
a4cd7ed8 | 315 | } else { |
ea30b966 MG |
316 | u32 max_inl_recv_sz = dev->dev->caps.max_rq_sg * |
317 | sizeof(struct mlx4_wqe_data_seg); | |
318 | u32 wqe_size; | |
319 | ||
a4cd7ed8 | 320 | /* HW requires >= 1 RQ entry with >= 1 gather entry */ |
ea30b966 MG |
321 | if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge || |
322 | inl_recv_sz > max_inl_recv_sz)) | |
a4cd7ed8 RD |
323 | return -EINVAL; |
324 | ||
0e6e7416 | 325 | qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); |
42c059ea | 326 | qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); |
ea30b966 MG |
327 | wqe_size = qp->rq.max_gs * sizeof(struct mlx4_wqe_data_seg); |
328 | qp->rq.wqe_shift = ilog2(max_t(u32, wqe_size, inl_recv_sz)); | |
a4cd7ed8 | 329 | } |
2446304d | 330 | |
fc2d0044 SG |
331 | /* leave userspace return values as they were, so as not to break ABI */ |
332 | if (is_user) { | |
333 | cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; | |
334 | cap->max_recv_sge = qp->rq.max_gs; | |
335 | } else { | |
336 | cap->max_recv_wr = qp->rq.max_post = | |
337 | min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); | |
338 | cap->max_recv_sge = min(qp->rq.max_gs, | |
339 | min(dev->dev->caps.max_sq_sg, | |
340 | dev->dev->caps.max_rq_sg)); | |
341 | } | |
2446304d EC |
342 | |
343 | return 0; | |
344 | } | |
345 | ||
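As a worked example of the sizing above (a standalone sketch; the request values are invented, and 16 bytes is sizeof(struct mlx4_wqe_data_seg)): asking for 100 receive WRs with 3 SGEs each and no inline receive ends up as 128 WQEs of 4 scatter entries with a 64-byte stride.

```c
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers used by set_rq_size(). */
static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

static unsigned int ilog2(unsigned int x)
{
	unsigned int l = 0;

	while (x >>= 1)
		l++;
	return l;
}

int main(void)
{
	/* Hypothetical request: 100 receive WRs, 3 SGEs each, no inline recv. */
	unsigned int max_recv_wr = 100, max_recv_sge = 3, inl_recv_sz = 0;
	unsigned int data_seg_sz = 16;	/* sizeof(struct mlx4_wqe_data_seg) */

	unsigned int wqe_cnt = roundup_pow_of_two(max_recv_wr ? max_recv_wr : 1);
	unsigned int max_gs = roundup_pow_of_two(max_recv_sge ? max_recv_sge : 1);
	unsigned int wqe_size = max_gs * data_seg_sz;
	unsigned int wqe_shift =
		ilog2(wqe_size > inl_recv_sz ? wqe_size : inl_recv_sz);

	printf("wqe_cnt=%u max_gs=%u wqe_shift=%u (stride %u bytes)\n",
	       wqe_cnt, max_gs, wqe_shift, 1u << wqe_shift);
	return 0;
}
```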
346 | static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |
f95ccffc | 347 | enum mlx4_ib_qp_type type, struct mlx4_ib_qp *qp) |
2446304d | 348 | { |
ea54b10c JM |
349 | int s; |
350 | ||
2446304d | 351 | /* Sanity check SQ size before proceeding */ |
fc2d0044 SG |
352 | if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) || |
353 | cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) || | |
b832be1e | 354 | cap->max_inline_data + send_wqe_overhead(type, qp->flags) + |
225c7b1f RD |
355 | sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) |
356 | return -EINVAL; | |
357 | ||
358 | /* | |
359 | * For MLX transport we need 2 extra S/G entries: | |
360 | * one for the header and one for the checksum at the end | |
361 | */ | |
1ffeb2eb JM |
362 | if ((type == MLX4_IB_QPT_SMI || type == MLX4_IB_QPT_GSI || |
363 | type & (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) && | |
225c7b1f RD |
364 | cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) |
365 | return -EINVAL; | |
366 | ||
ea54b10c JM |
367 | s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), |
368 | cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + | |
b832be1e | 369 | send_wqe_overhead(type, qp->flags); |
225c7b1f | 370 | |
cd155c1c RD |
371 | if (s > dev->dev->caps.max_sq_desc_sz) |
372 | return -EINVAL; | |
373 | ||
f95ccffc JM |
374 | qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); |
375 | ||
0e6e7416 | 376 | /* |
f95ccffc JM |
377 | * We need to leave 2 KB + 1 WR of headroom in the SQ to |
378 | * allow HW to prefetch. | |
0e6e7416 | 379 | */ |
350b4c8a | 380 | qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift); |
f95ccffc JM |
381 | qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + |
382 | qp->sq_spare_wqes); | |
383 | ||
384 | qp->sq.max_gs = | |
385 | (min(dev->dev->caps.max_sq_desc_sz, | |
386 | (1 << qp->sq.wqe_shift)) - | |
387 | send_wqe_overhead(type, qp->flags)) / | |
b832be1e | 388 | sizeof (struct mlx4_wqe_data_seg); |
0e6e7416 RD |
389 | |
390 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + | |
391 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); | |
225c7b1f RD |
392 | if (qp->rq.wqe_shift > qp->sq.wqe_shift) { |
393 | qp->rq.offset = 0; | |
0e6e7416 | 394 | qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; |
225c7b1f | 395 | } else { |
0e6e7416 | 396 | qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift; |
225c7b1f RD |
397 | qp->sq.offset = 0; |
398 | } | |
399 | ||
ea54b10c | 400 | cap->max_send_wr = qp->sq.max_post = |
f95ccffc | 401 | qp->sq.wqe_cnt - qp->sq_spare_wqes; |
cd155c1c RD |
402 | cap->max_send_sge = min(qp->sq.max_gs, |
403 | min(dev->dev->caps.max_sq_sg, | |
404 | dev->dev->caps.max_rq_sg)); | |
54e95f8d RD |
405 | /* We don't support inline sends for kernel QPs (yet) */ |
406 | cap->max_inline_data = 0; | |
225c7b1f RD |
407 | |
408 | return 0; | |
409 | } | |
410 | ||
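The buf_size and offset lines at the end of set_kernel_sq_size() (repeated in set_user_sq_size() below) pack the RQ and SQ back to back in one buffer, placing whichever ring has the larger WQE stride at offset 0; the ring that follows then starts at a multiple of its own, smaller, stride. A standalone sketch with invented counts and strides:

```c
#include <stdio.h>

struct ring {
	unsigned int wqe_cnt;	/* power of two */
	unsigned int wqe_shift;	/* log2 of the WQE stride in bytes */
	unsigned int offset;	/* byte offset inside the QP buffer */
};

int main(void)
{
	/* Invented example: 16 RQ entries of 64 bytes, 64 SQ entries of 256 bytes. */
	struct ring rq = { .wqe_cnt = 16, .wqe_shift = 6 };
	struct ring sq = { .wqe_cnt = 64, .wqe_shift = 8 };
	unsigned int buf_size = (rq.wqe_cnt << rq.wqe_shift) +
				(sq.wqe_cnt << sq.wqe_shift);

	/* Same rule as the driver: the larger-stride ring goes first. */
	if (rq.wqe_shift > sq.wqe_shift) {
		rq.offset = 0;
		sq.offset = rq.wqe_cnt << rq.wqe_shift;
	} else {
		rq.offset = sq.wqe_cnt << sq.wqe_shift;
		sq.offset = 0;
	}

	printf("buf_size=%u rq.offset=%u sq.offset=%u\n",
	       buf_size, rq.offset, sq.offset);
	return 0;
}
```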
83904132 JM |
411 | static int set_user_sq_size(struct mlx4_ib_dev *dev, |
412 | struct mlx4_ib_qp *qp, | |
2446304d EC |
413 | struct mlx4_ib_create_qp *ucmd) |
414 | { | |
83904132 JM |
415 | /* Sanity check SQ size before proceeding */ |
416 | if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes || | |
417 | ucmd->log_sq_stride > | |
418 | ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) || | |
419 | ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE) | |
420 | return -EINVAL; | |
421 | ||
0e6e7416 | 422 | qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; |
2446304d EC |
423 | qp->sq.wqe_shift = ucmd->log_sq_stride; |
424 | ||
0e6e7416 RD |
425 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + |
426 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); | |
2446304d EC |
427 | |
428 | return 0; | |
429 | } | |
430 | ||
1ffeb2eb JM |
431 | static int alloc_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) |
432 | { | |
433 | int i; | |
434 | ||
435 | qp->sqp_proxy_rcv = | |
6da2ec56 KC |
436 | kmalloc_array(qp->rq.wqe_cnt, sizeof(struct mlx4_ib_buf), |
437 | GFP_KERNEL); | |
1ffeb2eb JM |
438 | if (!qp->sqp_proxy_rcv) |
439 | return -ENOMEM; | |
440 | for (i = 0; i < qp->rq.wqe_cnt; i++) { | |
441 | qp->sqp_proxy_rcv[i].addr = | |
442 | kmalloc(sizeof (struct mlx4_ib_proxy_sqp_hdr), | |
443 | GFP_KERNEL); | |
444 | if (!qp->sqp_proxy_rcv[i].addr) | |
445 | goto err; | |
446 | qp->sqp_proxy_rcv[i].map = | |
447 | ib_dma_map_single(dev, qp->sqp_proxy_rcv[i].addr, | |
448 | sizeof (struct mlx4_ib_proxy_sqp_hdr), | |
449 | DMA_FROM_DEVICE); | |
cc47d369 SO |
450 | if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { |
451 | kfree(qp->sqp_proxy_rcv[i].addr); | |
452 | goto err; | |
453 | } | |
1ffeb2eb JM |
454 | } |
455 | return 0; | |
456 | ||
457 | err: | |
458 | while (i > 0) { | |
459 | --i; | |
460 | ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, | |
461 | sizeof (struct mlx4_ib_proxy_sqp_hdr), | |
462 | DMA_FROM_DEVICE); | |
463 | kfree(qp->sqp_proxy_rcv[i].addr); | |
464 | } | |
465 | kfree(qp->sqp_proxy_rcv); | |
466 | qp->sqp_proxy_rcv = NULL; | |
467 | return -ENOMEM; | |
468 | } | |
469 | ||
470 | static void free_proxy_bufs(struct ib_device *dev, struct mlx4_ib_qp *qp) | |
471 | { | |
472 | int i; | |
473 | ||
474 | for (i = 0; i < qp->rq.wqe_cnt; i++) { | |
475 | ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, | |
476 | sizeof (struct mlx4_ib_proxy_sqp_hdr), | |
477 | DMA_FROM_DEVICE); | |
478 | kfree(qp->sqp_proxy_rcv[i].addr); | |
479 | } | |
480 | kfree(qp->sqp_proxy_rcv); | |
481 | } | |
482 | ||
913df8c3 | 483 | static bool qp_has_rq(struct ib_qp_init_attr *attr) |
0a1405da SH |
484 | { |
485 | if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT) | |
913df8c3 | 486 | return false; |
0a1405da SH |
487 | |
488 | return !attr->srq; | |
489 | } | |
490 | ||
99ec41d0 JM |
491 | static int qp0_enabled_vf(struct mlx4_dev *dev, int qpn) |
492 | { | |
493 | int i; | |
494 | for (i = 0; i < dev->caps.num_ports; i++) { | |
c73c8b1e EBE |
495 | if (qpn == dev->caps.spec_qps[i].qp0_proxy) |
496 | return !!dev->caps.spec_qps[i].qp0_qkey; | |
99ec41d0 JM |
497 | } |
498 | return 0; | |
499 | } | |
500 | ||
7b59f0f9 EBE |
501 | static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, |
502 | struct mlx4_ib_qp *qp) | |
503 | { | |
504 | mutex_lock(&dev->counters_table[qp->port - 1].mutex); | |
505 | mlx4_counter_free(dev->dev, qp->counter_index->index); | |
506 | list_del(&qp->counter_index->list); | |
507 | mutex_unlock(&dev->counters_table[qp->port - 1].mutex); | |
508 | ||
509 | kfree(qp->counter_index); | |
510 | qp->counter_index = NULL; | |
511 | } | |
512 | ||
3078f5f1 GL |
513 | static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, |
514 | struct ib_qp_init_attr *init_attr, | |
515 | struct mlx4_ib_create_qp_rss *ucmd) | |
516 | { | |
517 | rss_ctx->base_qpn_tbl_sz = init_attr->rwq_ind_tbl->ind_tbl[0]->wq_num | | |
518 | (init_attr->rwq_ind_tbl->log_ind_tbl_size << 24); | |
519 | ||
520 | if ((ucmd->rx_hash_function == MLX4_IB_RX_HASH_FUNC_TOEPLITZ) && | |
521 | (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) { | |
522 | memcpy(rss_ctx->rss_key, ucmd->rx_hash_key, | |
523 | MLX4_EN_RSS_KEY_SIZE); | |
524 | } else { | |
525 | pr_debug("RX Hash function is not supported\n"); | |
526 | return (-EOPNOTSUPP); | |
527 | } | |
528 | ||
4d02ebd9 GL |
529 | if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | |
530 | MLX4_IB_RX_HASH_DST_IPV4 | | |
531 | MLX4_IB_RX_HASH_SRC_IPV6 | | |
532 | MLX4_IB_RX_HASH_DST_IPV6 | | |
533 | MLX4_IB_RX_HASH_SRC_PORT_TCP | | |
534 | MLX4_IB_RX_HASH_DST_PORT_TCP | | |
535 | MLX4_IB_RX_HASH_SRC_PORT_UDP | | |
4f9ca2d8 LR |
536 | MLX4_IB_RX_HASH_DST_PORT_UDP | |
537 | MLX4_IB_RX_HASH_INNER)) { | |
4d02ebd9 GL |
538 | pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", |
539 | ucmd->rx_hash_fields_mask); | |
540 | return (-EOPNOTSUPP); | |
541 | } | |
542 | ||
3078f5f1 GL |
543 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && |
544 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { | |
545 | rss_ctx->flags = MLX4_RSS_IPV4; | |
546 | } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) || | |
547 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { | |
548 | pr_debug("RX Hash fields_mask is not supported - both IPv4 SRC and DST must be set\n"); | |
549 | return (-EOPNOTSUPP); | |
550 | } | |
551 | ||
552 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) && | |
553 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) { | |
554 | rss_ctx->flags |= MLX4_RSS_IPV6; | |
555 | } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV6) || | |
556 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV6)) { | |
557 | pr_debug("RX Hash fields_mask is not supported - both IPv6 SRC and DST must be set\n"); | |
558 | return (-EOPNOTSUPP); | |
559 | } | |
560 | ||
561 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) && | |
562 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) { | |
563 | if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UDP_RSS)) { | |
564 | pr_debug("RX Hash fields_mask for UDP is not supported\n"); | |
565 | return (-EOPNOTSUPP); | |
566 | } | |
567 | ||
4d02ebd9 | 568 | if (rss_ctx->flags & MLX4_RSS_IPV4) |
3078f5f1 | 569 | rss_ctx->flags |= MLX4_RSS_UDP_IPV4; |
4d02ebd9 | 570 | if (rss_ctx->flags & MLX4_RSS_IPV6) |
3078f5f1 | 571 | rss_ctx->flags |= MLX4_RSS_UDP_IPV6; |
4d02ebd9 | 572 | if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { |
3078f5f1 GL |
573 | pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); |
574 | return (-EOPNOTSUPP); | |
575 | } | |
576 | } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_UDP) || | |
577 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_UDP)) { | |
578 | pr_debug("RX Hash fields_mask is not supported - both UDP SRC and DST must be set\n"); | |
579 | return (-EOPNOTSUPP); | |
580 | } | |
581 | ||
582 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && | |
583 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { | |
4d02ebd9 | 584 | if (rss_ctx->flags & MLX4_RSS_IPV4) |
3078f5f1 | 585 | rss_ctx->flags |= MLX4_RSS_TCP_IPV4; |
4d02ebd9 | 586 | if (rss_ctx->flags & MLX4_RSS_IPV6) |
3078f5f1 | 587 | rss_ctx->flags |= MLX4_RSS_TCP_IPV6; |
4d02ebd9 | 588 | if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { |
3078f5f1 GL |
589 | pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); |
590 | return (-EOPNOTSUPP); | |
591 | } | |
3078f5f1 GL |
592 | } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || |
593 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { | |
594 | pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); | |
595 | return (-EOPNOTSUPP); | |
596 | } | |
597 | ||
07d84f7b GL |
598 | if (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_INNER) { |
599 | if (dev->dev->caps.tunnel_offload_mode == | |
600 | MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { | |
601 | /* | |
602 | * Hash according to inner headers if they exist, otherwise
603 | * according to outer headers. | |
604 | */ | |
605 | rss_ctx->flags |= MLX4_RSS_BY_INNER_HEADERS_IPONLY; | |
606 | } else { | |
607 | pr_debug("RSS Hash for inner headers isn't supported\n"); | |
608 | return (-EOPNOTSUPP); | |
609 | } | |
610 | } | |
611 | ||
3078f5f1 GL |
612 | return 0; |
613 | } | |
614 | ||
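For reference, a userspace command that satisfies the checks in set_qp_rss() could be filled in as below. This is only a sketch: fill_rss_cmd() is a hypothetical helper, and it assumes the struct mlx4_ib_create_qp_rss layout and flag names from the installed <rdma/mlx4-abi.h> UAPI header. It requests the Toeplitz hash over IPv4 source/destination addresses plus TCP source/destination ports, which the code above maps to MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4.

```c
#include <string.h>
#include <rdma/mlx4-abi.h>

/* Hypothetical helper (not part of the driver or libibverbs): build a
 * vendor create-QP command that passes set_qp_rss().  All reserved fields
 * must stay zero, and SRC/DST flags must be set in pairs.
 */
static void fill_rss_cmd(struct mlx4_ib_create_qp_rss *cmd, const __u8 *key)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->rx_hash_function = MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
	cmd->rx_hash_fields_mask = MLX4_IB_RX_HASH_SRC_IPV4 |
				   MLX4_IB_RX_HASH_DST_IPV4 |
				   MLX4_IB_RX_HASH_SRC_PORT_TCP |
				   MLX4_IB_RX_HASH_DST_PORT_TCP;

	/* key must be MLX4_EN_RSS_KEY_SIZE (40) bytes of (ideally random) data */
	memcpy(cmd->rx_hash_key, key, sizeof(cmd->rx_hash_key));
}
```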
d7c0557a | 615 | static int create_qp_rss(struct mlx4_ib_dev *dev, |
3078f5f1 GL |
616 | struct ib_qp_init_attr *init_attr, |
617 | struct mlx4_ib_create_qp_rss *ucmd, | |
618 | struct mlx4_ib_qp *qp) | |
619 | { | |
620 | int qpn; | |
621 | int err; | |
622 | ||
623 | qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; | |
624 | ||
625 | err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage); | |
626 | if (err) | |
627 | return err; | |
628 | ||
629 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); | |
630 | if (err) | |
631 | goto err_qpn; | |
632 | ||
3078f5f1 GL |
633 | INIT_LIST_HEAD(&qp->gid_list); |
634 | INIT_LIST_HEAD(&qp->steering_rules); | |
635 | ||
c3f1ee29 | 636 | qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; |
3078f5f1 GL |
637 | qp->state = IB_QPS_RESET; |
638 | ||
639 | /* Set dummy send resources to be compatible with HV and PRM */ | |
640 | qp->sq_no_prefetch = 1; | |
641 | qp->sq.wqe_cnt = 1; | |
642 | qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; | |
643 | qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE; | |
644 | qp->mtt = (to_mqp( | |
645 | (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt; | |
646 | ||
647 | qp->rss_ctx = kzalloc(sizeof(*qp->rss_ctx), GFP_KERNEL); | |
648 | if (!qp->rss_ctx) { | |
649 | err = -ENOMEM; | |
650 | goto err_qp_alloc; | |
651 | } | |
652 | ||
653 | err = set_qp_rss(dev, qp->rss_ctx, init_attr, ucmd); | |
654 | if (err) | |
655 | goto err; | |
656 | ||
657 | return 0; | |
658 | ||
659 | err: | |
660 | kfree(qp->rss_ctx); | |
661 | ||
662 | err_qp_alloc: | |
663 | mlx4_qp_remove(dev->dev, &qp->mqp); | |
664 | mlx4_qp_free(dev->dev, &qp->mqp); | |
665 | ||
666 | err_qpn: | |
667 | mlx4_qp_release_range(dev->dev, qpn, 1); | |
668 | return err; | |
669 | } | |
670 | ||
8fd3cd2a LR |
671 | static int _mlx4_ib_create_qp_rss(struct ib_pd *pd, struct mlx4_ib_qp *qp, |
672 | struct ib_qp_init_attr *init_attr, | |
673 | struct ib_udata *udata) | |
3078f5f1 | 674 | { |
3078f5f1 GL |
675 | struct mlx4_ib_create_qp_rss ucmd = {}; |
676 | size_t required_cmd_sz; | |
677 | int err; | |
678 | ||
679 | if (!udata) { | |
680 | pr_debug("RSS QP with NULL udata\n"); | |
8fd3cd2a | 681 | return -EINVAL; |
3078f5f1 GL |
682 | } |
683 | ||
684 | if (udata->outlen) | |
8fd3cd2a | 685 | return -EOPNOTSUPP; |
3078f5f1 GL |
686 | |
687 | required_cmd_sz = offsetof(typeof(ucmd), reserved1) + | |
688 | sizeof(ucmd.reserved1); | |
689 | if (udata->inlen < required_cmd_sz) { | |
690 | pr_debug("invalid inlen\n"); | |
8fd3cd2a | 691 | return -EINVAL; |
3078f5f1 GL |
692 | } |
693 | ||
694 | if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) { | |
695 | pr_debug("copy failed\n"); | |
8fd3cd2a | 696 | return -EFAULT; |
3078f5f1 GL |
697 | } |
698 | ||
f9bfea99 | 699 | if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved))) |
8fd3cd2a | 700 | return -EOPNOTSUPP; |
f9bfea99 | 701 | |
3078f5f1 | 702 | if (ucmd.comp_mask || ucmd.reserved1) |
8fd3cd2a | 703 | return -EOPNOTSUPP; |
3078f5f1 GL |
704 | |
705 | if (udata->inlen > sizeof(ucmd) && | |
706 | !ib_is_udata_cleared(udata, sizeof(ucmd), | |
707 | udata->inlen - sizeof(ucmd))) { | |
708 | pr_debug("inlen is not supported\n"); | |
8fd3cd2a | 709 | return -EOPNOTSUPP; |
3078f5f1 GL |
710 | } |
711 | ||
712 | if (init_attr->qp_type != IB_QPT_RAW_PACKET) { | |
713 | pr_debug("RSS QP with unsupported QP type %d\n", | |
714 | init_attr->qp_type); | |
8fd3cd2a | 715 | return -EOPNOTSUPP; |
3078f5f1 GL |
716 | } |
717 | ||
718 | if (init_attr->create_flags) { | |
719 | pr_debug("RSS QP doesn't support create flags\n"); | |
8fd3cd2a | 720 | return -EOPNOTSUPP; |
3078f5f1 GL |
721 | } |
722 | ||
723 | if (init_attr->send_cq || init_attr->cap.max_send_wr) { | |
724 | pr_debug("RSS QP with unsupported send attributes\n"); | |
8fd3cd2a | 725 | return -EOPNOTSUPP; |
3078f5f1 GL |
726 | } |
727 | ||
3078f5f1 GL |
728 | qp->pri.vid = 0xFFFF; |
729 | qp->alt.vid = 0xFFFF; | |
730 | ||
d7c0557a | 731 | err = create_qp_rss(to_mdev(pd->device), init_attr, &ucmd, qp); |
8fd3cd2a LR |
732 | if (err) |
733 | return err; | |
3078f5f1 GL |
734 | |
735 | qp->ibqp.qp_num = qp->mqp.qpn; | |
8fd3cd2a | 736 | return 0; |
3078f5f1 GL |
737 | } |
738 | ||
400b1ebc GL |
739 | /* |
740 | * This function allocates a WQN from a range which is consecutive and aligned | |
741 | * to its size. If the range is full, it creates a new range and
742 | * allocates a WQN from it. The new range is then used for subsequent allocations.
743 | */ | |
744 | static int mlx4_ib_alloc_wqn(struct mlx4_ib_ucontext *context, | |
745 | struct mlx4_ib_qp *qp, int range_size, int *wqn) | |
746 | { | |
747 | struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device); | |
748 | struct mlx4_wqn_range *range; | |
749 | int err = 0; | |
750 | ||
751 | mutex_lock(&context->wqn_ranges_mutex); | |
752 | ||
753 | range = list_first_entry_or_null(&context->wqn_ranges_list, | |
754 | struct mlx4_wqn_range, list); | |
755 | ||
756 | if (!range || (range->refcount == range->size) || range->dirty) { | |
757 | range = kzalloc(sizeof(*range), GFP_KERNEL); | |
758 | if (!range) { | |
759 | err = -ENOMEM; | |
760 | goto out; | |
761 | } | |
762 | ||
763 | err = mlx4_qp_reserve_range(dev->dev, range_size, | |
764 | range_size, &range->base_wqn, 0, | |
765 | qp->mqp.usage); | |
766 | if (err) { | |
767 | kfree(range); | |
768 | goto out; | |
769 | } | |
770 | ||
771 | range->size = range_size; | |
772 | list_add(&range->list, &context->wqn_ranges_list); | |
773 | } else if (range_size != 1) { | |
774 | /* | |
775 | * Requesting a new range (>1) while the last range is still open is
776 | * not valid.
777 | */ | |
778 | err = -EINVAL; | |
779 | goto out; | |
780 | } | |
781 | ||
782 | qp->wqn_range = range; | |
783 | ||
784 | *wqn = range->base_wqn + range->refcount; | |
785 | ||
786 | range->refcount++; | |
787 | ||
788 | out: | |
789 | mutex_unlock(&context->wqn_ranges_mutex); | |
790 | ||
791 | return err; | |
792 | } | |
793 | ||
794 | static void mlx4_ib_release_wqn(struct mlx4_ib_ucontext *context, | |
795 | struct mlx4_ib_qp *qp, bool dirty_release) | |
796 | { | |
797 | struct mlx4_ib_dev *dev = to_mdev(context->ibucontext.device); | |
798 | struct mlx4_wqn_range *range; | |
799 | ||
800 | mutex_lock(&context->wqn_ranges_mutex); | |
801 | ||
802 | range = qp->wqn_range; | |
803 | ||
804 | range->refcount--; | |
805 | if (!range->refcount) { | |
806 | mlx4_qp_release_range(dev->dev, range->base_wqn, | |
807 | range->size); | |
808 | list_del(&range->list); | |
809 | kfree(range); | |
810 | } else if (dirty_release) { | |
811 | /* | |
812 | * A range in which one of the WQNs was destroyed cannot be
813 | * reused for further WQN allocations.
814 | * The next created WQ will allocate a new range. | |
815 | */ | |
cf368beb | 816 | range->dirty = true; |
400b1ebc GL |
817 | } |
818 | ||
819 | mutex_unlock(&context->wqn_ranges_mutex); | |
820 | } | |
821 | ||
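The policy described in the comment above mlx4_ib_alloc_wqn() can be modelled in a few lines. This is only a toy illustration: locking is dropped, the range_size sanity check and the dirty-release interaction with mlx4_ib_release_wqn() are omitted, and mlx4_qp_reserve_range() is replaced by a fake base counter.

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the WQN range allocator used by the RWQ code. */
struct wqn_range_model {
	int base;
	int size;
	int refcount;
	bool dirty;
};

static int fake_fw_base = 0x100;	/* stand-in for mlx4_qp_reserve_range() */

static int alloc_wqn_model(struct wqn_range_model *range, int range_size)
{
	if (range->refcount == range->size || range->dirty) {
		/* current range is closed: open a new, size-aligned one */
		range->base = fake_fw_base;
		range->size = range_size;
		range->refcount = 0;
		range->dirty = false;
		fake_fw_base += range_size;
	}
	return range->base + range->refcount++;
}

int main(void)
{
	struct wqn_range_model r = { 0 };	/* starts out "full" */
	int i;

	for (i = 0; i < 5; i++)
		printf("WQ %d gets WQN 0x%x\n", i, alloc_wqn_model(&r, 4));
	return 0;
}
```

Consecutive, size-aligned WQNs matter because set_qp_rss() encodes only the first WQ number of an indirection table together with its log size in base_qpn_tbl_sz, so the WQs behind one table must occupy a contiguous, aligned block.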
089b645d LR |
822 | static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, |
823 | struct ib_udata *udata, struct mlx4_ib_qp *qp) | |
824 | { | |
825 | struct mlx4_ib_dev *dev = to_mdev(pd->device); | |
826 | int qpn; | |
827 | int err; | |
828 | struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( | |
829 | udata, struct mlx4_ib_ucontext, ibucontext); | |
830 | struct mlx4_ib_cq *mcq; | |
831 | unsigned long flags; | |
832 | int range_size; | |
833 | struct mlx4_ib_create_wq wq; | |
834 | size_t copy_len; | |
835 | int shift; | |
836 | int n; | |
837 | ||
838 | qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET; | |
839 | ||
089b645d LR |
840 | spin_lock_init(&qp->sq.lock); |
841 | spin_lock_init(&qp->rq.lock); | |
842 | INIT_LIST_HEAD(&qp->gid_list); | |
843 | INIT_LIST_HEAD(&qp->steering_rules); | |
844 | ||
845 | qp->state = IB_QPS_RESET; | |
846 | ||
847 | copy_len = min(sizeof(struct mlx4_ib_create_wq), udata->inlen); | |
848 | ||
849 | if (ib_copy_from_udata(&wq, udata, copy_len)) { | |
850 | err = -EFAULT; | |
851 | goto err; | |
852 | } | |
853 | ||
854 | if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] || | |
855 | wq.reserved[2]) { | |
856 | pr_debug("user command isn't supported\n"); | |
857 | err = -EOPNOTSUPP; | |
858 | goto err; | |
859 | } | |
860 | ||
861 | if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) { | |
862 | pr_debug("WQN range size must be equal or smaller than %d\n", | |
863 | dev->dev->caps.max_rss_tbl_sz); | |
864 | err = -EOPNOTSUPP; | |
865 | goto err; | |
866 | } | |
867 | range_size = 1 << wq.log_range_size; | |
868 | ||
869 | if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) | |
870 | qp->flags |= MLX4_IB_QP_SCATTER_FCS; | |
871 | ||
913df8c3 | 872 | err = set_rq_size(dev, &init_attr->cap, true, true, qp, qp->inl_recv_sz); |
089b645d LR |
873 | if (err) |
874 | goto err; | |
875 | ||
876 | qp->sq_no_prefetch = 1; | |
877 | qp->sq.wqe_cnt = 1; | |
878 | qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE; | |
879 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + | |
880 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); | |
881 | ||
c320e527 | 882 | qp->umem = ib_umem_get(pd->device, wq.buf_addr, qp->buf_size, 0); |
089b645d LR |
883 | if (IS_ERR(qp->umem)) { |
884 | err = PTR_ERR(qp->umem); | |
885 | goto err; | |
886 | } | |
887 | ||
089b645d LR |
888 | shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); |
889 | err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); | |
890 | ||
891 | if (err) | |
892 | goto err_buf; | |
893 | ||
894 | err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); | |
895 | if (err) | |
896 | goto err_mtt; | |
897 | ||
898 | err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db); | |
899 | if (err) | |
900 | goto err_mtt; | |
901 | qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; | |
902 | ||
903 | err = mlx4_ib_alloc_wqn(context, qp, range_size, &qpn); | |
904 | if (err) | |
905 | goto err_wrid; | |
906 | ||
907 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); | |
908 | if (err) | |
909 | goto err_qpn; | |
910 | ||
911 | /* | |
912 | * Hardware wants QPN written in big-endian order (after | |
913 | * shifting) for send doorbell. Precompute this value to save | |
914 | * a little bit when posting sends. | |
915 | */ | |
916 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); | |
917 | ||
918 | qp->mqp.event = mlx4_ib_wq_event; | |
919 | ||
920 | spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); | |
921 | mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), | |
922 | to_mcq(init_attr->recv_cq)); | |
923 | /* Maintain device to QPs access, needed for further handling | |
924 | * via reset flow | |
925 | */ | |
926 | list_add_tail(&qp->qps_list, &dev->qp_list); | |
927 | /* Maintain CQ to QPs access, needed for further handling | |
928 | * via reset flow | |
929 | */ | |
930 | mcq = to_mcq(init_attr->send_cq); | |
931 | list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); | |
932 | mcq = to_mcq(init_attr->recv_cq); | |
933 | list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); | |
934 | mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), | |
935 | to_mcq(init_attr->recv_cq)); | |
936 | spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); | |
937 | return 0; | |
938 | ||
939 | err_qpn: | |
940 | mlx4_ib_release_wqn(context, qp, 0); | |
941 | err_wrid: | |
942 | mlx4_ib_db_unmap_user(context, &qp->db); | |
943 | ||
944 | err_mtt: | |
945 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); | |
946 | err_buf: | |
947 | ib_umem_release(qp->umem); | |
948 | err: | |
949 | return err; | |
950 | } | |
951 | ||
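Both create_rq() above and create_qp_common() below precompute qp->doorbell_qpn = swab32(qp->mqp.qpn << 8). On a little-endian host the byte swap produces exactly the big-endian representation of the shifted QPN that the send doorbell expects, so no conversion is needed on the hot post-send path. A standalone check of that equivalence (userspace helpers, little-endian host assumed):

```c
#include <byteswap.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t qpn = 0x00abcdef;		/* hypothetical QPN */
	uint32_t doorbell = bswap_32(qpn << 8);	/* what the driver precomputes */

	/* On little-endian CPUs bswap_32() and htobe32() agree. */
	printf("qpn<<8=0x%08x  swab32=0x%08x  htobe32=0x%08x\n",
	       qpn << 8, doorbell, htobe32(qpn << 8));
	return 0;
}
```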
952 | static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, | |
8900b894 | 953 | struct ib_udata *udata, int sqpn, |
8fd3cd2a | 954 | struct mlx4_ib_qp *qp) |
225c7b1f | 955 | { |
089b645d | 956 | struct mlx4_ib_dev *dev = to_mdev(pd->device); |
a3cdcbfa | 957 | int qpn; |
225c7b1f | 958 | int err; |
89944450 SR |
959 | struct mlx4_ib_ucontext *context = rdma_udata_to_drv_context( |
960 | udata, struct mlx4_ib_ucontext, ibucontext); | |
1ffeb2eb | 961 | enum mlx4_ib_qp_type qp_type = (enum mlx4_ib_qp_type) init_attr->qp_type; |
35f05dab YH |
962 | struct mlx4_ib_cq *mcq; |
963 | unsigned long flags; | |
1ffeb2eb JM |
964 | |
965 | /* When tunneling special qps, we use a plain UD qp */ | |
966 | if (sqpn) { | |
967 | if (mlx4_is_mfunc(dev->dev) && | |
968 | (!mlx4_is_master(dev->dev) || | |
969 | !(init_attr->create_flags & MLX4_IB_SRIOV_SQP))) { | |
970 | if (init_attr->qp_type == IB_QPT_GSI) | |
971 | qp_type = MLX4_IB_QPT_PROXY_GSI; | |
99ec41d0 JM |
972 | else { |
973 | if (mlx4_is_master(dev->dev) || | |
974 | qp0_enabled_vf(dev->dev, sqpn)) | |
975 | qp_type = MLX4_IB_QPT_PROXY_SMI_OWNER; | |
976 | else | |
977 | qp_type = MLX4_IB_QPT_PROXY_SMI; | |
978 | } | |
1ffeb2eb JM |
979 | } |
980 | qpn = sqpn; | |
981 | /* add extra sg entry for tunneling */ | |
982 | init_attr->cap.max_recv_sge++; | |
983 | } else if (init_attr->create_flags & MLX4_IB_SRIOV_TUNNEL_QP) { | |
984 | struct mlx4_ib_qp_tunnel_init_attr *tnl_init = | |
985 | container_of(init_attr, | |
986 | struct mlx4_ib_qp_tunnel_init_attr, init_attr); | |
987 | if ((tnl_init->proxy_qp_type != IB_QPT_SMI && | |
988 | tnl_init->proxy_qp_type != IB_QPT_GSI) || | |
989 | !mlx4_is_master(dev->dev)) | |
990 | return -EINVAL; | |
991 | if (tnl_init->proxy_qp_type == IB_QPT_GSI) | |
992 | qp_type = MLX4_IB_QPT_TUN_GSI; | |
99ec41d0 JM |
993 | else if (tnl_init->slave == mlx4_master_func_num(dev->dev) || |
994 | mlx4_vf_smi_enabled(dev->dev, tnl_init->slave, | |
995 | tnl_init->port)) | |
1ffeb2eb JM |
996 | qp_type = MLX4_IB_QPT_TUN_SMI_OWNER; |
997 | else | |
998 | qp_type = MLX4_IB_QPT_TUN_SMI; | |
47605df9 JM |
999 | /* we are definitely in the PPF here, since we are creating |
1000 | * tunnel QPs. base_tunnel_sqpn is therefore valid. */ | |
1001 | qpn = dev->dev->phys_caps.base_tunnel_sqpn + 8 * tnl_init->slave | |
1002 | + tnl_init->proxy_qp_type * 2 + tnl_init->port - 1; | |
1ffeb2eb JM |
1003 | sqpn = qpn; |
1004 | } | |
1005 | ||
8fd3cd2a LR |
1006 | if (init_attr->qp_type == IB_QPT_SMI || |
1007 | init_attr->qp_type == IB_QPT_GSI || qp_type == MLX4_IB_QPT_SMI || | |
1008 | qp_type == MLX4_IB_QPT_GSI || | |
1009 | (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | | |
1010 | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { | |
1011 | qp->sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); | |
1012 | if (!qp->sqp) | |
915ec7ed | 1013 | return -ENOMEM; |
8fd3cd2a | 1014 | } |
1ffeb2eb JM |
1015 | |
1016 | qp->mlx4_ib_qp_type = qp_type; | |
225c7b1f | 1017 | |
225c7b1f RD |
1018 | spin_lock_init(&qp->sq.lock); |
1019 | spin_lock_init(&qp->rq.lock); | |
fa417f7b | 1020 | INIT_LIST_HEAD(&qp->gid_list); |
0ff1fb65 | 1021 | INIT_LIST_HEAD(&qp->steering_rules); |
225c7b1f | 1022 | |
089b645d | 1023 | qp->state = IB_QPS_RESET; |
ea54b10c JM |
1024 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
1025 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | |
225c7b1f | 1026 | |
e00b64f7 | 1027 | if (udata) { |
089b645d | 1028 | struct mlx4_ib_create_qp ucmd; |
400b1ebc | 1029 | size_t copy_len; |
ed8637d3 GL |
1030 | int shift; |
1031 | int n; | |
400b1ebc | 1032 | |
089b645d | 1033 | copy_len = sizeof(struct mlx4_ib_create_qp); |
225c7b1f | 1034 | |
400b1ebc | 1035 | if (ib_copy_from_udata(&ucmd, udata, copy_len)) { |
225c7b1f RD |
1036 | err = -EFAULT; |
1037 | goto err; | |
1038 | } | |
1039 | ||
089b645d | 1040 | qp->inl_recv_sz = ucmd.inl_recv_sz; |
0e6e7416 | 1041 | |
6d06c9aa GL |
1042 | if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) { |
1043 | if (!(dev->dev->caps.flags & | |
1044 | MLX4_DEV_CAP_FLAG_FCS_KEEP)) { | |
1045 | pr_debug("scatter FCS is unsupported\n"); | |
1046 | err = -EOPNOTSUPP; | |
1047 | goto err; | |
1048 | } | |
1049 | ||
1050 | qp->flags |= MLX4_IB_QP_SCATTER_FCS; | |
1051 | } | |
1052 | ||
e00b64f7 | 1053 | err = set_rq_size(dev, &init_attr->cap, udata, |
400b1ebc | 1054 | qp_has_rq(init_attr), qp, qp->inl_recv_sz); |
2446304d EC |
1055 | if (err) |
1056 | goto err; | |
1057 | ||
089b645d | 1058 | qp->sq_no_prefetch = ucmd.sq_no_prefetch; |
400b1ebc | 1059 | |
089b645d LR |
1060 | err = set_user_sq_size(dev, qp, &ucmd); |
1061 | if (err) | |
1062 | goto err; | |
400b1ebc | 1063 | |
c320e527 MS |
1064 | qp->umem = |
1065 | ib_umem_get(pd->device, ucmd.buf_addr, qp->buf_size, 0); | |
225c7b1f RD |
1066 | if (IS_ERR(qp->umem)) { |
1067 | err = PTR_ERR(qp->umem); | |
1068 | goto err; | |
1069 | } | |
1070 | ||
ed8637d3 GL |
1071 | shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n); |
1072 | err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); | |
1073 | ||
225c7b1f RD |
1074 | if (err) |
1075 | goto err_buf; | |
1076 | ||
1077 | err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); | |
1078 | if (err) | |
1079 | goto err_mtt; | |
1080 | ||
0a1405da | 1081 | if (qp_has_rq(init_attr)) { |
089b645d | 1082 | err = mlx4_ib_db_map_user(udata, ucmd.db_addr, &qp->db); |
02d89b87 RD |
1083 | if (err) |
1084 | goto err_mtt; | |
1085 | } | |
f3301870 | 1086 | qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS; |
225c7b1f | 1087 | } else { |
e00b64f7 | 1088 | err = set_rq_size(dev, &init_attr->cap, udata, |
ea30b966 MG |
1089 | qp_has_rq(init_attr), qp, 0); |
1090 | if (err) | |
1091 | goto err; | |
1092 | ||
0e6e7416 RD |
1093 | qp->sq_no_prefetch = 0; |
1094 | ||
b832be1e EC |
1095 | if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) |
1096 | qp->flags |= MLX4_IB_QP_LSO; | |
1097 | ||
c1c98501 MB |
1098 | if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { |
1099 | if (dev->steering_support == | |
1100 | MLX4_STEERING_MODE_DEVICE_MANAGED) | |
1101 | qp->flags |= MLX4_IB_QP_NETIF; | |
1102 | else | |
1103 | goto err; | |
1104 | } | |
1105 | ||
f95ccffc | 1106 | err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); |
2446304d EC |
1107 | if (err) |
1108 | goto err; | |
1109 | ||
0a1405da | 1110 | if (qp_has_rq(init_attr)) { |
8900b894 | 1111 | err = mlx4_db_alloc(dev->dev, &qp->db, 0); |
02d89b87 RD |
1112 | if (err) |
1113 | goto err; | |
225c7b1f | 1114 | |
02d89b87 RD |
1115 | *qp->db.db = 0; |
1116 | } | |
225c7b1f | 1117 | |
f95ccffc | 1118 | if (mlx4_buf_alloc(dev->dev, qp->buf_size, PAGE_SIZE * 2, |
8900b894 | 1119 | &qp->buf)) { |
f95ccffc JM |
1120 | err = -ENOMEM; |
1121 | goto err_db; | |
225c7b1f RD |
1122 | } |
1123 | ||
1124 | err = mlx4_mtt_init(dev->dev, qp->buf.npages, qp->buf.page_shift, | |
1125 | &qp->mtt); | |
1126 | if (err) | |
1127 | goto err_buf; | |
1128 | ||
8900b894 | 1129 | err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); |
225c7b1f RD |
1130 | if (err) |
1131 | goto err_mtt; | |
1132 | ||
e9105cde LD |
1133 | qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, |
1134 | sizeof(u64), GFP_KERNEL); | |
1135 | qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt, | |
1136 | sizeof(u64), GFP_KERNEL); | |
225c7b1f RD |
1137 | if (!qp->sq.wrid || !qp->rq.wrid) { |
1138 | err = -ENOMEM; | |
1139 | goto err_wrid; | |
1140 | } | |
f3301870 | 1141 | qp->mqp.usage = MLX4_RES_USAGE_DRIVER; |
225c7b1f RD |
1142 | } |
1143 | ||
a3cdcbfa | 1144 | if (sqpn) { |
1ffeb2eb JM |
1145 | if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | |
1146 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { | |
1147 | if (alloc_proxy_bufs(pd->device, qp)) { | |
1148 | err = -ENOMEM; | |
1149 | goto err_wrid; | |
1150 | } | |
1151 | } | |
a3cdcbfa | 1152 | } else { |
ddae0349 EE |
1153 | /* Raw packet QPNs may not have bits 6,7 set in their qp_num; |
1154 | * otherwise, the WQE BlueFlame setup flow wrongly causes | |
1155 | * VLAN insertion. */ | |
3987a2d3 | 1156 | if (init_attr->qp_type == IB_QPT_RAW_PACKET) |
ddae0349 | 1157 | err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, |
d57febe1 MB |
1158 | (init_attr->cap.max_send_wr ? |
1159 | MLX4_RESERVE_ETH_BF_QP : 0) | | |
1160 | (init_attr->cap.max_recv_wr ? | |
f3301870 MS |
1161 | MLX4_RESERVE_A0_QP : 0), |
1162 | qp->mqp.usage); | |
3987a2d3 | 1163 | else |
c1c98501 MB |
1164 | if (qp->flags & MLX4_IB_QP_NETIF) |
1165 | err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn); | |
1166 | else | |
1167 | err = mlx4_qp_reserve_range(dev->dev, 1, 1, | |
f3301870 | 1168 | &qpn, 0, qp->mqp.usage); |
a3cdcbfa | 1169 | if (err) |
1ffeb2eb | 1170 | goto err_proxy; |
a3cdcbfa YP |
1171 | } |
1172 | ||
fbfb6625 EBE |
1173 | if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) |
1174 | qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; | |
1175 | ||
8900b894 | 1176 | err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); |
225c7b1f | 1177 | if (err) |
a3cdcbfa | 1178 | goto err_qpn; |
225c7b1f | 1179 | |
0a1405da SH |
1180 | if (init_attr->qp_type == IB_QPT_XRC_TGT) |
1181 | qp->mqp.qpn |= (1 << 23); | |
1182 | ||
225c7b1f RD |
1183 | /* |
1184 | * Hardware wants QPN written in big-endian order (after | |
1185 | * shifting) for send doorbell. Precompute this value to save | |
1186 | * a little bit when posting sends. | |
1187 | */ | |
1188 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); | |
1189 | ||
089b645d | 1190 | qp->mqp.event = mlx4_ib_qp_event; |
400b1ebc | 1191 | |
35f05dab YH |
1192 | spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); |
1193 | mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), | |
1194 | to_mcq(init_attr->recv_cq)); | |
1195 | /* Maintain device to QPs access, needed for further handling | |
1196 | * via reset flow | |
1197 | */ | |
1198 | list_add_tail(&qp->qps_list, &dev->qp_list); | |
1199 | /* Maintain CQ to QPs access, needed for further handling | |
1200 | * via reset flow | |
1201 | */ | |
1202 | mcq = to_mcq(init_attr->send_cq); | |
1203 | list_add_tail(&qp->cq_send_list, &mcq->send_qp_list); | |
1204 | mcq = to_mcq(init_attr->recv_cq); | |
1205 | list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list); | |
1206 | mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), | |
1207 | to_mcq(init_attr->recv_cq)); | |
1208 | spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); | |
225c7b1f RD |
1209 | return 0; |
1210 | ||
a3cdcbfa | 1211 | err_qpn: |
c1c98501 MB |
1212 | if (!sqpn) { |
1213 | if (qp->flags & MLX4_IB_QP_NETIF) | |
1214 | mlx4_ib_steer_qp_free(dev, qpn, 1); | |
1215 | else | |
1216 | mlx4_qp_release_range(dev->dev, qpn, 1); | |
1217 | } | |
1ffeb2eb JM |
1218 | err_proxy: |
1219 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) | |
1220 | free_proxy_bufs(pd->device, qp); | |
225c7b1f | 1221 | err_wrid: |
e00b64f7 | 1222 | if (udata) { |
0a1405da | 1223 | if (qp_has_rq(init_attr)) |
89944450 | 1224 | mlx4_ib_db_unmap_user(context, &qp->db); |
23f1b384 | 1225 | } else { |
0ef2f05c WW |
1226 | kvfree(qp->sq.wrid); |
1227 | kvfree(qp->rq.wrid); | |
225c7b1f RD |
1228 | } |
1229 | ||
1230 | err_mtt: | |
1231 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); | |
1232 | ||
1233 | err_buf: | |
836a0fbb | 1234 | if (!qp->umem) |
225c7b1f | 1235 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
836a0fbb | 1236 | ib_umem_release(qp->umem); |
225c7b1f RD |
1237 | |
1238 | err_db: | |
e00b64f7 | 1239 | if (!udata && qp_has_rq(init_attr)) |
6296883c | 1240 | mlx4_db_free(dev->dev, &qp->db); |
225c7b1f RD |
1241 | |
1242 | err: | |
8fd3cd2a | 1243 | kfree(qp->sqp); |
225c7b1f RD |
1244 | return err; |
1245 | } | |
1246 | ||
1247 | static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state) | |
1248 | { | |
1249 | switch (state) { | |
1250 | case IB_QPS_RESET: return MLX4_QP_STATE_RST; | |
1251 | case IB_QPS_INIT: return MLX4_QP_STATE_INIT; | |
1252 | case IB_QPS_RTR: return MLX4_QP_STATE_RTR; | |
1253 | case IB_QPS_RTS: return MLX4_QP_STATE_RTS; | |
1254 | case IB_QPS_SQD: return MLX4_QP_STATE_SQD; | |
1255 | case IB_QPS_SQE: return MLX4_QP_STATE_SQER; | |
1256 | case IB_QPS_ERR: return MLX4_QP_STATE_ERR; | |
1257 | default: return -1; | |
1258 | } | |
1259 | } | |
1260 | ||
1261 | static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) | |
338a8fad | 1262 | __acquires(&send_cq->lock) __acquires(&recv_cq->lock) |
225c7b1f | 1263 | { |
338a8fad | 1264 | if (send_cq == recv_cq) { |
35f05dab | 1265 | spin_lock(&send_cq->lock); |
338a8fad RD |
1266 | __acquire(&recv_cq->lock); |
1267 | } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { | |
35f05dab | 1268 | spin_lock(&send_cq->lock); |
225c7b1f RD |
1269 | spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); |
1270 | } else { | |
35f05dab | 1271 | spin_lock(&recv_cq->lock); |
225c7b1f RD |
1272 | spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); |
1273 | } | |
1274 | } | |
1275 | ||
1276 | static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) | |
338a8fad | 1277 | __releases(&send_cq->lock) __releases(&recv_cq->lock) |
225c7b1f | 1278 | { |
338a8fad RD |
1279 | if (send_cq == recv_cq) { |
1280 | __release(&recv_cq->lock); | |
35f05dab | 1281 | spin_unlock(&send_cq->lock); |
338a8fad | 1282 | } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { |
225c7b1f | 1283 | spin_unlock(&recv_cq->lock); |
35f05dab | 1284 | spin_unlock(&send_cq->lock); |
225c7b1f RD |
1285 | } else { |
1286 | spin_unlock(&send_cq->lock); | |
35f05dab | 1287 | spin_unlock(&recv_cq->lock); |
225c7b1f RD |
1288 | } |
1289 | } | |
1290 | ||
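mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() above always take the two CQ locks in a fixed order, lower CQN first, so two threads that lock the same pair of CQs for different QPs can never deadlock; the single-CQ branch takes the lock once and uses __acquire/__release only as sparse annotations. The same ordering rule in a small userspace (pthread) sketch with invented CQ numbers:

```c
#include <pthread.h>
#include <stdio.h>

struct cq {
	unsigned int cqn;
	pthread_mutex_t lock;
};

/* Userspace model of the ordering rule: the lower-numbered CQ is locked
 * first, so concurrent callers agree on the order and ABBA deadlock is
 * impossible.
 */
static void lock_cqs(struct cq *send_cq, struct cq *recv_cq)
{
	if (send_cq == recv_cq) {
		pthread_mutex_lock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		pthread_mutex_lock(&send_cq->lock);
		pthread_mutex_lock(&recv_cq->lock);
	} else {
		pthread_mutex_lock(&recv_cq->lock);
		pthread_mutex_lock(&send_cq->lock);
	}
}

int main(void)
{
	struct cq a = { .cqn = 7, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct cq b = { .cqn = 3, .lock = PTHREAD_MUTEX_INITIALIZER };

	lock_cqs(&a, &b);		/* takes b (cqn 3) first, then a (cqn 7) */
	pthread_mutex_unlock(&a.lock);	/* release in reverse order */
	pthread_mutex_unlock(&b.lock);
	printf("lock order depends only on CQN, not on send/recv role\n");
	return 0;
}
```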
fa417f7b EC |
1291 | static void del_gid_entries(struct mlx4_ib_qp *qp) |
1292 | { | |
1293 | struct mlx4_ib_gid_entry *ge, *tmp; | |
1294 | ||
1295 | list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { | |
1296 | list_del(&ge->list); | |
1297 | kfree(ge); | |
1298 | } | |
1299 | } | |
1300 | ||
0a1405da SH |
1301 | static struct mlx4_ib_pd *get_pd(struct mlx4_ib_qp *qp) |
1302 | { | |
1303 | if (qp->ibqp.qp_type == IB_QPT_XRC_TGT) | |
1304 | return to_mpd(to_mxrcd(qp->ibqp.xrcd)->pd); | |
1305 | else | |
1306 | return to_mpd(qp->ibqp.pd); | |
1307 | } | |
1308 | ||
400b1ebc | 1309 | static void get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, |
0a1405da SH |
1310 | struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) |
1311 | { | |
1312 | switch (qp->ibqp.qp_type) { | |
1313 | case IB_QPT_XRC_TGT: | |
1314 | *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); | |
1315 | *recv_cq = *send_cq; | |
1316 | break; | |
1317 | case IB_QPT_XRC_INI: | |
1318 | *send_cq = to_mcq(qp->ibqp.send_cq); | |
1319 | *recv_cq = *send_cq; | |
1320 | break; | |
1321 | default: | |
400b1ebc GL |
1322 | *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) : |
1323 | to_mcq(qp->ibwq.cq); | |
1324 | *send_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.send_cq) : | |
1325 | *recv_cq; | |
0a1405da SH |
1326 | break; |
1327 | } | |
1328 | } | |
1329 | ||
3078f5f1 GL |
1330 | static void destroy_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) |
1331 | { | |
1332 | if (qp->state != IB_QPS_RESET) { | |
1333 | int i; | |
1334 | ||
1335 | for (i = 0; i < (1 << qp->ibqp.rwq_ind_tbl->log_ind_tbl_size); | |
1336 | i++) { | |
1337 | struct ib_wq *ibwq = qp->ibqp.rwq_ind_tbl->ind_tbl[i]; | |
1338 | struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); | |
1339 | ||
1340 | mutex_lock(&wq->mutex); | |
1341 | ||
1342 | wq->rss_usecnt--; | |
1343 | ||
1344 | mutex_unlock(&wq->mutex); | |
1345 | } | |
1346 | ||
1347 | if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), | |
1348 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) | |
1349 | pr_warn("modify QP %06x to RESET failed.\n", | |
1350 | qp->mqp.qpn); | |
1351 | } | |
1352 | ||
1353 | mlx4_qp_remove(dev->dev, &qp->mqp); | |
1354 | mlx4_qp_free(dev->dev, &qp->mqp); | |
1355 | mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); | |
1356 | del_gid_entries(qp); | |
3078f5f1 GL |
1357 | } |
1358 | ||
225c7b1f | 1359 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, |
bdeacabd SR |
1360 | enum mlx4_ib_source_type src, |
1361 | struct ib_udata *udata) | |
225c7b1f RD |
1362 | { |
1363 | struct mlx4_ib_cq *send_cq, *recv_cq; | |
35f05dab | 1364 | unsigned long flags; |
225c7b1f | 1365 | |
2f5bb473 | 1366 | if (qp->state != IB_QPS_RESET) { |
225c7b1f RD |
1367 | if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), |
1368 | MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) | |
987c8f8f | 1369 | pr_warn("modify QP %06x to RESET failed.\n", |
225c7b1f | 1370 | qp->mqp.qpn); |
25476b02 | 1371 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { |
2f5bb473 JM |
1372 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
1373 | qp->pri.smac = 0; | |
25476b02 | 1374 | qp->pri.smac_port = 0; |
2f5bb473 JM |
1375 | } |
1376 | if (qp->alt.smac) { | |
1377 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | |
1378 | qp->alt.smac = 0; | |
1379 | } | |
1380 | if (qp->pri.vid < 0x1000) { | |
1381 | mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); | |
1382 | qp->pri.vid = 0xFFFF; | |
1383 | qp->pri.candidate_vid = 0xFFFF; | |
1384 | qp->pri.update_vid = 0; | |
1385 | } | |
1386 | if (qp->alt.vid < 0x1000) { | |
1387 | mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); | |
1388 | qp->alt.vid = 0xFFFF; | |
1389 | qp->alt.candidate_vid = 0xFFFF; | |
1390 | qp->alt.update_vid = 0; | |
1391 | } | |
1392 | } | |
225c7b1f | 1393 | |
400b1ebc | 1394 | get_cqs(qp, src, &send_cq, &recv_cq); |
225c7b1f | 1395 | |
35f05dab | 1396 | spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); |
225c7b1f RD |
1397 | mlx4_ib_lock_cqs(send_cq, recv_cq); |
1398 | ||
35f05dab YH |
1399 | /* del from lists under both locks above to protect reset flow paths */ |
1400 | list_del(&qp->qps_list); | |
1401 | list_del(&qp->cq_send_list); | |
1402 | list_del(&qp->cq_recv_list); | |
bdeacabd | 1403 | if (!udata) { |
225c7b1f RD |
1404 | __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, |
1405 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq): NULL); | |
1406 | if (send_cq != recv_cq) | |
1407 | __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); | |
1408 | } | |
1409 | ||
1410 | mlx4_qp_remove(dev->dev, &qp->mqp); | |
1411 | ||
1412 | mlx4_ib_unlock_cqs(send_cq, recv_cq); | |
35f05dab | 1413 | spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); |
225c7b1f RD |
1414 | |
1415 | mlx4_qp_free(dev->dev, &qp->mqp); | |
a3cdcbfa | 1416 | |
c1c98501 MB |
1417 | if (!is_sqp(dev, qp) && !is_tunnel_qp(dev, qp)) { |
1418 | if (qp->flags & MLX4_IB_QP_NETIF) | |
1419 | mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); | |
400b1ebc | 1420 | else if (src == MLX4_IB_RWQ_SRC) |
bdeacabd SR |
1421 | mlx4_ib_release_wqn( |
1422 | rdma_udata_to_drv_context( | |
1423 | udata, | |
1424 | struct mlx4_ib_ucontext, | |
1425 | ibucontext), | |
1426 | qp, 1); | |
c1c98501 MB |
1427 | else |
1428 | mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); | |
1429 | } | |
a3cdcbfa | 1430 | |
225c7b1f RD |
1431 | mlx4_mtt_cleanup(dev->dev, &qp->mtt); |
1432 | ||
bdeacabd | 1433 | if (udata) { |
400b1ebc | 1434 | if (qp->rq.wqe_cnt) { |
bdeacabd SR |
1435 | struct mlx4_ib_ucontext *mcontext = |
1436 | rdma_udata_to_drv_context( | |
1437 | udata, | |
1438 | struct mlx4_ib_ucontext, | |
1439 | ibucontext); | |
1440 | ||
400b1ebc GL |
1441 | mlx4_ib_db_unmap_user(mcontext, &qp->db); |
1442 | } | |
225c7b1f | 1443 | } else { |
0ef2f05c WW |
1444 | kvfree(qp->sq.wrid); |
1445 | kvfree(qp->rq.wrid); | |
1ffeb2eb JM |
1446 | if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | |
1447 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) | |
1448 | free_proxy_bufs(&dev->ib_dev, qp); | |
225c7b1f | 1449 | mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); |
0a1405da | 1450 | if (qp->rq.wqe_cnt) |
6296883c | 1451 | mlx4_db_free(dev->dev, &qp->db); |
225c7b1f | 1452 | } |
836a0fbb | 1453 | ib_umem_release(qp->umem); |
fa417f7b EC |
1454 | |
1455 | del_gid_entries(qp); | |
225c7b1f RD |
1456 | } |
1457 | ||
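/*
 * Pick the QP number used for a special (SMI/GSI) QP: the real special QP
 * when running natively or as the PPF owner under SR-IOV, the per-port
 * proxy QP0/QP1 otherwise (e.g. a GSI QP on port 2 maps to
 * base_sqpn + 2 + 1).
 */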
47605df9 JM |
1458 | static u32 get_sqp_num(struct mlx4_ib_dev *dev, struct ib_qp_init_attr *attr) |
1459 | { | |
1460 | /* Native or PPF */ | |
1461 | if (!mlx4_is_mfunc(dev->dev) || | |
1462 | (mlx4_is_master(dev->dev) && | |
1463 | attr->create_flags & MLX4_IB_SRIOV_SQP)) { | |
1464 | return dev->dev->phys_caps.base_sqpn + | |
1465 | (attr->qp_type == IB_QPT_SMI ? 0 : 2) + | |
1466 | attr->port_num - 1; | |
1467 | } | |
1468 | /* PF or VF -- creating proxies */ | |
1469 | if (attr->qp_type == IB_QPT_SMI) | |
c73c8b1e | 1470 | return dev->dev->caps.spec_qps[attr->port_num - 1].qp0_proxy; |
47605df9 | 1471 | else |
c73c8b1e | 1472 | return dev->dev->caps.spec_qps[attr->port_num - 1].qp1_proxy; |
47605df9 JM |
1473 | } |
1474 | ||
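/*
 * Common QP creation path: validate the create_flags against the QP type,
 * hand RSS QPs off to _mlx4_ib_create_qp_rss(), and create either a
 * regular QP (RC/UC/UD/XRC/raw packet) or a special SMI/GSI QP, reserving
 * a dedicated QPN first when a RoCEv2 GSI QP was requested.
 */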
8fd3cd2a LR |
1475 | static int _mlx4_ib_create_qp(struct ib_pd *pd, struct mlx4_ib_qp *qp, |
1476 | struct ib_qp_init_attr *init_attr, | |
1477 | struct ib_udata *udata) | |
225c7b1f | 1478 | { |
225c7b1f | 1479 | int err; |
fbfb6625 | 1480 | int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; |
0a1405da | 1481 | u16 xrcdn = 0; |
225c7b1f | 1482 | |
3078f5f1 | 1483 | if (init_attr->rwq_ind_tbl) |
8fd3cd2a | 1484 | return _mlx4_ib_create_qp_rss(pd, qp, init_attr, udata); |
3078f5f1 | 1485 | |
521e575b | 1486 | /* |
1ffeb2eb JM |
1487 | * We only support LSO, vendor flag1, and multicast loopback blocking, |
1488 | * and only for kernel UD QPs. | |
521e575b | 1489 | */ |
1ffeb2eb JM |
1490 | if (init_attr->create_flags & ~(MLX4_IB_QP_LSO | |
1491 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK | | |
c1c98501 MB |
1492 | MLX4_IB_SRIOV_TUNNEL_QP | |
1493 | MLX4_IB_SRIOV_SQP | | |
40f2287b | 1494 | MLX4_IB_QP_NETIF | |
8900b894 | 1495 | MLX4_IB_QP_CREATE_ROCE_V2_GSI)) |
1f11a761 | 1496 | return -EOPNOTSUPP; |
521e575b | 1497 | |
c1c98501 MB |
1498 | if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { |
1499 | if (init_attr->qp_type != IB_QPT_UD) | |
8fd3cd2a | 1500 | return -EINVAL; |
c1c98501 MB |
1501 | } |
1502 | ||
e1b866c6 MS |
1503 | if (init_attr->create_flags) { |
1504 | if (udata && init_attr->create_flags & ~(sup_u_create_flags)) | |
8fd3cd2a | 1505 | return -EINVAL; |
e1b866c6 MS |
1506 | |
1507 | if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | | |
e1b866c6 MS |
1508 | MLX4_IB_QP_CREATE_ROCE_V2_GSI | |
1509 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) && | |
1510 | init_attr->qp_type != IB_QPT_UD) || | |
1511 | (init_attr->create_flags & MLX4_IB_SRIOV_SQP && | |
1512 | init_attr->qp_type > IB_QPT_GSI) || | |
1513 | (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI && | |
1514 | init_attr->qp_type != IB_QPT_GSI)) | |
8fd3cd2a | 1515 | return -EINVAL; |
e1b866c6 | 1516 | } |
b846f25a | 1517 | |
225c7b1f | 1518 | switch (init_attr->qp_type) { |
0a1405da SH |
1519 | case IB_QPT_XRC_TGT: |
1520 | pd = to_mxrcd(init_attr->xrcd)->pd; | |
1521 | xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; | |
1522 | init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; | |
df561f66 | 1523 | fallthrough; |
0a1405da SH |
1524 | case IB_QPT_XRC_INI: |
1525 | if (!(to_mdev(pd->device)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) | |
8fd3cd2a | 1526 | return -ENOSYS; |
0a1405da | 1527 | init_attr->recv_cq = init_attr->send_cq; |
df561f66 | 1528 | fallthrough; |
225c7b1f RD |
1529 | case IB_QPT_RC: |
1530 | case IB_QPT_UC: | |
3987a2d3 | 1531 | case IB_QPT_RAW_PACKET: |
8fd3cd2a | 1532 | case IB_QPT_UD: |
2f5bb473 JM |
1533 | qp->pri.vid = 0xFFFF; |
1534 | qp->alt.vid = 0xFFFF; | |
8fd3cd2a LR |
1535 | err = create_qp_common(pd, init_attr, udata, 0, qp); |
1536 | if (err) | |
1537 | return err; | |
225c7b1f RD |
1538 | |
1539 | qp->ibqp.qp_num = qp->mqp.qpn; | |
0a1405da | 1540 | qp->xrcdn = xrcdn; |
225c7b1f | 1541 | break; |
225c7b1f RD |
1542 | case IB_QPT_SMI: |
1543 | case IB_QPT_GSI: | |
1544 | { | |
e1b866c6 MS |
1545 | int sqpn; |
1546 | ||
e1b866c6 | 1547 | if (init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI) { |
f3301870 MS |
1548 | int res = mlx4_qp_reserve_range(to_mdev(pd->device)->dev, |
1549 | 1, 1, &sqpn, 0, | |
1550 | MLX4_RES_USAGE_DRIVER); | |
e1b866c6 MS |
1551 | |
1552 | if (res) | |
8fd3cd2a | 1553 | return res; |
e1b866c6 MS |
1554 | } else { |
1555 | sqpn = get_sqp_num(to_mdev(pd->device), init_attr); | |
1556 | } | |
225c7b1f | 1557 | |
8fd3cd2a LR |
1558 | qp->pri.vid = 0xFFFF; |
1559 | qp->alt.vid = 0xFFFF; | |
1560 | err = create_qp_common(pd, init_attr, udata, sqpn, qp); | |
1ffeb2eb | 1561 | if (err) |
8fd3cd2a | 1562 | return err; |
225c7b1f | 1563 | |
2b1f7470 LR |
1564 | if (init_attr->create_flags & |
1565 | (MLX4_IB_SRIOV_SQP | MLX4_IB_SRIOV_TUNNEL_QP)) | |
1566 | /* Internal QP created with ib_create_qp */ | |
1567 | rdma_restrack_no_track(&qp->ibqp.res); | |
1568 | ||
225c7b1f | 1569 | qp->port = init_attr->port_num; |
e1b866c6 MS |
1570 | qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : |
1571 | init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI ? sqpn : 1; | |
225c7b1f RD |
1572 | break; |
1573 | } | |
1574 | default: | |
1575 | /* Don't support raw QPs */ | |
8fd3cd2a | 1576 | return -EOPNOTSUPP; |
225c7b1f | 1577 | } |
8fd3cd2a | 1578 | return 0; |
225c7b1f RD |
1579 | } |
1580 | ||
e1b866c6 MS |
1581 | struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, |
1582 | struct ib_qp_init_attr *init_attr, | |
1583 | struct ib_udata *udata) { | |
1584 | struct ib_device *device = pd ? pd->device : init_attr->xrcd->device; | |
e1b866c6 | 1585 | struct mlx4_ib_dev *dev = to_mdev(device); |
8fd3cd2a LR |
1586 | struct mlx4_ib_qp *qp; |
1587 | int ret; | |
1588 | ||
1589 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | |
1590 | if (!qp) | |
1591 | return ERR_PTR(-ENOMEM); | |
e1b866c6 | 1592 | |
8fd3cd2a LR |
1593 | mutex_init(&qp->mutex); |
1594 | ret = _mlx4_ib_create_qp(pd, qp, init_attr, udata); | |
1595 | if (ret) { | |
1596 | kfree(qp); | |
1597 | return ERR_PTR(ret); | |
1598 | } | |
e1b866c6 | 1599 | |
8fd3cd2a | 1600 | if (init_attr->qp_type == IB_QPT_GSI && |
e1b866c6 | 1601 | !(init_attr->create_flags & MLX4_IB_QP_CREATE_ROCE_V2_GSI)) { |
915ec7ed | 1602 | struct mlx4_ib_sqp *sqp = qp->sqp; |
e1b866c6 MS |
1603 | int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num); |
1604 | ||
1605 | if (is_eth && | |
1606 | dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { | |
1607 | init_attr->create_flags |= MLX4_IB_QP_CREATE_ROCE_V2_GSI; | |
1608 | sqp->roce_v2_gsi = ib_create_qp(pd, init_attr); | |
1609 | ||
1610 | if (IS_ERR(sqp->roce_v2_gsi)) { | |
1611 | pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi)); | |
1612 | sqp->roce_v2_gsi = NULL; | |
1613 | } else { | |
915ec7ed LR |
1614 | to_mqp(sqp->roce_v2_gsi)->flags |= |
1615 | MLX4_IB_ROCE_V2_GSI_QP; | |
e1b866c6 MS |
1616 | } |
1617 | ||
1618 | init_attr->create_flags &= ~MLX4_IB_QP_CREATE_ROCE_V2_GSI; | |
1619 | } | |
1620 | } | |
8fd3cd2a | 1621 | return &qp->ibqp; |
e1b866c6 MS |
1622 | } |
1623 | ||
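/*
 * Common destroy path: close the port for QP0, drop the qp1_proxy back
 * pointer, release any loopback counter, then tear down either the RSS
 * resources or the regular QP resources before freeing the qp struct.
 */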
bdeacabd | 1624 | static int _mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
225c7b1f RD |
1625 | { |
1626 | struct mlx4_ib_dev *dev = to_mdev(qp->device); | |
1627 | struct mlx4_ib_qp *mqp = to_mqp(qp); | |
1628 | ||
1629 | if (is_qp0(dev, mqp)) | |
1630 | mlx4_CLOSE_PORT(dev->dev, mqp->port); | |
1631 | ||
c482af64 JM |
1632 | if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI && |
1633 | dev->qp1_proxy[mqp->port - 1] == mqp) { | |
9433c188 MB |
1634 | mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); |
1635 | dev->qp1_proxy[mqp->port - 1] = NULL; | |
1636 | mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); | |
1637 | } | |
1638 | ||
7b59f0f9 EBE |
1639 | if (mqp->counter_index) |
1640 | mlx4_ib_free_qp_counter(dev, mqp); | |
1641 | ||
3078f5f1 GL |
1642 | if (qp->rwq_ind_tbl) { |
1643 | destroy_qp_rss(dev, mqp); | |
1644 | } else { | |
bdeacabd | 1645 | destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata); |
3078f5f1 | 1646 | } |
225c7b1f | 1647 | |
8fd3cd2a | 1648 | kfree(mqp->sqp); |
915ec7ed | 1649 | kfree(mqp); |
225c7b1f RD |
1650 | |
1651 | return 0; | |
1652 | } | |
1653 | ||
c4367a26 | 1654 | int mlx4_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata) |
e1b866c6 MS |
1655 | { |
1656 | struct mlx4_ib_qp *mqp = to_mqp(qp); | |
1657 | ||
1658 | if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { | |
915ec7ed | 1659 | struct mlx4_ib_sqp *sqp = mqp->sqp; |
e1b866c6 MS |
1660 | |
1661 | if (sqp->roce_v2_gsi) | |
1662 | ib_destroy_qp(sqp->roce_v2_gsi); | |
1663 | } | |
1664 | ||
bdeacabd | 1665 | return _mlx4_ib_destroy_qp(qp, udata); |
e1b866c6 MS |
1666 | } |
1667 | ||
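/*
 * Translate the driver's internal QP type into the hardware service type
 * stored in the QP context; proxy/tunnel types are only valid when the
 * device is multi-function (SR-IOV), and -1 marks an unsupported type.
 */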
1ffeb2eb | 1668 | static int to_mlx4_st(struct mlx4_ib_dev *dev, enum mlx4_ib_qp_type type) |
225c7b1f RD |
1669 | { |
1670 | switch (type) { | |
1ffeb2eb JM |
1671 | case MLX4_IB_QPT_RC: return MLX4_QP_ST_RC; |
1672 | case MLX4_IB_QPT_UC: return MLX4_QP_ST_UC; | |
1673 | case MLX4_IB_QPT_UD: return MLX4_QP_ST_UD; | |
1674 | case MLX4_IB_QPT_XRC_INI: | |
1675 | case MLX4_IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC; | |
1676 | case MLX4_IB_QPT_SMI: | |
1677 | case MLX4_IB_QPT_GSI: | |
1678 | case MLX4_IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX; | |
1679 | ||
1680 | case MLX4_IB_QPT_PROXY_SMI_OWNER: | |
1681 | case MLX4_IB_QPT_TUN_SMI_OWNER: return (mlx4_is_mfunc(dev->dev) ? | |
1682 | MLX4_QP_ST_MLX : -1); | |
1683 | case MLX4_IB_QPT_PROXY_SMI: | |
1684 | case MLX4_IB_QPT_TUN_SMI: | |
1685 | case MLX4_IB_QPT_PROXY_GSI: | |
1686 | case MLX4_IB_QPT_TUN_GSI: return (mlx4_is_mfunc(dev->dev) ? | |
1687 | MLX4_QP_ST_UD : -1); | |
1688 | default: return -1; | |
225c7b1f RD |
1689 | } |
1690 | } | |
1691 | ||
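/*
 * Build the RRE/RAE/RWE access bits from the new attributes when present,
 * falling back to the QP's current values; with zero responder resources
 * only remote writes can stay enabled.
 */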
65adfa91 | 1692 | static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr, |
225c7b1f RD |
1693 | int attr_mask) |
1694 | { | |
1695 | u8 dest_rd_atomic; | |
1696 | u32 access_flags; | |
1697 | u32 hw_access_flags = 0; | |
1698 | ||
1699 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | |
1700 | dest_rd_atomic = attr->max_dest_rd_atomic; | |
1701 | else | |
1702 | dest_rd_atomic = qp->resp_depth; | |
1703 | ||
1704 | if (attr_mask & IB_QP_ACCESS_FLAGS) | |
1705 | access_flags = attr->qp_access_flags; | |
1706 | else | |
1707 | access_flags = qp->atomic_rd_en; | |
1708 | ||
1709 | if (!dest_rd_atomic) | |
1710 | access_flags &= IB_ACCESS_REMOTE_WRITE; | |
1711 | ||
1712 | if (access_flags & IB_ACCESS_REMOTE_READ) | |
1713 | hw_access_flags |= MLX4_QP_BIT_RRE; | |
1714 | if (access_flags & IB_ACCESS_REMOTE_ATOMIC) | |
1715 | hw_access_flags |= MLX4_QP_BIT_RAE; | |
1716 | if (access_flags & IB_ACCESS_REMOTE_WRITE) | |
1717 | hw_access_flags |= MLX4_QP_BIT_RWE; | |
1718 | ||
1719 | return cpu_to_be32(hw_access_flags); | |
1720 | } | |
1721 | ||
65adfa91 | 1722 | static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr, |
225c7b1f RD |
1723 | int attr_mask) |
1724 | { | |
1725 | if (attr_mask & IB_QP_PKEY_INDEX) | |
1726 | sqp->pkey_index = attr->pkey_index; | |
1727 | if (attr_mask & IB_QP_QKEY) | |
1728 | sqp->qkey = attr->qkey; | |
1729 | if (attr_mask & IB_QP_SQ_PSN) | |
1730 | sqp->send_psn = attr->sq_psn; | |
1731 | } | |
1732 | ||
1733 | static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port) | |
1734 | { | |
1735 | path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6); | |
1736 | } | |
1737 | ||
90898850 DC |
1738 | static int _mlx4_set_path(struct mlx4_ib_dev *dev, |
1739 | const struct rdma_ah_attr *ah, | |
297e0dad | 1740 | u64 smac, u16 vlan_tag, struct mlx4_qp_path *path, |
2f5bb473 | 1741 | struct mlx4_roce_smac_vlan_info *smac_info, u8 port) |
225c7b1f | 1742 | { |
4c3eb3ca | 1743 | int vidx; |
297e0dad | 1744 | int smac_index; |
2f5bb473 | 1745 | int err; |
297e0dad | 1746 | |
d8966fcd DC |
1747 | path->grh_mylmc = rdma_ah_get_path_bits(ah) & 0x7f; |
1748 | path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah)); | |
1749 | if (rdma_ah_get_static_rate(ah)) { | |
1750 | path->static_rate = rdma_ah_get_static_rate(ah) + | |
1751 | MLX4_STAT_RATE_OFFSET; | |
225c7b1f RD |
1752 | while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && |
1753 | !(1 << path->static_rate & dev->dev->caps.stat_rate_support)) | |
1754 | --path->static_rate; | |
1755 | } else | |
1756 | path->static_rate = 0; | |
225c7b1f | 1757 | |
d8966fcd DC |
1758 | if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) { |
1759 | const struct ib_global_route *grh = rdma_ah_read_grh(ah); | |
1760 | int real_sgid_index = | |
7492052a | 1761 | mlx4_ib_gid_index_to_real_index(dev, grh->sgid_attr); |
5070cd22 | 1762 | |
54a6d63f DC |
1763 | if (real_sgid_index < 0) |
1764 | return real_sgid_index; | |
5070cd22 | 1765 | if (real_sgid_index >= dev->dev->caps.gid_table_len[port]) { |
987c8f8f | 1766 | pr_err("sgid_index (%u) too large. max is %d\n", |
5070cd22 | 1767 | real_sgid_index, dev->dev->caps.gid_table_len[port] - 1); |
225c7b1f RD |
1768 | return -1; |
1769 | } | |
1770 | ||
1771 | path->grh_mylmc |= 1 << 7; | |
5070cd22 | 1772 | path->mgid_index = real_sgid_index; |
d8966fcd | 1773 | path->hop_limit = grh->hop_limit; |
225c7b1f | 1774 | path->tclass_flowlabel = |
d8966fcd DC |
1775 | cpu_to_be32((grh->traffic_class << 20) | |
1776 | (grh->flow_label)); | |
1777 | memcpy(path->rgid, grh->dgid.raw, 16); | |
225c7b1f RD |
1778 | } |
1779 | ||
44c58487 | 1780 | if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) { |
d8966fcd | 1781 | if (!(rdma_ah_get_ah_flags(ah) & IB_AH_GRH)) |
fa417f7b EC |
1782 | return -1; |
1783 | ||
2f5bb473 | 1784 | path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | |
d8966fcd | 1785 | ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 7) << 3); |
4c3eb3ca | 1786 | |
297e0dad | 1787 | path->feup |= MLX4_FEUP_FORCE_ETH_UP; |
4c3eb3ca | 1788 | if (vlan_tag < 0x1000) { |
2f5bb473 JM |
1789 | if (smac_info->vid < 0x1000) { |
1790 | /* both valid vlan ids */ | |
1791 | if (smac_info->vid != vlan_tag) { | |
1792 | /* different VIDs. unreg old and reg new */ | |
1793 | err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); | |
1794 | if (err) | |
1795 | return err; | |
1796 | smac_info->candidate_vid = vlan_tag; | |
1797 | smac_info->candidate_vlan_index = vidx; | |
1798 | smac_info->candidate_vlan_port = port; | |
1799 | smac_info->update_vid = 1; | |
1800 | path->vlan_index = vidx; | |
1801 | } else { | |
1802 | path->vlan_index = smac_info->vlan_index; | |
1803 | } | |
1804 | } else { | |
1805 | /* no current vlan tag in qp */ | |
1806 | err = mlx4_register_vlan(dev->dev, port, vlan_tag, &vidx); | |
1807 | if (err) | |
1808 | return err; | |
1809 | smac_info->candidate_vid = vlan_tag; | |
1810 | smac_info->candidate_vlan_index = vidx; | |
1811 | smac_info->candidate_vlan_port = port; | |
1812 | smac_info->update_vid = 1; | |
1813 | path->vlan_index = vidx; | |
1814 | } | |
297e0dad | 1815 | path->feup |= MLX4_FVL_FORCE_ETH_VLAN; |
2f5bb473 JM |
1816 | path->fl = 1 << 6; |
1817 | } else { | |
1818 | /* have current vlan tag. unregister it at modify-qp success */ | |
1819 | if (smac_info->vid < 0x1000) { | |
1820 | smac_info->candidate_vid = 0xFFFF; | |
1821 | smac_info->update_vid = 1; | |
1822 | } | |
4c3eb3ca | 1823 | } |
2f5bb473 JM |
1824 | |
1825 | /* get smac_index for RoCE use. | |
1826 | * If no SMAC has been assigned yet, register one.
1827 | * If one was already assigned but the new MAC differs,
1828 | * unregister the old one and register the new one. | |
1829 | */ | |
25476b02 JM |
1830 | if ((!smac_info->smac && !smac_info->smac_port) || |
1831 | smac_info->smac != smac) { | |
2f5bb473 JM |
1832 | /* register candidate now, unreg if needed, after success */ |
1833 | smac_index = mlx4_register_mac(dev->dev, port, smac); | |
1834 | if (smac_index >= 0) { | |
1835 | smac_info->candidate_smac_index = smac_index; | |
1836 | smac_info->candidate_smac = smac; | |
1837 | smac_info->candidate_smac_port = port; | |
1838 | } else { | |
1839 | return -EINVAL; | |
1840 | } | |
1841 | } else { | |
1842 | smac_index = smac_info->smac_index; | |
1843 | } | |
44c58487 | 1844 | memcpy(path->dmac, ah->roce.dmac, 6); |
2f5bb473 JM |
1845 | path->ackto = MLX4_IB_LINK_TYPE_ETH; |
1846 | /* put MAC table smac index for IBoE */ | |
1847 | path->grh_mylmc = (u8) (smac_index) | 0x80; | |
1848 | } else { | |
4c3eb3ca | 1849 | path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | |
d8966fcd | 1850 | ((port - 1) << 6) | ((rdma_ah_get_sl(ah) & 0xf) << 2); |
2f5bb473 | 1851 | } |
fa417f7b | 1852 | |
225c7b1f RD |
1853 | return 0; |
1854 | } | |
1855 | ||
297e0dad MS |
1856 | static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, |
1857 | enum ib_qp_attr_mask qp_attr_mask, | |
2f5bb473 | 1858 | struct mlx4_ib_qp *mqp, |
dbf727de MB |
1859 | struct mlx4_qp_path *path, u8 port, |
1860 | u16 vlan_id, u8 *smac) | |
297e0dad MS |
1861 | { |
1862 | return _mlx4_set_path(dev, &qp->ah_attr, | |
dbf727de MB |
1863 | mlx4_mac_to_u64(smac), |
1864 | vlan_id, | |
2f5bb473 | 1865 | path, &mqp->pri, port); |
297e0dad MS |
1866 | } |
1867 | ||
1868 | static int mlx4_set_alt_path(struct mlx4_ib_dev *dev, | |
1869 | const struct ib_qp_attr *qp, | |
1870 | enum ib_qp_attr_mask qp_attr_mask, | |
2f5bb473 | 1871 | struct mlx4_ib_qp *mqp, |
297e0dad MS |
1872 | struct mlx4_qp_path *path, u8 port) |
1873 | { | |
1874 | return _mlx4_set_path(dev, &qp->alt_ah_attr, | |
dbf727de MB |
1875 | 0, |
1876 | 0xffff, | |
2f5bb473 | 1877 | path, &mqp->alt, port); |
297e0dad MS |
1878 | } |
1879 | ||
fa417f7b EC |
1880 | static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) |
1881 | { | |
1882 | struct mlx4_ib_gid_entry *ge, *tmp; | |
1883 | ||
1884 | list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { | |
1885 | if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { | |
1886 | ge->added = 1; | |
1887 | ge->port = qp->port; | |
1888 | } | |
1889 | } | |
1890 | } | |
1891 | ||
dbf727de MB |
1892 | static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, |
1893 | struct mlx4_ib_qp *qp, | |
2f5bb473 JM |
1894 | struct mlx4_qp_context *context) |
1895 | { | |
2f5bb473 JM |
1896 | u64 u64_mac; |
1897 | int smac_index; | |
1898 | ||
3e0629cb | 1899 | u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]); |
2f5bb473 JM |
1900 | |
1901 | context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6); | |
25476b02 | 1902 | if (!qp->pri.smac && !qp->pri.smac_port) { |
2f5bb473 JM |
1903 | smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac); |
1904 | if (smac_index >= 0) { | |
1905 | qp->pri.candidate_smac_index = smac_index; | |
1906 | qp->pri.candidate_smac = u64_mac; | |
1907 | qp->pri.candidate_smac_port = qp->port; | |
1908 | context->pri_path.grh_mylmc = 0x80 | (u8) smac_index; | |
1909 | } else { | |
1910 | return -ENOENT; | |
1911 | } | |
1912 | } | |
1913 | return 0; | |
1914 | } | |
1915 | ||
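/*
 * Allocate a dedicated flow counter used for loopback source checking on
 * Ethernet QPs created with multicast loopback blocking; a no-op when the
 * port link layer or device capabilities don't call for it.
 */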
7b59f0f9 EBE |
1916 | static int create_qp_lb_counter(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) |
1917 | { | |
1918 | struct counter_index *new_counter_index; | |
1919 | int err; | |
1920 | u32 tmp_idx; | |
1921 | ||
1922 | if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) != | |
1923 | IB_LINK_LAYER_ETHERNET || | |
1924 | !(qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) || | |
1925 | !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK)) | |
1926 | return 0; | |
1927 | ||
f3301870 | 1928 | err = mlx4_counter_alloc(dev->dev, &tmp_idx, MLX4_RES_USAGE_DRIVER); |
7b59f0f9 EBE |
1929 | if (err) |
1930 | return err; | |
1931 | ||
1932 | new_counter_index = kmalloc(sizeof(*new_counter_index), GFP_KERNEL); | |
1933 | if (!new_counter_index) { | |
1934 | mlx4_counter_free(dev->dev, tmp_idx); | |
1935 | return -ENOMEM; | |
1936 | } | |
1937 | ||
1938 | new_counter_index->index = tmp_idx; | |
1939 | new_counter_index->allocated = 1; | |
1940 | qp->counter_index = new_counter_index; | |
1941 | ||
1942 | mutex_lock(&dev->counters_table[qp->port - 1].mutex); | |
1943 | list_add_tail(&new_counter_index->list, | |
1944 | &dev->counters_table[qp->port - 1].counters_list); | |
1945 | mutex_unlock(&dev->counters_table[qp->port - 1].mutex); | |
1946 | ||
1947 | return 0; | |
1948 | } | |
1949 | ||
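/*
 * Map the GID type of the source GID entry to the RoCE mode encoding the
 * QP context expects: RoCEv1 vs. RoCEv2 (UDP encapsulation).
 */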
3b5daf28 MS |
1950 | enum { |
1951 | MLX4_QPC_ROCE_MODE_1 = 0, | |
1952 | MLX4_QPC_ROCE_MODE_2 = 2, | |
1953 | MLX4_QPC_ROCE_MODE_UNDEFINED = 0xff | |
1954 | }; | |
1955 | ||
1956 | static u8 gid_type_to_qpc(enum ib_gid_type gid_type) | |
1957 | { | |
1958 | switch (gid_type) { | |
1959 | case IB_GID_TYPE_ROCE: | |
1960 | return MLX4_QPC_ROCE_MODE_1; | |
1961 | case IB_GID_TYPE_ROCE_UDP_ENCAP: | |
1962 | return MLX4_QPC_ROCE_MODE_2; | |
1963 | default: | |
1964 | return MLX4_QPC_ROCE_MODE_UNDEFINED; | |
1965 | } | |
1966 | } | |
1967 | ||
3078f5f1 GL |
1968 | /* |
1969 | * Go over all of the RSS QP's children (WQs) and apply their HW state according to
1970 | * their logical state if the RSS QP is the first RSS QP associated with the WQ.
1971 | */ | |
89944450 SR |
1972 | static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num, |
1973 | struct ib_udata *udata) | |
3078f5f1 | 1974 | { |
fba02e6c | 1975 | int err = 0; |
3078f5f1 | 1976 | int i; |
3078f5f1 GL |
1977 | |
1978 | for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) { | |
1979 | struct ib_wq *ibwq = ind_tbl->ind_tbl[i]; | |
1980 | struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); | |
1981 | ||
1982 | mutex_lock(&wq->mutex); | |
1983 | ||
1984 | /* mlx4_ib restrictions:
1985 | * A WQ is associated with a port according to the RSS QP it is
1986 | * associated with.
1987 | * In case the WQ is associated to a different port by another | |
1988 | * RSS QP, return a failure. | |
1989 | */ | |
1990 | if ((wq->rss_usecnt > 0) && (wq->port != port_num)) { | |
1991 | err = -EINVAL; | |
1992 | mutex_unlock(&wq->mutex); | |
1993 | break; | |
1994 | } | |
1995 | wq->port = port_num; | |
1996 | if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) { | |
89944450 | 1997 | err = _mlx4_ib_modify_wq(ibwq, IB_WQS_RDY, udata); |
3078f5f1 GL |
1998 | if (err) { |
1999 | mutex_unlock(&wq->mutex); | |
2000 | break; | |
2001 | } | |
2002 | } | |
2003 | wq->rss_usecnt++; | |
2004 | ||
2005 | mutex_unlock(&wq->mutex); | |
2006 | } | |
2007 | ||
2008 | if (i && err) { | |
2009 | int j; | |
2010 | ||
2011 | for (j = (i - 1); j >= 0; j--) { | |
2012 | struct ib_wq *ibwq = ind_tbl->ind_tbl[j]; | |
2013 | struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); | |
2014 | ||
2015 | mutex_lock(&wq->mutex); | |
2016 | ||
2017 | if ((wq->rss_usecnt == 1) && | |
2018 | (ibwq->state == IB_WQS_RDY)) | |
89944450 SR |
2019 | if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, |
2020 | udata)) | |
3078f5f1 GL |
2021 | pr_warn("failed to reverse WQN=0x%06x\n", |
2022 | ibwq->wq_num); | |
2023 | wq->rss_usecnt--; | |
2024 | ||
2025 | mutex_unlock(&wq->mutex); | |
2026 | } | |
2027 | } | |
2028 | ||
2029 | return err; | |
2030 | } | |
2031 | ||
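/*
 * Undo bringup_rss_rwqs(): drop this RSS QP's reference on every child WQ
 * and move a WQ back to hardware RESET when it was the last user.
 */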
89944450 SR |
2032 | static void bring_down_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, |
2033 | struct ib_udata *udata) | |
3078f5f1 GL |
2034 | { |
2035 | int i; | |
2036 | ||
2037 | for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) { | |
2038 | struct ib_wq *ibwq = ind_tbl->ind_tbl[i]; | |
2039 | struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq); | |
2040 | ||
2041 | mutex_lock(&wq->mutex); | |
2042 | ||
2043 | if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY)) | |
89944450 | 2044 | if (_mlx4_ib_modify_wq(ibwq, IB_WQS_RESET, udata)) |
3078f5f1 GL |
2045 | pr_warn("failed to reverse WQN=%x\n", |
2046 | ibwq->wq_num); | |
2047 | wq->rss_usecnt--; | |
2048 | ||
2049 | mutex_unlock(&wq->mutex); | |
2050 | } | |
2051 | } | |
2052 | ||
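/*
 * The RSS context overlays the pri_path area of the QP context; copy the
 * base QPN, hash flags and Toeplitz key into that region.
 */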
2053 | static void fill_qp_rss_context(struct mlx4_qp_context *context, | |
2054 | struct mlx4_ib_qp *qp) | |
2055 | { | |
2056 | struct mlx4_rss_context *rss_context; | |
2057 | ||
2058 | rss_context = (void *)context + offsetof(struct mlx4_qp_context, | |
2059 | pri_path) + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH; | |
2060 | ||
2061 | rss_context->base_qpn = cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz); | |
2062 | rss_context->default_qpn = | |
2063 | cpu_to_be32(qp->rss_ctx->base_qpn_tbl_sz & 0xffffff); | |
2064 | if (qp->rss_ctx->flags & (MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6)) | |
2065 | rss_context->base_qpn_udp = rss_context->default_qpn; | |
2066 | rss_context->flags = qp->rss_ctx->flags; | |
2067 | /* Currently only the Toeplitz hash function is supported */
2068 | rss_context->hash_fn = MLX4_RSS_HASH_TOP; | |
2069 | ||
2070 | memcpy(rss_context->rss_key, qp->rss_ctx->rss_key, | |
2071 | MLX4_EN_RSS_KEY_SIZE); | |
2072 | } | |
2073 | ||
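/*
 * Core modify-QP worker shared by regular QPs and WQs: translate the
 * attribute mask into a mlx4_qp_context (paths, MTU, CQ numbers, atomic
 * limits, counters, RoCE mode), issue the firmware transition, and then
 * commit or roll back any candidate SMAC/VLAN registrations.
 */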
400b1ebc | 2074 | static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type, |
65adfa91 | 2075 | const struct ib_qp_attr *attr, int attr_mask, |
89944450 SR |
2076 | enum ib_qp_state cur_state, |
2077 | enum ib_qp_state new_state, | |
2078 | struct ib_udata *udata) | |
225c7b1f | 2079 | { |
400b1ebc | 2080 | struct ib_srq *ibsrq; |
47ec3866 | 2081 | const struct ib_gid_attr *gid_attr = NULL; |
3078f5f1 | 2082 | struct ib_rwq_ind_table *rwq_ind_tbl; |
400b1ebc GL |
2083 | enum ib_qp_type qp_type; |
2084 | struct mlx4_ib_dev *dev; | |
2085 | struct mlx4_ib_qp *qp; | |
0a1405da SH |
2086 | struct mlx4_ib_pd *pd; |
2087 | struct mlx4_ib_cq *send_cq, *recv_cq; | |
89944450 SR |
2088 | struct mlx4_ib_ucontext *ucontext = rdma_udata_to_drv_context( |
2089 | udata, struct mlx4_ib_ucontext, ibucontext); | |
225c7b1f RD |
2090 | struct mlx4_qp_context *context; |
2091 | enum mlx4_qp_optpar optpar = 0; | |
225c7b1f | 2092 | int sqd_event; |
c1c98501 | 2093 | int steer_qp = 0; |
225c7b1f | 2094 | int err = -EINVAL; |
3ba8e31d | 2095 | int counter_index; |
225c7b1f | 2096 | |
400b1ebc GL |
2097 | if (src_type == MLX4_IB_RWQ_SRC) { |
2098 | struct ib_wq *ibwq; | |
2099 | ||
3078f5f1 | 2100 | ibwq = (struct ib_wq *)src; |
3078f5f1 GL |
2101 | ibsrq = NULL; |
2102 | rwq_ind_tbl = NULL; | |
2103 | qp_type = IB_QPT_RAW_PACKET; | |
2104 | qp = to_mqp((struct ib_qp *)ibwq); | |
2105 | dev = to_mdev(ibwq->device); | |
2106 | pd = to_mpd(ibwq->pd); | |
400b1ebc GL |
2107 | } else { |
2108 | struct ib_qp *ibqp; | |
2109 | ||
3078f5f1 | 2110 | ibqp = (struct ib_qp *)src; |
3078f5f1 GL |
2111 | ibsrq = ibqp->srq; |
2112 | rwq_ind_tbl = ibqp->rwq_ind_tbl; | |
2113 | qp_type = ibqp->qp_type; | |
2114 | qp = to_mqp(ibqp); | |
2115 | dev = to_mdev(ibqp->device); | |
2116 | pd = get_pd(qp); | |
400b1ebc GL |
2117 | } |
2118 | ||
3dec4878 JM |
2119 | /* APM is not supported under RoCE */ |
2120 | if (attr_mask & IB_QP_ALT_PATH && | |
2121 | rdma_port_get_link_layer(&dev->ib_dev, qp->port) == | |
2122 | IB_LINK_LAYER_ETHERNET) | |
2123 | return -ENOTSUPP; | |
2124 | ||
225c7b1f RD |
2125 | context = kzalloc(sizeof *context, GFP_KERNEL); |
2126 | if (!context) | |
2127 | return -ENOMEM; | |
2128 | ||
225c7b1f | 2129 | context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) | |
1ffeb2eb | 2130 | (to_mlx4_st(dev, qp->mlx4_ib_qp_type) << 16)); |
225c7b1f RD |
2131 | |
2132 | if (!(attr_mask & IB_QP_PATH_MIG_STATE)) | |
2133 | context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); | |
2134 | else { | |
2135 | optpar |= MLX4_QP_OPTPAR_PM_STATE; | |
2136 | switch (attr->path_mig_state) { | |
2137 | case IB_MIG_MIGRATED: | |
2138 | context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11); | |
2139 | break; | |
2140 | case IB_MIG_REARM: | |
2141 | context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11); | |
2142 | break; | |
2143 | case IB_MIG_ARMED: | |
2144 | context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11); | |
2145 | break; | |
2146 | } | |
2147 | } | |
2148 | ||
ea30b966 MG |
2149 | if (qp->inl_recv_sz) |
2150 | context->param3 |= cpu_to_be32(1 << 25); | |
2151 | ||
6d06c9aa GL |
2152 | if (qp->flags & MLX4_IB_QP_SCATTER_FCS) |
2153 | context->param3 |= cpu_to_be32(1 << 29); | |
2154 | ||
400b1ebc | 2155 | if (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI) |
225c7b1f | 2156 | context->mtu_msgmax = (IB_MTU_4096 << 5) | 11; |
400b1ebc | 2157 | else if (qp_type == IB_QPT_RAW_PACKET) |
3987a2d3 | 2158 | context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX; |
400b1ebc | 2159 | else if (qp_type == IB_QPT_UD) { |
b832be1e EC |
2160 | if (qp->flags & MLX4_IB_QP_LSO) |
2161 | context->mtu_msgmax = (IB_MTU_4096 << 5) | | |
2162 | ilog2(dev->dev->caps.max_gso_sz); | |
2163 | else | |
5f22a1d8 | 2164 | context->mtu_msgmax = (IB_MTU_4096 << 5) | 13; |
b832be1e | 2165 | } else if (attr_mask & IB_QP_PATH_MTU) { |
225c7b1f | 2166 | if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { |
987c8f8f | 2167 | pr_err("path MTU (%u) is invalid\n", |
225c7b1f | 2168 | attr->path_mtu); |
f5b40431 | 2169 | goto out; |
225c7b1f | 2170 | } |
d1f2cd89 EC |
2171 | context->mtu_msgmax = (attr->path_mtu << 5) | |
2172 | ilog2(dev->dev->caps.max_msg_sz); | |
225c7b1f RD |
2173 | } |
2174 | ||
3078f5f1 GL |
2175 | if (!rwq_ind_tbl) { /* PRM RSS receive side should be left zeros */ |
2176 | if (qp->rq.wqe_cnt) | |
2177 | context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3; | |
2178 | context->rq_size_stride |= qp->rq.wqe_shift - 4; | |
2179 | } | |
225c7b1f | 2180 | |
0e6e7416 RD |
2181 | if (qp->sq.wqe_cnt) |
2182 | context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3; | |
225c7b1f RD |
2183 | context->sq_size_stride |= qp->sq.wqe_shift - 4; |
2184 | ||
7b59f0f9 EBE |
2185 | if (new_state == IB_QPS_RESET && qp->counter_index) |
2186 | mlx4_ib_free_qp_counter(dev, qp); | |
2187 | ||
0a1405da | 2188 | if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { |
0e6e7416 | 2189 | context->sq_size_stride |= !!qp->sq_no_prefetch << 7; |
0a1405da | 2190 | context->xrcd = cpu_to_be32((u32) qp->xrcdn); |
400b1ebc | 2191 | if (qp_type == IB_QPT_RAW_PACKET) |
02d7ef6f | 2192 | context->param3 |= cpu_to_be32(1 << 30); |
0a1405da | 2193 | } |
0e6e7416 | 2194 | |
89944450 | 2195 | if (ucontext) |
85743f1e | 2196 | context->usr_page = cpu_to_be32( |
89944450 | 2197 | mlx4_to_hw_uar_index(dev->dev, ucontext->uar.index)); |
225c7b1f | 2198 | else |
85743f1e HN |
2199 | context->usr_page = cpu_to_be32( |
2200 | mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index)); | |
225c7b1f RD |
2201 | |
2202 | if (attr_mask & IB_QP_DEST_QPN) | |
2203 | context->remote_qpn = cpu_to_be32(attr->dest_qp_num); | |
2204 | ||
2205 | if (attr_mask & IB_QP_PORT) { | |
2206 | if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD && | |
2207 | !(attr_mask & IB_QP_AV)) { | |
2208 | mlx4_set_sched(&context->pri_path, attr->port_num); | |
2209 | optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE; | |
2210 | } | |
2211 | } | |
2212 | ||
cfcde11c | 2213 | if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { |
7b59f0f9 EBE |
2214 | err = create_qp_lb_counter(dev, qp); |
2215 | if (err) | |
2216 | goto out; | |
2217 | ||
3ba8e31d EBE |
2218 | counter_index = |
2219 | dev->counters_table[qp->port - 1].default_counter; | |
7b59f0f9 EBE |
2220 | if (qp->counter_index) |
2221 | counter_index = qp->counter_index->index; | |
2222 | ||
3ba8e31d EBE |
2223 | if (counter_index != -1) { |
2224 | context->pri_path.counter_index = counter_index; | |
cfcde11c | 2225 | optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX; |
7b59f0f9 EBE |
2226 | if (qp->counter_index) { |
2227 | context->pri_path.fl |= | |
2228 | MLX4_FL_ETH_SRC_CHECK_MC_LB; | |
2229 | context->pri_path.vlan_control |= | |
2230 | MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER; | |
2231 | } | |
cfcde11c | 2232 | } else |
47d8417f EBE |
2233 | context->pri_path.counter_index = |
2234 | MLX4_SINK_COUNTER_INDEX(dev->dev); | |
c1c98501 MB |
2235 | |
2236 | if (qp->flags & MLX4_IB_QP_NETIF) { | |
2237 | mlx4_ib_steer_qp_reg(dev, qp, 1); | |
2238 | steer_qp = 1; | |
2239 | } | |
e1b866c6 | 2240 | |
400b1ebc | 2241 | if (qp_type == IB_QPT_GSI) { |
e1b866c6 MS |
2242 | enum ib_gid_type gid_type = qp->flags & MLX4_IB_ROCE_V2_GSI_QP ? |
2243 | IB_GID_TYPE_ROCE_UDP_ENCAP : IB_GID_TYPE_ROCE; | |
2244 | u8 qpc_roce_mode = gid_type_to_qpc(gid_type); | |
2245 | ||
2246 | context->rlkey_roce_mode |= (qpc_roce_mode << 6); | |
2247 | } | |
cfcde11c OG |
2248 | } |
2249 | ||
225c7b1f | 2250 | if (attr_mask & IB_QP_PKEY_INDEX) { |
1ffeb2eb JM |
2251 | if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) |
2252 | context->pri_path.disable_pkey_check = 0x40; | |
225c7b1f RD |
2253 | context->pri_path.pkey_index = attr->pkey_index; |
2254 | optpar |= MLX4_QP_OPTPAR_PKEY_INDEX; | |
2255 | } | |
2256 | ||
225c7b1f | 2257 | if (attr_mask & IB_QP_AV) { |
400b1ebc | 2258 | u8 port_num = mlx4_is_bonded(dev->dev) ? 1 : |
dbf727de | 2259 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port; |
dbf727de MB |
2260 | u16 vlan = 0xffff; |
2261 | u8 smac[ETH_ALEN]; | |
d8966fcd DC |
2262 | int is_eth = |
2263 | rdma_cap_eth_ah(&dev->ib_dev, port_num) && | |
2264 | rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; | |
dbf727de | 2265 | |
d8966fcd | 2266 | if (is_eth) { |
47ec3866 | 2267 | gid_attr = attr->ah_attr.grh.sgid_attr; |
a70c0739 PP |
2268 | err = rdma_read_gid_l2_fields(gid_attr, &vlan, |
2269 | &smac[0]); | |
2270 | if (err) | |
2271 | goto out; | |
dbf727de | 2272 | } |
dbf727de | 2273 | |
2f5bb473 | 2274 | if (mlx4_set_path(dev, attr, attr_mask, qp, &context->pri_path, |
dbf727de | 2275 | port_num, vlan, smac)) |
225c7b1f | 2276 | goto out; |
225c7b1f RD |
2277 | |
2278 | optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | | |
2279 | MLX4_QP_OPTPAR_SCHED_QUEUE); | |
3b5daf28 MS |
2280 | |
2281 | if (is_eth && | |
2282 | (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR)) { | |
47ec3866 | 2283 | u8 qpc_roce_mode = gid_type_to_qpc(gid_attr->gid_type); |
3b5daf28 MS |
2284 | |
2285 | if (qpc_roce_mode == MLX4_QPC_ROCE_MODE_UNDEFINED) { | |
2286 | err = -EINVAL; | |
2287 | goto out; | |
2288 | } | |
2289 | context->rlkey_roce_mode |= (qpc_roce_mode << 6); | |
2290 | } | |
2291 | ||
225c7b1f RD |
2292 | } |
2293 | ||
2294 | if (attr_mask & IB_QP_TIMEOUT) { | |
fa417f7b | 2295 | context->pri_path.ackto |= attr->timeout << 3; |
225c7b1f RD |
2296 | optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; |
2297 | } | |
2298 | ||
2299 | if (attr_mask & IB_QP_ALT_PATH) { | |
225c7b1f RD |
2300 | if (attr->alt_port_num == 0 || |
2301 | attr->alt_port_num > dev->dev->caps.num_ports) | |
f5b40431 | 2302 | goto out; |
225c7b1f | 2303 | |
5ae2a7a8 RD |
2304 | if (attr->alt_pkey_index >= |
2305 | dev->dev->caps.pkey_table_len[attr->alt_port_num]) | |
f5b40431 | 2306 | goto out; |
5ae2a7a8 | 2307 | |
2f5bb473 JM |
2308 | if (mlx4_set_alt_path(dev, attr, attr_mask, qp, |
2309 | &context->alt_path, | |
297e0dad | 2310 | attr->alt_port_num)) |
f5b40431 | 2311 | goto out; |
225c7b1f RD |
2312 | |
2313 | context->alt_path.pkey_index = attr->alt_pkey_index; | |
2314 | context->alt_path.ackto = attr->alt_timeout << 3; | |
2315 | optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH; | |
2316 | } | |
2317 | ||
3078f5f1 GL |
2318 | context->pd = cpu_to_be32(pd->pdn); |
2319 | ||
2320 | if (!rwq_ind_tbl) { | |
108809a0 | 2321 | context->params1 = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28); |
3078f5f1 GL |
2322 | get_cqs(qp, src_type, &send_cq, &recv_cq); |
2323 | } else { /* Set dummy CQs to be compatible with HV and PRM */ | |
2324 | send_cq = to_mcq(rwq_ind_tbl->ind_tbl[0]->cq); | |
2325 | recv_cq = send_cq; | |
2326 | } | |
0a1405da SH |
2327 | context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); |
2328 | context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn); | |
57f01b53 | 2329 | |
95d04f07 | 2330 | /* Set "fast registration enabled" for all kernel QPs */ |
89944450 | 2331 | if (!ucontext) |
95d04f07 RD |
2332 | context->params1 |= cpu_to_be32(1 << 11); |
2333 | ||
57f01b53 JM |
2334 | if (attr_mask & IB_QP_RNR_RETRY) { |
2335 | context->params1 |= cpu_to_be32(attr->rnr_retry << 13); | |
2336 | optpar |= MLX4_QP_OPTPAR_RNR_RETRY; | |
2337 | } | |
2338 | ||
225c7b1f RD |
2339 | if (attr_mask & IB_QP_RETRY_CNT) { |
2340 | context->params1 |= cpu_to_be32(attr->retry_cnt << 16); | |
2341 | optpar |= MLX4_QP_OPTPAR_RETRY_COUNT; | |
2342 | } | |
2343 | ||
2344 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { | |
2345 | if (attr->max_rd_atomic) | |
2346 | context->params1 |= | |
2347 | cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); | |
2348 | optpar |= MLX4_QP_OPTPAR_SRA_MAX; | |
2349 | } | |
2350 | ||
2351 | if (attr_mask & IB_QP_SQ_PSN) | |
2352 | context->next_send_psn = cpu_to_be32(attr->sq_psn); | |
2353 | ||
225c7b1f RD |
2354 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
2355 | if (attr->max_dest_rd_atomic) | |
2356 | context->params2 |= | |
2357 | cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); | |
2358 | optpar |= MLX4_QP_OPTPAR_RRA_MAX; | |
2359 | } | |
2360 | ||
2361 | if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) { | |
2362 | context->params2 |= to_mlx4_access_flags(qp, attr, attr_mask); | |
2363 | optpar |= MLX4_QP_OPTPAR_RWE | MLX4_QP_OPTPAR_RRE | MLX4_QP_OPTPAR_RAE; | |
2364 | } | |
2365 | ||
400b1ebc | 2366 | if (ibsrq) |
225c7b1f RD |
2367 | context->params2 |= cpu_to_be32(MLX4_QP_BIT_RIC); |
2368 | ||
2369 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { | |
2370 | context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); | |
2371 | optpar |= MLX4_QP_OPTPAR_RNR_TIMEOUT; | |
2372 | } | |
2373 | if (attr_mask & IB_QP_RQ_PSN) | |
2374 | context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); | |
2375 | ||
1ffeb2eb | 2376 | /* proxy and tunnel qp qkeys will be changed in modify-qp wrappers */ |
225c7b1f | 2377 | if (attr_mask & IB_QP_QKEY) { |
1ffeb2eb JM |
2378 | if (qp->mlx4_ib_qp_type & |
2379 | (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) | |
2380 | context->qkey = cpu_to_be32(IB_QP_SET_QKEY); | |
2381 | else { | |
2382 | if (mlx4_is_mfunc(dev->dev) && | |
2383 | !(qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) && | |
2384 | (attr->qkey & MLX4_RESERVED_QKEY_MASK) == | |
2385 | MLX4_RESERVED_QKEY_BASE) { | |
2386 | pr_err("Cannot use reserved QKEY" | |
2387 | " 0x%x (range 0xffff0000..0xffffffff" | |
2388 | " is reserved)\n", attr->qkey); | |
2389 | err = -EINVAL; | |
2390 | goto out; | |
2391 | } | |
2392 | context->qkey = cpu_to_be32(attr->qkey); | |
2393 | } | |
225c7b1f RD |
2394 | optpar |= MLX4_QP_OPTPAR_Q_KEY; |
2395 | } | |
2396 | ||
400b1ebc GL |
2397 | if (ibsrq) |
2398 | context->srqn = cpu_to_be32(1 << 24 | | |
2399 | to_msrq(ibsrq)->msrq.srqn); | |
225c7b1f | 2400 | |
400b1ebc GL |
2401 | if (qp->rq.wqe_cnt && |
2402 | cur_state == IB_QPS_RESET && | |
2403 | new_state == IB_QPS_INIT) | |
225c7b1f RD |
2404 | context->db_rec_addr = cpu_to_be64(qp->db.dma); |
2405 | ||
2406 | if (cur_state == IB_QPS_INIT && | |
2407 | new_state == IB_QPS_RTR && | |
400b1ebc GL |
2408 | (qp_type == IB_QPT_GSI || qp_type == IB_QPT_SMI || |
2409 | qp_type == IB_QPT_UD || qp_type == IB_QPT_RAW_PACKET)) { | |
225c7b1f | 2410 | context->pri_path.sched_queue = (qp->port - 1) << 6; |
1ffeb2eb JM |
2411 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || |
2412 | qp->mlx4_ib_qp_type & | |
2413 | (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER)) { | |
225c7b1f | 2414 | context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE; |
1ffeb2eb JM |
2415 | if (qp->mlx4_ib_qp_type != MLX4_IB_QPT_SMI) |
2416 | context->pri_path.fl = 0x80; | |
2417 | } else { | |
2418 | if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV) | |
2419 | context->pri_path.fl = 0x80; | |
225c7b1f | 2420 | context->pri_path.sched_queue |= MLX4_IB_DEFAULT_SCHED_QUEUE; |
1ffeb2eb | 2421 | } |
2f5bb473 JM |
2422 | if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) == |
2423 | IB_LINK_LAYER_ETHERNET) { | |
2424 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI || | |
2425 | qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) | |
2426 | context->pri_path.feup = 1 << 7; /* don't fsm */ | |
2427 | /* handle smac_index */ | |
2428 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_UD || | |
2429 | qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || | |
2430 | qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { | |
dbf727de | 2431 | err = handle_eth_ud_smac_index(dev, qp, context); |
bede98e7 MD |
2432 | if (err) { |
2433 | err = -EINVAL; | |
2434 | goto out; | |
2435 | } | |
9433c188 MB |
2436 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) |
2437 | dev->qp1_proxy[qp->port - 1] = qp; | |
2f5bb473 JM |
2438 | } |
2439 | } | |
225c7b1f RD |
2440 | } |
2441 | ||
400b1ebc | 2442 | if (qp_type == IB_QPT_RAW_PACKET) { |
3528f696 EC |
2443 | context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | |
2444 | MLX4_IB_LINK_TYPE_ETH; | |
d2fce8a9 OG |
2445 | if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { |
2446 | /* set QP to receive both tunneled & non-tunneled packets */ | |
108809a0 | 2447 | if (!rwq_ind_tbl) |
d2fce8a9 OG |
2448 | context->srqn = cpu_to_be32(7 << 28); |
2449 | } | |
2450 | } | |
3528f696 | 2451 | |
400b1ebc | 2452 | if (qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) { |
297e0dad MS |
2453 | int is_eth = rdma_port_get_link_layer( |
2454 | &dev->ib_dev, qp->port) == | |
2455 | IB_LINK_LAYER_ETHERNET; | |
2456 | if (is_eth) { | |
2457 | context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH; | |
2458 | optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH; | |
2459 | } | |
2460 | } | |
2461 | ||
225c7b1f RD |
2462 | if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && |
2463 | attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) | |
2464 | sqd_event = 1; | |
2465 | else | |
2466 | sqd_event = 0; | |
2467 | ||
89944450 | 2468 | if (!ucontext && |
400b1ebc GL |
2469 | cur_state == IB_QPS_RESET && |
2470 | new_state == IB_QPS_INIT) | |
3b5daf28 | 2471 | context->rlkey_roce_mode |= (1 << 4); |
d57f5f72 | 2472 | |
c0be5fb5 EC |
2473 | /* |
2474 | * Before passing a kernel QP to the HW, make sure that the | |
0e6e7416 RD |
2475 | * ownership bits of the send queue are set and the SQ |
2476 | * headroom is stamped so that the hardware doesn't start | |
2477 | * processing stale work requests. | |
c0be5fb5 | 2478 | */ |
89944450 | 2479 | if (!ucontext && |
400b1ebc GL |
2480 | cur_state == IB_QPS_RESET && |
2481 | new_state == IB_QPS_INIT) { | |
c0be5fb5 EC |
2482 | struct mlx4_wqe_ctrl_seg *ctrl; |
2483 | int i; | |
2484 | ||
0e6e7416 | 2485 | for (i = 0; i < qp->sq.wqe_cnt; ++i) { |
c0be5fb5 EC |
2486 | ctrl = get_send_wqe(qp, i); |
2487 | ctrl->owner_opcode = cpu_to_be32(1 << 31); | |
f95ccffc JM |
2488 | ctrl->qpn_vlan.fence_size = |
2489 | 1 << (qp->sq.wqe_shift - 4); | |
2490 | stamp_send_wqe(qp, i); | |
c0be5fb5 EC |
2491 | } |
2492 | } | |
2493 | ||
108809a0 GL |
2494 | if (rwq_ind_tbl && |
2495 | cur_state == IB_QPS_RESET && | |
2496 | new_state == IB_QPS_INIT) { | |
2497 | fill_qp_rss_context(context, qp); | |
2498 | context->flags |= cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET); | |
2499 | } | |
2500 | ||
225c7b1f RD |
2501 | err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state), |
2502 | to_mlx4_state(new_state), context, optpar, | |
2503 | sqd_event, &qp->mqp); | |
2504 | if (err) | |
2505 | goto out; | |
2506 | ||
2507 | qp->state = new_state; | |
2508 | ||
2509 | if (attr_mask & IB_QP_ACCESS_FLAGS) | |
2510 | qp->atomic_rd_en = attr->qp_access_flags; | |
2511 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | |
2512 | qp->resp_depth = attr->max_dest_rd_atomic; | |
fa417f7b | 2513 | if (attr_mask & IB_QP_PORT) { |
225c7b1f | 2514 | qp->port = attr->port_num; |
fa417f7b EC |
2515 | update_mcg_macs(dev, qp); |
2516 | } | |
225c7b1f RD |
2517 | if (attr_mask & IB_QP_ALT_PATH) |
2518 | qp->alt_port = attr->alt_port_num; | |
2519 | ||
2520 | if (is_sqp(dev, qp)) | |
915ec7ed | 2521 | store_sqp_attrs(qp->sqp, attr, attr_mask); |
225c7b1f RD |
2522 | |
2523 | /* | |
2524 | * If we moved QP0 to RTR, bring the IB link up; if we moved | |
2525 | * QP0 to RESET or ERROR, bring the link back down. | |
2526 | */ | |
2527 | if (is_qp0(dev, qp)) { | |
2528 | if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) | |
5ae2a7a8 | 2529 | if (mlx4_INIT_PORT(dev->dev, qp->port)) |
987c8f8f | 2530 | pr_warn("INIT_PORT failed for port %d\n", |
5ae2a7a8 | 2531 | qp->port); |
225c7b1f RD |
2532 | |
2533 | if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && | |
2534 | (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR)) | |
2535 | mlx4_CLOSE_PORT(dev->dev, qp->port); | |
2536 | } | |
2537 | ||
2538 | /* | |
2539 | * If we moved a kernel QP to RESET, clean up all old CQ | |
2540 | * entries and reinitialize the QP. | |
2541 | */ | |
2f5bb473 | 2542 | if (new_state == IB_QPS_RESET) { |
89944450 | 2543 | if (!ucontext) { |
2f5bb473 | 2544 | mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, |
400b1ebc | 2545 | ibsrq ? to_msrq(ibsrq) : NULL); |
2f5bb473 JM |
2546 | if (send_cq != recv_cq) |
2547 | mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); | |
2548 | ||
2549 | qp->rq.head = 0; | |
2550 | qp->rq.tail = 0; | |
2551 | qp->sq.head = 0; | |
2552 | qp->sq.tail = 0; | |
2553 | qp->sq_next_wqe = 0; | |
2554 | if (qp->rq.wqe_cnt) | |
2555 | *qp->db.db = 0; | |
225c7b1f | 2556 | |
2f5bb473 JM |
2557 | if (qp->flags & MLX4_IB_QP_NETIF) |
2558 | mlx4_ib_steer_qp_reg(dev, qp, 0); | |
2559 | } | |
25476b02 | 2560 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) { |
2f5bb473 JM |
2561 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
2562 | qp->pri.smac = 0; | |
25476b02 | 2563 | qp->pri.smac_port = 0; |
2f5bb473 JM |
2564 | } |
2565 | if (qp->alt.smac) { | |
2566 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | |
2567 | qp->alt.smac = 0; | |
2568 | } | |
2569 | if (qp->pri.vid < 0x1000) { | |
2570 | mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, qp->pri.vid); | |
2571 | qp->pri.vid = 0xFFFF; | |
2572 | qp->pri.candidate_vid = 0xFFFF; | |
2573 | qp->pri.update_vid = 0; | |
2574 | } | |
c1c98501 | 2575 | |
2f5bb473 JM |
2576 | if (qp->alt.vid < 0x1000) { |
2577 | mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, qp->alt.vid); | |
2578 | qp->alt.vid = 0xFFFF; | |
2579 | qp->alt.candidate_vid = 0xFFFF; | |
2580 | qp->alt.update_vid = 0; | |
2581 | } | |
225c7b1f | 2582 | } |
225c7b1f | 2583 | out: |
7b59f0f9 EBE |
2584 | if (err && qp->counter_index) |
2585 | mlx4_ib_free_qp_counter(dev, qp); | |
c1c98501 MB |
2586 | if (err && steer_qp) |
2587 | mlx4_ib_steer_qp_reg(dev, qp, 0); | |
225c7b1f | 2588 | kfree(context); |
25476b02 JM |
2589 | if (qp->pri.candidate_smac || |
2590 | (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) { | |
2f5bb473 JM |
2591 | if (err) { |
2592 | mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac); | |
2593 | } else { | |
25476b02 | 2594 | if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) |
2f5bb473 JM |
2595 | mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac); |
2596 | qp->pri.smac = qp->pri.candidate_smac; | |
2597 | qp->pri.smac_index = qp->pri.candidate_smac_index; | |
2598 | qp->pri.smac_port = qp->pri.candidate_smac_port; | |
2599 | } | |
2600 | qp->pri.candidate_smac = 0; | |
2601 | qp->pri.candidate_smac_index = 0; | |
2602 | qp->pri.candidate_smac_port = 0; | |
2603 | } | |
2604 | if (qp->alt.candidate_smac) { | |
2605 | if (err) { | |
2606 | mlx4_unregister_mac(dev->dev, qp->alt.candidate_smac_port, qp->alt.candidate_smac); | |
2607 | } else { | |
2608 | if (qp->alt.smac) | |
2609 | mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac); | |
2610 | qp->alt.smac = qp->alt.candidate_smac; | |
2611 | qp->alt.smac_index = qp->alt.candidate_smac_index; | |
2612 | qp->alt.smac_port = qp->alt.candidate_smac_port; | |
2613 | } | |
2614 | qp->alt.candidate_smac = 0; | |
2615 | qp->alt.candidate_smac_index = 0; | |
2616 | qp->alt.candidate_smac_port = 0; | |
2617 | } | |
2618 | ||
2619 | if (qp->pri.update_vid) { | |
2620 | if (err) { | |
2621 | if (qp->pri.candidate_vid < 0x1000) | |
2622 | mlx4_unregister_vlan(dev->dev, qp->pri.candidate_vlan_port, | |
2623 | qp->pri.candidate_vid); | |
2624 | } else { | |
2625 | if (qp->pri.vid < 0x1000) | |
2626 | mlx4_unregister_vlan(dev->dev, qp->pri.vlan_port, | |
2627 | qp->pri.vid); | |
2628 | qp->pri.vid = qp->pri.candidate_vid; | |
2629 | qp->pri.vlan_port = qp->pri.candidate_vlan_port; | |
2630 | qp->pri.vlan_index = qp->pri.candidate_vlan_index; | |
2631 | } | |
2632 | qp->pri.candidate_vid = 0xFFFF; | |
2633 | qp->pri.update_vid = 0; | |
2634 | } | |
2635 | ||
2636 | if (qp->alt.update_vid) { | |
2637 | if (err) { | |
2638 | if (qp->alt.candidate_vid < 0x1000) | |
2639 | mlx4_unregister_vlan(dev->dev, qp->alt.candidate_vlan_port, | |
2640 | qp->alt.candidate_vid); | |
2641 | } else { | |
2642 | if (qp->alt.vid < 0x1000) | |
2643 | mlx4_unregister_vlan(dev->dev, qp->alt.vlan_port, | |
2644 | qp->alt.vid); | |
2645 | qp->alt.vid = qp->alt.candidate_vid; | |
2646 | qp->alt.vlan_port = qp->alt.candidate_vlan_port; | |
2647 | qp->alt.vlan_index = qp->alt.candidate_vlan_index; | |
2648 | } | |
2649 | qp->alt.candidate_vid = 0xFFFF; | |
2650 | qp->alt.update_vid = 0; | |
2651 | } | |
2652 | ||
225c7b1f RD |
2653 | return err; |
2654 | } | |
2655 | ||
3078f5f1 GL |
2656 | enum { |
2657 | MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK = (IB_QP_STATE | | |
2658 | IB_QP_PORT), | |
2659 | }; | |
2660 | ||
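/*
 * Validate the requested transition and attributes (state machine, port,
 * pkey index, rd_atomic limits, RSS restrictions, bonding) and bring RSS
 * child WQs up or down around the actual __mlx4_ib_modify_qp() call.
 */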
e1b866c6 MS |
2661 | static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
2662 | int attr_mask, struct ib_udata *udata) | |
65adfa91 MT |
2663 | { |
2664 | struct mlx4_ib_dev *dev = to_mdev(ibqp->device); | |
2665 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | |
2666 | enum ib_qp_state cur_state, new_state; | |
2667 | int err = -EINVAL; | |
65adfa91 MT |
2668 | mutex_lock(&qp->mutex); |
2669 | ||
2670 | cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; | |
2671 | new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; | |
2672 | ||
dd5f03be | 2673 | if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, |
d31131bb | 2674 | attr_mask)) { |
b1d8eb5a JM |
2675 | pr_debug("qpn 0x%x: invalid attribute mask specified " |
2676 | "for transition %d to %d. qp_type %d," | |
2677 | " attr_mask 0x%x\n", | |
2678 | ibqp->qp_num, cur_state, new_state, | |
2679 | ibqp->qp_type, attr_mask); | |
65adfa91 | 2680 | goto out; |
b1d8eb5a | 2681 | } |
65adfa91 | 2682 | |
3078f5f1 GL |
2683 | if (ibqp->rwq_ind_tbl) { |
2684 | if (!(((cur_state == IB_QPS_RESET) && | |
2685 | (new_state == IB_QPS_INIT)) || | |
2686 | ((cur_state == IB_QPS_INIT) && | |
2687 | (new_state == IB_QPS_RTR)))) { | |
2688 | pr_debug("qpn 0x%x: RSS QP unsupported transition %d to %d\n", | |
2689 | ibqp->qp_num, cur_state, new_state); | |
2690 | ||
2691 | err = -EOPNOTSUPP; | |
2692 | goto out; | |
2693 | } | |
2694 | ||
2695 | if (attr_mask & ~MLX4_IB_MODIFY_QP_RSS_SUP_ATTR_MSK) { | |
2696 | pr_debug("qpn 0x%x: RSS QP unsupported attribute mask 0x%x for transition %d to %d\n", | |
2697 | ibqp->qp_num, attr_mask, cur_state, new_state); | |
2698 | ||
2699 | err = -EOPNOTSUPP; | |
2700 | goto out; | |
2701 | } | |
2702 | } | |
2703 | ||
c6215745 MS |
2704 | if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) { |
2705 | if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) { | |
2706 | if ((ibqp->qp_type == IB_QPT_RC) || | |
2707 | (ibqp->qp_type == IB_QPT_UD) || | |
2708 | (ibqp->qp_type == IB_QPT_UC) || | |
2709 | (ibqp->qp_type == IB_QPT_RAW_PACKET) || | |
2710 | (ibqp->qp_type == IB_QPT_XRC_INI)) { | |
2711 | attr->port_num = mlx4_ib_bond_next_port(dev); | |
2712 | } | |
2713 | } else { | |
2714 | /* no sense in changing port_num | |
2715 | * when ports are bonded */ | |
2716 | attr_mask &= ~IB_QP_PORT; | |
2717 | } | |
2718 | } | |
2719 | ||
65adfa91 | 2720 | if ((attr_mask & IB_QP_PORT) && |
1ffeb2eb | 2721 | (attr->port_num == 0 || attr->port_num > dev->num_ports)) { |
b1d8eb5a JM |
2722 | pr_debug("qpn 0x%x: invalid port number (%d) specified " |
2723 | "for transition %d to %d. qp_type %d\n", | |
2724 | ibqp->qp_num, attr->port_num, cur_state, | |
2725 | new_state, ibqp->qp_type); | |
65adfa91 MT |
2726 | goto out; |
2727 | } | |
2728 | ||
3987a2d3 OG |
2729 | if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) && |
2730 | (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) != | |
2731 | IB_LINK_LAYER_ETHERNET)) | |
2732 | goto out; | |
2733 | ||
5ae2a7a8 RD |
2734 | if (attr_mask & IB_QP_PKEY_INDEX) { |
2735 | int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; | |
b1d8eb5a JM |
2736 | if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) { |
2737 | pr_debug("qpn 0x%x: invalid pkey index (%d) specified " | |
2738 | "for transition %d to %d. qp_type %d\n", | |
2739 | ibqp->qp_num, attr->pkey_index, cur_state, | |
2740 | new_state, ibqp->qp_type); | |
5ae2a7a8 | 2741 | goto out; |
b1d8eb5a | 2742 | } |
5ae2a7a8 RD |
2743 | } |
2744 | ||
65adfa91 MT |
2745 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && |
2746 | attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) { | |
b1d8eb5a JM |
2747 | pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. " |
2748 | "Transition %d to %d. qp_type %d\n", | |
2749 | ibqp->qp_num, attr->max_rd_atomic, cur_state, | |
2750 | new_state, ibqp->qp_type); | |
65adfa91 MT |
2751 | goto out; |
2752 | } | |
2753 | ||
2754 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && | |
2755 | attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) { | |
b1d8eb5a JM |
2756 | pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. " |
2757 | "Transition %d to %d. qp_type %d\n", | |
2758 | ibqp->qp_num, attr->max_dest_rd_atomic, cur_state, | |
2759 | new_state, ibqp->qp_type); | |
65adfa91 MT |
2760 | goto out; |
2761 | } | |
2762 | ||
2763 | if (cur_state == new_state && cur_state == IB_QPS_RESET) { | |
2764 | err = 0; | |
2765 | goto out; | |
2766 | } | |
2767 | ||
3078f5f1 | 2768 | if (ibqp->rwq_ind_tbl && (new_state == IB_QPS_INIT)) { |
89944450 SR |
2769 | err = bringup_rss_rwqs(ibqp->rwq_ind_tbl, attr->port_num, |
2770 | udata); | |
3078f5f1 GL |
2771 | if (err) |
2772 | goto out; | |
2773 | } | |
2774 | ||
400b1ebc | 2775 | err = __mlx4_ib_modify_qp(ibqp, MLX4_IB_QP_SRC, attr, attr_mask, |
89944450 | 2776 | cur_state, new_state, udata); |
65adfa91 | 2777 | |
3078f5f1 | 2778 | if (ibqp->rwq_ind_tbl && err) |
89944450 | 2779 | bring_down_rss_rwqs(ibqp->rwq_ind_tbl, udata); |
3078f5f1 | 2780 | |
c6215745 MS |
2781 | if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) |
2782 | attr->port_num = 1; | |
2783 | ||
65adfa91 MT |
2784 | out: |
2785 | mutex_unlock(&qp->mutex); | |
2786 | return err; | |
2787 | } | |
2788 | ||
e1b866c6 MS |
2789 | int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
2790 | int attr_mask, struct ib_udata *udata) | |
2791 | { | |
2792 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); | |
2793 | int ret; | |
2794 | ||
26e990ba JG |
2795 | if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) |
2796 | return -EOPNOTSUPP; | |
2797 | ||
e1b866c6 MS |
2798 | ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata); |
2799 | ||
2800 | if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { | |
915ec7ed | 2801 | struct mlx4_ib_sqp *sqp = mqp->sqp; |
e1b866c6 MS |
2802 | int err = 0; |
2803 | ||
2804 | if (sqp->roce_v2_gsi) | |
2805 | err = ib_modify_qp(sqp->roce_v2_gsi, attr, attr_mask); | |
2806 | if (err) | |
2807 | pr_err("Failed to modify GSI QP for RoCEv2 (%d)\n", | |
2808 | err); | |
2809 | } | |
2810 | return ret; | |
2811 | } | |
2812 | ||
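/*
 * On a VF, look up the QKEY associated with the proxy/tunnel QP0 of
 * whichever port owns the given QP number.
 */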
99ec41d0 JM |
2813 | static int vf_get_qp0_qkey(struct mlx4_dev *dev, int qpn, u32 *qkey) |
2814 | { | |
2815 | int i; | |
2816 | for (i = 0; i < dev->caps.num_ports; i++) { | |
c73c8b1e EBE |
2817 | if (qpn == dev->caps.spec_qps[i].qp0_proxy || |
2818 | qpn == dev->caps.spec_qps[i].qp0_tunnel) { | |
2819 | *qkey = dev->caps.spec_qps[i].qp0_qkey; | |
99ec41d0 JM |
2820 | return 0; |
2821 | } | |
2822 | } | |
2823 | return -EINVAL; | |
2824 | } | |
2825 | ||
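/*
 * Build the LRH/BTH/DETH headers for proxy/tunnel QP0 traffic under
 * SR-IOV, force loopback, and copy the packed header into one or two
 * inline segments so it never crosses a 64-byte boundary.
 */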
915ec7ed | 2826 | static int build_sriov_qp0_header(struct mlx4_ib_qp *qp, |
f696bf6d | 2827 | const struct ib_ud_wr *wr, |
1ffeb2eb JM |
2828 | void *wqe, unsigned *mlx_seg_len) |
2829 | { | |
915ec7ed LR |
2830 | struct mlx4_ib_dev *mdev = to_mdev(qp->ibqp.device); |
2831 | struct mlx4_ib_sqp *sqp = qp->sqp; | |
2832 | struct ib_device *ib_dev = qp->ibqp.device; | |
1ffeb2eb JM |
2833 | struct mlx4_wqe_mlx_seg *mlx = wqe; |
2834 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; | |
e622f2f4 | 2835 | struct mlx4_ib_ah *ah = to_mah(wr->ah); |
1ffeb2eb JM |
2836 | u16 pkey; |
2837 | u32 qkey; | |
2838 | int send_size; | |
2839 | int header_size; | |
2840 | int spc; | |
6693ca95 | 2841 | int err; |
1ffeb2eb JM |
2842 | int i; |
2843 | ||
e622f2f4 | 2844 | if (wr->wr.opcode != IB_WR_SEND) |
1ffeb2eb JM |
2845 | return -EINVAL; |
2846 | ||
2847 | send_size = 0; | |
2848 | ||
e622f2f4 CH |
2849 | for (i = 0; i < wr->wr.num_sge; ++i) |
2850 | send_size += wr->wr.sg_list[i].length; | |
1ffeb2eb JM |
2851 | |
2852 | /* for proxy-qp0 sends, need to add in size of tunnel header */ | |
2853 | /* for tunnel-qp0 sends, tunnel header is already in s/g list */ | |
915ec7ed | 2854 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) |
1ffeb2eb JM |
2855 | send_size += sizeof (struct mlx4_ib_tunnel_header); |
2856 | ||
25f40220 | 2857 | ib_ud_header_init(send_size, 1, 0, 0, 0, 0, 0, 0, &sqp->ud_header); |
1ffeb2eb | 2858 | |
915ec7ed | 2859 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_SMI_OWNER) { |
1ffeb2eb JM |
2860 | sqp->ud_header.lrh.service_level = |
2861 | be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; | |
2862 | sqp->ud_header.lrh.destination_lid = | |
2863 | cpu_to_be16(ah->av.ib.g_slid & 0x7f); | |
2864 | sqp->ud_header.lrh.source_lid = | |
2865 | cpu_to_be16(ah->av.ib.g_slid & 0x7f); | |
2866 | } | |
2867 | ||
2868 | mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | |
2869 | ||
2870 | /* force loopback */ | |
2871 | mlx->flags |= cpu_to_be32(MLX4_WQE_MLX_VL15 | 0x1 | MLX4_WQE_MLX_SLR); | |
2872 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | |
2873 | ||
2874 | sqp->ud_header.lrh.virtual_lane = 0; | |
e622f2f4 | 2875 | sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); |
915ec7ed | 2876 | err = ib_get_cached_pkey(ib_dev, qp->port, 0, &pkey); |
6693ca95 JM |
2877 | if (err) |
2878 | return err; | |
1ffeb2eb | 2879 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); |
915ec7ed | 2880 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER) |
e622f2f4 | 2881 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); |
1ffeb2eb JM |
2882 | else |
2883 | sqp->ud_header.bth.destination_qpn = | |
915ec7ed | 2884 | cpu_to_be32(mdev->dev->caps.spec_qps[qp->port - 1].qp0_tunnel); |
1ffeb2eb JM |
2885 | |
2886 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); | |
99ec41d0 | 2887 | if (mlx4_is_master(mdev->dev)) { |
915ec7ed | 2888 | if (mlx4_get_parav_qkey(mdev->dev, qp->mqp.qpn, &qkey)) |
99ec41d0 JM |
2889 | return -EINVAL; |
2890 | } else { | |
915ec7ed | 2891 | if (vf_get_qp0_qkey(mdev->dev, qp->mqp.qpn, &qkey)) |
99ec41d0 JM |
2892 | return -EINVAL; |
2893 | } | |
1ffeb2eb | 2894 | sqp->ud_header.deth.qkey = cpu_to_be32(qkey); |
915ec7ed | 2895 | sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->mqp.qpn); |
1ffeb2eb JM |
2896 | |
2897 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; | |
2898 | sqp->ud_header.immediate_present = 0; | |
2899 | ||
2900 | header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); | |
2901 | ||
2902 | /* | |
2903 | * Inline data segments may not cross a 64 byte boundary. If | |
2904 | * our UD header is bigger than the space available up to the | |
2905 | * next 64 byte boundary in the WQE, use two inline data | |
2906 | * segments to hold the UD header. | |
2907 | */ | |
2908 | spc = MLX4_INLINE_ALIGN - | |
2909 | ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); | |
2910 | if (header_size <= spc) { | |
2911 | inl->byte_count = cpu_to_be32(1 << 31 | header_size); | |
2912 | memcpy(inl + 1, sqp->header_buf, header_size); | |
2913 | i = 1; | |
2914 | } else { | |
2915 | inl->byte_count = cpu_to_be32(1 << 31 | spc); | |
2916 | memcpy(inl + 1, sqp->header_buf, spc); | |
2917 | ||
2918 | inl = (void *) (inl + 1) + spc; | |
2919 | memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); | |
2920 | /* | |
2921 | * Need a barrier here to make sure all the data is | |
2922 | * visible before the byte_count field is set. | |
2923 | * Otherwise the HCA prefetcher could grab the 64-byte | |
2924 | * chunk with this inline segment and get a valid (!= | |
2925 | * 0xffffffff) byte count but stale data, and end up | |
2926 | * generating a packet with bad headers. | |
2927 | * | |
2928 | * The first inline segment's byte_count field doesn't | |
2929 | * need a barrier, because it comes after a | |
2930 | * control/MLX segment and therefore is at an offset | |
2931 | * of 16 mod 64. | |
2932 | */ | |
2933 | wmb(); | |
2934 | inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); | |
2935 | i = 2; | |
2936 | } | |
2937 | ||
2938 | *mlx_seg_len = | |
2939 | ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); | |
2940 | return 0; | |
2941 | } | |
2942 | ||
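| /* |
|  * Look up the VL for a given SL in the cached SL-to-VL table. Each byte |
|  * of the table packs two 4-bit VL entries: even SLs use the high nibble, |
|  * odd SLs the low nibble; out-of-range SLs map to VL 0xf. |
|  */ |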
fd10ed8e JM |
2943 | static u8 sl_to_vl(struct mlx4_ib_dev *dev, u8 sl, int port_num) |
2944 | { | |
2945 | union sl2vl_tbl_to_u64 tmp_vltab; | |
2946 | u8 vl; | |
2947 | ||
2948 | if (sl > 15) | |
2949 | return 0xf; | |
2950 | tmp_vltab.sl64 = atomic64_read(&dev->sl2vl[port_num - 1]); | |
2951 | vl = tmp_vltab.sl8[sl >> 1]; | |
2952 | if (sl & 1) | |
2953 | vl &= 0x0f; | |
2954 | else | |
2955 | vl >>= 4; | |
2956 | return vl; | |
2957 | } | |
2958 | ||
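| /* |
|  * Read a GID and its type directly from the driver's per-port GID table |
|  * (which mirrors the HW table) under the iboe lock; returns -ENOENT if |
|  * the entry at this index is the zero GID. |
|  */ |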
a748d60d TB |
2959 | static int fill_gid_by_hw_index(struct mlx4_ib_dev *ibdev, u8 port_num, |
2960 | int index, union ib_gid *gid, | |
2961 | enum ib_gid_type *gid_type) | |
2962 | { | |
2963 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; | |
2964 | struct mlx4_port_gid_table *port_gid_table; | |
2965 | unsigned long flags; | |
2966 | ||
2967 | port_gid_table = &iboe->gids[port_num - 1]; | |
2968 | spin_lock_irqsave(&iboe->lock, flags); | |
2969 | memcpy(gid, &port_gid_table->gids[index].gid, sizeof(*gid)); | |
2970 | *gid_type = port_gid_table->gids[index].gid_type; | |
2971 | spin_unlock_irqrestore(&iboe->lock, flags); | |
25e62655 | 2972 | if (rdma_is_zero_gid(gid)) |
a748d60d TB |
2973 | return -ENOENT; |
2974 | ||
2975 | return 0; | |
2976 | } | |
2977 | ||
3ef967a4 | 2978 | #define MLX4_ROCEV2_QP1_SPORT 0xC000 |
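| /* |
|  * Build the wire header for an MLX-transport (QP0/QP1) send and place it |
|  * inline in the WQE: LRH/GRH for IB ports, or Ethernet/IP/UDP headers for |
|  * RoCE v1/v2, followed by BTH and DETH. The header is split across two |
|  * inline segments if it would cross a 64-byte boundary. |
|  */ |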
915ec7ed | 2979 | static int build_mlx_header(struct mlx4_ib_qp *qp, const struct ib_ud_wr *wr, |
f438000f | 2980 | void *wqe, unsigned *mlx_seg_len) |
225c7b1f | 2981 | { |
915ec7ed LR |
2982 | struct mlx4_ib_sqp *sqp = qp->sqp; |
2983 | struct ib_device *ib_dev = qp->ibqp.device; | |
a748d60d | 2984 | struct mlx4_ib_dev *ibdev = to_mdev(ib_dev); |
225c7b1f | 2985 | struct mlx4_wqe_mlx_seg *mlx = wqe; |
6ee51a4e | 2986 | struct mlx4_wqe_ctrl_seg *ctrl = wqe; |
225c7b1f | 2987 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; |
e622f2f4 | 2988 | struct mlx4_ib_ah *ah = to_mah(wr->ah); |
4c3eb3ca | 2989 | union ib_gid sgid; |
225c7b1f RD |
2990 | u16 pkey; |
2991 | int send_size; | |
2992 | int header_size; | |
e61ef241 | 2993 | int spc; |
225c7b1f | 2994 | int i; |
1ffeb2eb | 2995 | int err = 0; |
57d88cff | 2996 | u16 vlan = 0xffff; |
a29bec12 RD |
2997 | bool is_eth; |
2998 | bool is_vlan = false; | |
2999 | bool is_grh; | |
3ef967a4 MS |
3000 | bool is_udp = false; |
3001 | int ip_version = 0; | |
225c7b1f RD |
3002 | |
3003 | send_size = 0; | |
e622f2f4 CH |
3004 | for (i = 0; i < wr->wr.num_sge; ++i) |
3005 | send_size += wr->wr.sg_list[i].length; | |
225c7b1f | 3006 | |
915ec7ed | 3007 | is_eth = rdma_port_get_link_layer(qp->ibqp.device, qp->port) == IB_LINK_LAYER_ETHERNET; |
fa417f7b | 3008 | is_grh = mlx4_ib_ah_grh_present(ah); |
4c3eb3ca | 3009 | if (is_eth) { |
a748d60d | 3010 | enum ib_gid_type gid_type; |
1ffeb2eb JM |
3011 | if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { |
3012 | /* When multi-function is enabled, the ib_core gid | |
3013 | * indexes don't necessarily match the hw ones, so | |
3014 | * we must use our own cache */ | |
6ee51a4e JM |
3015 | err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev, |
3016 | be32_to_cpu(ah->av.ib.port_pd) >> 24, | |
3017 | ah->av.ib.gid_index, &sgid.raw[0]); | |
3018 | if (err) | |
3019 | return err; | |
1ffeb2eb | 3020 | } else { |
915ec7ed LR |
3021 | err = fill_gid_by_hw_index(ibdev, qp->port, |
3022 | ah->av.ib.gid_index, &sgid, | |
3023 | &gid_type); | |
3ef967a4 | 3024 | if (!err) { |
a748d60d | 3025 | is_udp = gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP; |
3ef967a4 MS |
3026 | if (is_udp) { |
3027 | if (ipv6_addr_v4mapped((struct in6_addr *)&sgid)) | |
3028 | ip_version = 4; | |
3029 | else | |
3030 | ip_version = 6; | |
3031 | is_grh = false; | |
3032 | } | |
3033 | } else { | |
1ffeb2eb | 3034 | return err; |
3ef967a4 | 3035 | } |
1ffeb2eb | 3036 | } |
0e9855db | 3037 | if (ah->av.eth.vlan != cpu_to_be16(0xffff)) { |
297e0dad | 3038 | vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff; |
cf368beb | 3039 | is_vlan = true; |
297e0dad | 3040 | } |
4c3eb3ca | 3041 | } |
25f40220 | 3042 | err = ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, |
3ef967a4 | 3043 | ip_version, is_udp, 0, &sqp->ud_header); |
25f40220 MS |
3044 | if (err) |
3045 | return err; | |
fa417f7b EC |
3046 | |
3047 | if (!is_eth) { | |
3048 | sqp->ud_header.lrh.service_level = | |
3049 | be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; | |
3050 | sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; | |
3051 | sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); | |
3052 | } | |
225c7b1f | 3053 | |
3ef967a4 | 3054 | if (is_grh || (ip_version == 6)) { |
225c7b1f | 3055 | sqp->ud_header.grh.traffic_class = |
fa417f7b | 3056 | (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; |
225c7b1f | 3057 | sqp->ud_header.grh.flow_label = |
fa417f7b EC |
3058 | ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); |
3059 | sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; | |
baa0be70 | 3060 | if (is_eth) { |
6ee51a4e | 3061 | memcpy(sqp->ud_header.grh.source_gid.raw, sgid.raw, 16); |
baa0be70 JM |
3062 | } else { |
3063 | if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) { | |
3064 | /* When multi-function is enabled, the ib_core gid | |
3065 | * indexes don't necessarily match the hw ones, so | |
3066 | * we must use our own cache | |
3067 | */ | |
915ec7ed LR |
3068 | sqp->ud_header.grh.source_gid.global |
3069 | .subnet_prefix = | |
3070 | cpu_to_be64(atomic64_read( | |
3071 | &(to_mdev(ib_dev) | |
3072 | ->sriov | |
3073 | .demux[qp->port - 1] | |
3074 | .subnet_prefix))); | |
3075 | sqp->ud_header.grh.source_gid.global | |
3076 | .interface_id = | |
3077 | to_mdev(ib_dev) | |
3078 | ->sriov.demux[qp->port - 1] | |
3079 | .guid_cache[ah->av.ib.gid_index]; | |
baa0be70 | 3080 | } else { |
89af969a PP |
3081 | sqp->ud_header.grh.source_gid = |
3082 | ah->ibah.sgid_attr->gid; | |
baa0be70 | 3083 | } |
6ee51a4e | 3084 | } |
225c7b1f | 3085 | memcpy(sqp->ud_header.grh.destination_gid.raw, |
fa417f7b | 3086 | ah->av.ib.dgid, 16); |
225c7b1f RD |
3087 | } |
3088 | ||
3ef967a4 MS |
3089 | if (ip_version == 4) { |
3090 | sqp->ud_header.ip4.tos = | |
3091 | (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; | |
3092 | sqp->ud_header.ip4.id = 0; | |
3093 | sqp->ud_header.ip4.frag_off = htons(IP_DF); | |
3094 | sqp->ud_header.ip4.ttl = ah->av.eth.hop_limit; | |
3095 | ||
3096 | memcpy(&sqp->ud_header.ip4.saddr, | |
3097 | sgid.raw + 12, 4); | |
3098 | memcpy(&sqp->ud_header.ip4.daddr, ah->av.ib.dgid + 12, 4); | |
3099 | sqp->ud_header.ip4.check = ib_ud_ip4_csum(&sqp->ud_header); | |
3100 | } | |
3101 | ||
3102 | if (is_udp) { | |
3103 | sqp->ud_header.udp.dport = htons(ROCE_V2_UDP_DPORT); | |
3104 | sqp->ud_header.udp.sport = htons(MLX4_ROCEV2_QP1_SPORT); | |
3105 | sqp->ud_header.udp.csum = 0; | |
3106 | } | |
3107 | ||
225c7b1f | 3108 | mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); |
fa417f7b EC |
3109 | |
3110 | if (!is_eth) { | |
915ec7ed LR |
3111 | mlx->flags |= |
3112 | cpu_to_be32((!qp->ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | | |
3113 | (sqp->ud_header.lrh.destination_lid == | |
3114 | IB_LID_PERMISSIVE ? | |
3115 | MLX4_WQE_MLX_SLR : | |
3116 | 0) | | |
3117 | (sqp->ud_header.lrh.service_level << 8)); | |
1ffeb2eb JM |
3118 | if (ah->av.ib.port_pd & cpu_to_be32(0x80000000)) |
3119 | mlx->flags |= cpu_to_be32(0x1); /* force loopback */ | |
fa417f7b EC |
3120 | mlx->rlid = sqp->ud_header.lrh.destination_lid; |
3121 | } | |
225c7b1f | 3122 | |
e622f2f4 | 3123 | switch (wr->wr.opcode) { |
225c7b1f RD |
3124 | case IB_WR_SEND: |
3125 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; | |
3126 | sqp->ud_header.immediate_present = 0; | |
3127 | break; | |
3128 | case IB_WR_SEND_WITH_IMM: | |
3129 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; | |
3130 | sqp->ud_header.immediate_present = 1; | |
e622f2f4 | 3131 | sqp->ud_header.immediate_data = wr->wr.ex.imm_data; |
225c7b1f RD |
3132 | break; |
3133 | default: | |
3134 | return -EINVAL; | |
3135 | } | |
3136 | ||
fa417f7b | 3137 | if (is_eth) { |
3ef967a4 | 3138 | u16 ether_type; |
c0c1d3d7 OD |
3139 | u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13; |
3140 | ||
69ae5439 | 3141 | ether_type = (!is_udp) ? ETH_P_IBOE: |
3ef967a4 MS |
3142 | (ip_version == 4 ? ETH_P_IP : ETH_P_IPV6); |
3143 | ||
c0c1d3d7 | 3144 | mlx->sched_prio = cpu_to_be16(pcp); |
fa417f7b | 3145 | |
1049f138 | 3146 | ether_addr_copy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac); |
fa417f7b | 3147 | memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); |
6ee51a4e JM |
3148 | memcpy(&ctrl->srcrb_flags16[0], ah->av.eth.mac, 2); |
3149 | memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4); | |
3e0629cb | 3150 | |
fa417f7b EC |
3151 | if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) |
3152 | mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); | |
4c3eb3ca | 3153 | if (!is_vlan) { |
3ef967a4 | 3154 | sqp->ud_header.eth.type = cpu_to_be16(ether_type); |
4c3eb3ca | 3155 | } else { |
3ef967a4 | 3156 | sqp->ud_header.vlan.type = cpu_to_be16(ether_type); |
4c3eb3ca EC |
3157 | sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); |
3158 | } | |
fa417f7b | 3159 | } else { |
915ec7ed LR |
3160 | sqp->ud_header.lrh.virtual_lane = |
3161 | !qp->ibqp.qp_num ? | |
3162 | 15 : | |
3163 | sl_to_vl(to_mdev(ib_dev), | |
3164 | sqp->ud_header.lrh.service_level, | |
3165 | qp->port); | |
3166 | if (qp->ibqp.qp_num && sqp->ud_header.lrh.virtual_lane == 15) | |
fd10ed8e | 3167 | return -EINVAL; |
fa417f7b EC |
3168 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) |
3169 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | |
3170 | } | |
e622f2f4 | 3171 | sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED); |
915ec7ed LR |
3172 | if (!qp->ibqp.qp_num) |
3173 | err = ib_get_cached_pkey(ib_dev, qp->port, sqp->pkey_index, | |
6693ca95 | 3174 | &pkey); |
225c7b1f | 3175 | else |
915ec7ed | 3176 | err = ib_get_cached_pkey(ib_dev, qp->port, wr->pkey_index, |
6693ca95 JM |
3177 | &pkey); |
3178 | if (err) | |
3179 | return err; | |
3180 | ||
225c7b1f | 3181 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); |
e622f2f4 | 3182 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn); |
225c7b1f | 3183 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); |
e622f2f4 CH |
3184 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ? |
3185 | sqp->qkey : wr->remote_qkey); | |
915ec7ed | 3186 | sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); |
225c7b1f RD |
3187 | |
3188 | header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); | |
3189 | ||
3190 | if (0) { | |
987c8f8f | 3191 | pr_err("built UD header of size %d:\n", header_size); |
225c7b1f RD |
3192 | for (i = 0; i < header_size / 4; ++i) { |
3193 | if (i % 8 == 0) | |
987c8f8f SP |
3194 | pr_err(" [%02x] ", i * 4); |
3195 | pr_cont(" %08x", | |
3196 | be32_to_cpu(((__be32 *) sqp->header_buf)[i])); | |
225c7b1f | 3197 | if ((i + 1) % 8 == 0) |
987c8f8f | 3198 | pr_cont("\n"); |
225c7b1f | 3199 | } |
987c8f8f | 3200 | pr_err("\n"); |
225c7b1f RD |
3201 | } |
3202 | ||
e61ef241 RD |
3203 | /* |
3204 | * Inline data segments may not cross a 64 byte boundary. If | |
3205 | * our UD header is bigger than the space available up to the | |
3206 | * next 64 byte boundary in the WQE, use two inline data | |
3207 | * segments to hold the UD header. | |
3208 | */ | |
3209 | spc = MLX4_INLINE_ALIGN - | |
3210 | ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); | |
3211 | if (header_size <= spc) { | |
3212 | inl->byte_count = cpu_to_be32(1 << 31 | header_size); | |
3213 | memcpy(inl + 1, sqp->header_buf, header_size); | |
3214 | i = 1; | |
3215 | } else { | |
3216 | inl->byte_count = cpu_to_be32(1 << 31 | spc); | |
3217 | memcpy(inl + 1, sqp->header_buf, spc); | |
3218 | ||
3219 | inl = (void *) (inl + 1) + spc; | |
3220 | memcpy(inl + 1, sqp->header_buf + spc, header_size - spc); | |
3221 | /* | |
3222 | * Need a barrier here to make sure all the data is | |
3223 | * visible before the byte_count field is set. | |
3224 | * Otherwise the HCA prefetcher could grab the 64-byte | |
3225 | * chunk with this inline segment and get a valid (!= | |
3226 | * 0xffffffff) byte count but stale data, and end up | |
3227 | * generating a packet with bad headers. | |
3228 | * | |
3229 | * The first inline segment's byte_count field doesn't | |
3230 | * need a barrier, because it comes after a | |
3231 | * control/MLX segment and therefore is at an offset | |
3232 | * of 16 mod 64. | |
3233 | */ | |
3234 | wmb(); | |
3235 | inl->byte_count = cpu_to_be32(1 << 31 | (header_size - spc)); | |
3236 | i = 2; | |
3237 | } | |
225c7b1f | 3238 | |
f438000f RD |
3239 | *mlx_seg_len = |
3240 | ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + header_size, 16); | |
3241 | return 0; | |
225c7b1f RD |
3242 | } |
3243 | ||
3244 | static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq) | |
3245 | { | |
3246 | unsigned cur; | |
3247 | struct mlx4_ib_cq *cq; | |
3248 | ||
3249 | cur = wq->head - wq->tail; | |
0e6e7416 | 3250 | if (likely(cur + nreq < wq->max_post)) |
225c7b1f RD |
3251 | return 0; |
3252 | ||
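| /* |
|  * Looks full on the unlocked fast path; recheck under the CQ lock, since |
|  * wq->tail is only advanced by CQ polling while that lock is held. |
|  */ |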
3253 | cq = to_mcq(ib_cq); | |
3254 | spin_lock(&cq->lock); | |
3255 | cur = wq->head - wq->tail; | |
3256 | spin_unlock(&cq->lock); | |
3257 | ||
0e6e7416 | 3258 | return cur + nreq >= wq->max_post; |
225c7b1f RD |
3259 | } |
3260 | ||
95d04f07 RD |
3261 | static __be32 convert_access(int acc) |
3262 | { | |
6ff63e19 SM |
3263 | return (acc & IB_ACCESS_REMOTE_ATOMIC ? |
3264 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC) : 0) | | |
3265 | (acc & IB_ACCESS_REMOTE_WRITE ? | |
3266 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) | | |
3267 | (acc & IB_ACCESS_REMOTE_READ ? | |
3268 | cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ) : 0) | | |
95d04f07 RD |
3269 | (acc & IB_ACCESS_LOCAL_WRITE ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE) : 0) | |
3270 | cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ); | |
3271 | } | |
3272 | ||
1b2cd0fc | 3273 | static void set_reg_seg(struct mlx4_wqe_fmr_seg *fseg, |
f696bf6d | 3274 | const struct ib_reg_wr *wr) |
1b2cd0fc SG |
3275 | { |
3276 | struct mlx4_ib_mr *mr = to_mmr(wr->mr); | |
3277 | ||
3278 | fseg->flags = convert_access(wr->access); | |
3279 | fseg->mem_key = cpu_to_be32(wr->key); | |
3280 | fseg->buf_list = cpu_to_be64(mr->page_map); | |
3281 | fseg->start_addr = cpu_to_be64(mr->ibmr.iova); | |
3282 | fseg->reg_len = cpu_to_be64(mr->ibmr.length); | |
3283 | fseg->offset = 0; /* XXX -- is this just for ZBVA? */ | |
3284 | fseg->page_size = cpu_to_be32(ilog2(mr->ibmr.page_size)); | |
3285 | fseg->reserved[0] = 0; | |
3286 | fseg->reserved[1] = 0; | |
3287 | } | |
3288 | ||
95d04f07 RD |
3289 | static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey) |
3290 | { | |
aee38fad SM |
3291 | memset(iseg, 0, sizeof(*iseg)); |
3292 | iseg->mem_key = cpu_to_be32(rkey); | |
95d04f07 RD |
3293 | } |
3294 | ||
0fbfa6a9 RD |
3295 | static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, |
3296 | u64 remote_addr, u32 rkey) | |
3297 | { | |
3298 | rseg->raddr = cpu_to_be64(remote_addr); | |
3299 | rseg->rkey = cpu_to_be32(rkey); | |
3300 | rseg->reserved = 0; | |
3301 | } | |
3302 | ||
e622f2f4 | 3303 | static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, |
f696bf6d | 3304 | const struct ib_atomic_wr *wr) |
0fbfa6a9 | 3305 | { |
e622f2f4 CH |
3306 | if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { |
3307 | aseg->swap_add = cpu_to_be64(wr->swap); | |
3308 | aseg->compare = cpu_to_be64(wr->compare_add); | |
3309 | } else if (wr->wr.opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { | |
3310 | aseg->swap_add = cpu_to_be64(wr->compare_add); | |
3311 | aseg->compare = cpu_to_be64(wr->compare_add_mask); | |
0fbfa6a9 | 3312 | } else { |
e622f2f4 | 3313 | aseg->swap_add = cpu_to_be64(wr->compare_add); |
0fbfa6a9 RD |
3314 | aseg->compare = 0; |
3315 | } | |
3316 | ||
3317 | } | |
3318 | ||
6fa8f719 | 3319 | static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, |
f696bf6d | 3320 | const struct ib_atomic_wr *wr) |
6fa8f719 | 3321 | { |
e622f2f4 CH |
3322 | aseg->swap_add = cpu_to_be64(wr->swap); |
3323 | aseg->swap_add_mask = cpu_to_be64(wr->swap_mask); | |
3324 | aseg->compare = cpu_to_be64(wr->compare_add); | |
3325 | aseg->compare_mask = cpu_to_be64(wr->compare_add_mask); | |
6fa8f719 VS |
3326 | } |
3327 | ||
0fbfa6a9 | 3328 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, |
f696bf6d | 3329 | const struct ib_ud_wr *wr) |
0fbfa6a9 | 3330 | { |
e622f2f4 CH |
3331 | memcpy(dseg->av, &to_mah(wr->ah)->av, sizeof (struct mlx4_av)); |
3332 | dseg->dqpn = cpu_to_be32(wr->remote_qpn); | |
3333 | dseg->qkey = cpu_to_be32(wr->remote_qkey); | |
3334 | dseg->vlan = to_mah(wr->ah)->av.eth.vlan; | |
3335 | memcpy(dseg->mac, to_mah(wr->ah)->av.eth.mac, 6); | |
0fbfa6a9 RD |
3336 | } |
3337 | ||
1ffeb2eb JM |
3338 | static void set_tunnel_datagram_seg(struct mlx4_ib_dev *dev, |
3339 | struct mlx4_wqe_datagram_seg *dseg, | |
f696bf6d | 3340 | const struct ib_ud_wr *wr, |
97982f5a | 3341 | enum mlx4_ib_qp_type qpt) |
1ffeb2eb | 3342 | { |
e622f2f4 | 3343 | union mlx4_ext_av *av = &to_mah(wr->ah)->av; |
1ffeb2eb JM |
3344 | struct mlx4_av sqp_av = {0}; |
3345 | int port = *((u8 *) &av->ib.port_pd) & 0x3; | |
3346 | ||
3347 | /* force loopback */ | |
3348 | sqp_av.port_pd = av->ib.port_pd | cpu_to_be32(0x80000000); | |
3349 | sqp_av.g_slid = av->ib.g_slid & 0x7f; /* no GRH */ | |
3350 | sqp_av.sl_tclass_flowlabel = av->ib.sl_tclass_flowlabel & | |
3351 | cpu_to_be32(0xf0000000); | |
3352 | ||
3353 | memcpy(dseg->av, &sqp_av, sizeof (struct mlx4_av)); | |
97982f5a | 3354 | if (qpt == MLX4_IB_QPT_PROXY_GSI) |
c73c8b1e | 3355 | dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp1_tunnel); |
97982f5a | 3356 | else |
c73c8b1e | 3357 | dseg->dqpn = cpu_to_be32(dev->dev->caps.spec_qps[port - 1].qp0_tunnel); |
47605df9 JM |
3358 | /* Use QKEY from the QP context, which is set by master */ |
3359 | dseg->qkey = cpu_to_be32(IB_QP_SET_QKEY); | |
1ffeb2eb JM |
3360 | } |
3361 | ||
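| /* |
|  * Write the tunnel header (original AV, remote QPN, P_Key index, qkey, |
|  * MAC/VLAN) as inline data so the other end of the tunnel can recover |
|  * the real destination; split into two inline segments if it would |
|  * cross a 64-byte boundary. |
|  */ |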
f696bf6d BVA |
3362 | static void build_tunnel_header(const struct ib_ud_wr *wr, void *wqe, |
3363 | unsigned *mlx_seg_len) | |
1ffeb2eb JM |
3364 | { |
3365 | struct mlx4_wqe_inline_seg *inl = wqe; | |
3366 | struct mlx4_ib_tunnel_header hdr; | |
e622f2f4 | 3367 | struct mlx4_ib_ah *ah = to_mah(wr->ah); |
1ffeb2eb JM |
3368 | int spc; |
3369 | int i; | |
3370 | ||
3371 | memcpy(&hdr.av, &ah->av, sizeof hdr.av); | |
e622f2f4 CH |
3372 | hdr.remote_qpn = cpu_to_be32(wr->remote_qpn); |
3373 | hdr.pkey_index = cpu_to_be16(wr->pkey_index); | |
3374 | hdr.qkey = cpu_to_be32(wr->remote_qkey); | |
5ea8bbfc JM |
3375 | memcpy(hdr.mac, ah->av.eth.mac, 6); |
3376 | hdr.vlan = ah->av.eth.vlan; | |
1ffeb2eb JM |
3377 | |
3378 | spc = MLX4_INLINE_ALIGN - | |
3379 | ((unsigned long) (inl + 1) & (MLX4_INLINE_ALIGN - 1)); | |
3380 | if (sizeof (hdr) <= spc) { | |
3381 | memcpy(inl + 1, &hdr, sizeof (hdr)); | |
3382 | wmb(); | |
3383 | inl->byte_count = cpu_to_be32(1 << 31 | sizeof (hdr)); | |
3384 | i = 1; | |
3385 | } else { | |
3386 | memcpy(inl + 1, &hdr, spc); | |
3387 | wmb(); | |
3388 | inl->byte_count = cpu_to_be32(1 << 31 | spc); | |
3389 | ||
3390 | inl = (void *) (inl + 1) + spc; | |
3391 | memcpy(inl + 1, (void *) &hdr + spc, sizeof (hdr) - spc); | |
3392 | wmb(); | |
3393 | inl->byte_count = cpu_to_be32(1 << 31 | (sizeof (hdr) - spc)); | |
3394 | i = 2; | |
3395 | } | |
3396 | ||
3397 | *mlx_seg_len = | |
3398 | ALIGN(i * sizeof (struct mlx4_wqe_inline_seg) + sizeof (hdr), 16); | |
3399 | } | |
3400 | ||
6e694ea3 JM |
3401 | static void set_mlx_icrc_seg(void *dseg) |
3402 | { | |
3403 | u32 *t = dseg; | |
3404 | struct mlx4_wqe_inline_seg *iseg = dseg; | |
3405 | ||
3406 | t[1] = 0; | |
3407 | ||
3408 | /* | |
3409 | * Need a barrier here before writing the byte_count field to | |
3410 | * make sure that all the data is visible before the | |
3411 | * byte_count field is set. Otherwise, if the segment begins | |
3412 | * a new cacheline, the HCA prefetcher could grab the 64-byte | |
3413 | * chunk and get a valid (!= 0xffffffff) byte count but |
3414 | * stale data, and end up sending the wrong data. | |
3415 | */ | |
3416 | wmb(); | |
3417 | ||
3418 | iseg->byte_count = cpu_to_be32((1 << 31) | 4); | |
3419 | } | |
3420 | ||
3421 | static void set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) | |
d420d9e3 | 3422 | { |
d420d9e3 RD |
3423 | dseg->lkey = cpu_to_be32(sg->lkey); |
3424 | dseg->addr = cpu_to_be64(sg->addr); | |
6e694ea3 JM |
3425 | |
3426 | /* | |
3427 | * Need a barrier here before writing the byte_count field to | |
3428 | * make sure that all the data is visible before the | |
3429 | * byte_count field is set. Otherwise, if the segment begins | |
3430 | * a new cacheline, the HCA prefetcher could grab the 64-byte | |
3431 | * chunk and get a valid (!= 0xffffffff) byte count but |
3432 | * stale data, and end up sending the wrong data. | |
3433 | */ | |
3434 | wmb(); | |
3435 | ||
3436 | dseg->byte_count = cpu_to_be32(sg->length); | |
d420d9e3 RD |
3437 | } |
3438 | ||
2242fa4f RD |
3439 | static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg) |
3440 | { | |
3441 | dseg->byte_count = cpu_to_be32(sg->length); | |
3442 | dseg->lkey = cpu_to_be32(sg->lkey); | |
3443 | dseg->addr = cpu_to_be64(sg->addr); | |
3444 | } | |
3445 | ||
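| /* |
|  * Copy the LSO header into the WQE and return its 16-byte-aligned length. |
|  * If the header spills past one 64-byte cache line, set the blh flag, |
|  * which the caller ORs into the ctrl segment's owner_opcode. |
|  */ |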
f696bf6d BVA |
3446 | static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, |
3447 | const struct ib_ud_wr *wr, struct mlx4_ib_qp *qp, | |
3448 | unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) | |
b832be1e | 3449 | { |
e622f2f4 | 3450 | unsigned halign = ALIGN(sizeof *wqe + wr->hlen, 16); |
b832be1e | 3451 | |
417608c2 EC |
3452 | if (unlikely(halign > MLX4_IB_CACHE_LINE_SIZE)) |
3453 | *blh = cpu_to_be32(1 << 6); | |
b832be1e EC |
3454 | |
3455 | if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) && | |
e622f2f4 | 3456 | wr->wr.num_sge > qp->sq.max_gs - (halign >> 4))) |
b832be1e EC |
3457 | return -EINVAL; |
3458 | ||
e622f2f4 | 3459 | memcpy(wqe->header, wr->header, wr->hlen); |
b832be1e | 3460 | |
e622f2f4 | 3461 | *lso_hdr_sz = cpu_to_be32(wr->mss << 16 | wr->hlen); |
b832be1e EC |
3462 | *lso_seg_len = halign; |
3463 | return 0; | |
3464 | } | |
3465 | ||
f696bf6d | 3466 | static __be32 send_ieth(const struct ib_send_wr *wr) |
95d04f07 RD |
3467 | { |
3468 | switch (wr->opcode) { | |
3469 | case IB_WR_SEND_WITH_IMM: | |
3470 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
3471 | return wr->ex.imm_data; | |
3472 | ||
3473 | case IB_WR_SEND_WITH_INV: | |
3474 | return cpu_to_be32(wr->ex.invalidate_rkey); | |
3475 | ||
3476 | default: | |
3477 | return 0; | |
3478 | } | |
3479 | } | |
3480 | ||
1ffeb2eb JM |
3481 | static void add_zero_len_inline(void *wqe) |
3482 | { | |
3483 | struct mlx4_wqe_inline_seg *inl = wqe; | |
3484 | memset(wqe, 0, 16); | |
3485 | inl->byte_count = cpu_to_be32(1 << 31); | |
3486 | } | |
3487 | ||
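| /* |
|  * Post a chain of send WRs: for each WR build the ctrl segment, any |
|  * transport-specific segments (RDMA/atomic, UD address, proxy or MLX |
|  * headers), then the data segments in reverse order, and ring the |
|  * doorbell once for the whole chain. |
|  */ |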
d34ac5cd BVA |
3488 | static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
3489 | const struct ib_send_wr **bad_wr, bool drain) | |
225c7b1f RD |
3490 | { |
3491 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | |
3492 | void *wqe; | |
3493 | struct mlx4_wqe_ctrl_seg *ctrl; | |
6e694ea3 | 3494 | struct mlx4_wqe_data_seg *dseg; |
225c7b1f RD |
3495 | unsigned long flags; |
3496 | int nreq; | |
3497 | int err = 0; | |
ea54b10c | 3498 | unsigned ind; |
3f649ab7 KC |
3499 | int size; |
3500 | unsigned seglen; | |
0fd7e1d8 RD |
3501 | __be32 dummy; |
3502 | __be32 *lso_wqe; | |
3f649ab7 | 3503 | __be32 lso_hdr_sz; |
417608c2 | 3504 | __be32 blh; |
225c7b1f | 3505 | int i; |
35f05dab | 3506 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); |
225c7b1f | 3507 | |
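| /* |
|  * For a GSI QP that owns a companion RoCE v2 GSI QP, steer the send to |
|  * the companion QP when the destination GID is a RoCE v2 GID. |
|  */ |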
e1b866c6 | 3508 | if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) { |
915ec7ed | 3509 | struct mlx4_ib_sqp *sqp = qp->sqp; |
e1b866c6 MS |
3510 | |
3511 | if (sqp->roce_v2_gsi) { | |
3512 | struct mlx4_ib_ah *ah = to_mah(ud_wr(wr)->ah); | |
a748d60d | 3513 | enum ib_gid_type gid_type; |
e1b866c6 MS |
3514 | union ib_gid gid; |
3515 | ||
915ec7ed | 3516 | if (!fill_gid_by_hw_index(mdev, qp->port, |
a748d60d TB |
3517 | ah->av.ib.gid_index, |
3518 | &gid, &gid_type)) | |
3519 | qp = (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? | |
3520 | to_mqp(sqp->roce_v2_gsi) : qp; | |
3521 | else | |
e1b866c6 MS |
3522 | pr_err("Failed to get gid at index %d. RoCEv2 will not work properly\n", |
3523 | ah->av.ib.gid_index); | |
e1b866c6 MS |
3524 | } |
3525 | } | |
3526 | ||
96db0e03 | 3527 | spin_lock_irqsave(&qp->sq.lock, flags); |
1975acd9 YH |
3528 | if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR && |
3529 | !drain) { | |
35f05dab YH |
3530 | err = -EIO; |
3531 | *bad_wr = wr; | |
3532 | nreq = 0; | |
3533 | goto out; | |
3534 | } | |
225c7b1f | 3535 | |
ea54b10c | 3536 | ind = qp->sq_next_wqe; |
225c7b1f RD |
3537 | |
3538 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
0fd7e1d8 | 3539 | lso_wqe = &dummy; |
417608c2 | 3540 | blh = 0; |
0fd7e1d8 | 3541 | |
225c7b1f RD |
3542 | if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { |
3543 | err = -ENOMEM; | |
3544 | *bad_wr = wr; | |
3545 | goto out; | |
3546 | } | |
3547 | ||
3548 | if (unlikely(wr->num_sge > qp->sq.max_gs)) { | |
3549 | err = -EINVAL; | |
3550 | *bad_wr = wr; | |
3551 | goto out; | |
3552 | } | |
3553 | ||
0e6e7416 | 3554 | ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); |
ea54b10c | 3555 | qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; |
225c7b1f RD |
3556 | |
3557 | ctrl->srcrb_flags = | |
3558 | (wr->send_flags & IB_SEND_SIGNALED ? | |
3559 | cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) | | |
3560 | (wr->send_flags & IB_SEND_SOLICITED ? | |
3561 | cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) | | |
8ff095ec EC |
3562 | ((wr->send_flags & IB_SEND_IP_CSUM) ? |
3563 | cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | | |
3564 | MLX4_WQE_CTRL_TCP_UDP_CSUM) : 0) | | |
225c7b1f RD |
3565 | qp->sq_signal_bits; |
3566 | ||
95d04f07 | 3567 | ctrl->imm = send_ieth(wr); |
225c7b1f RD |
3568 | |
3569 | wqe += sizeof *ctrl; | |
3570 | size = sizeof *ctrl / 16; | |
3571 | ||
1ffeb2eb JM |
3572 | switch (qp->mlx4_ib_qp_type) { |
3573 | case MLX4_IB_QPT_RC: | |
3574 | case MLX4_IB_QPT_UC: | |
225c7b1f RD |
3575 | switch (wr->opcode) { |
3576 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
3577 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
6fa8f719 | 3578 | case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: |
e622f2f4 CH |
3579 | set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, |
3580 | atomic_wr(wr)->rkey); | |
225c7b1f RD |
3581 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
3582 | ||
e622f2f4 | 3583 | set_atomic_seg(wqe, atomic_wr(wr)); |
225c7b1f | 3584 | wqe += sizeof (struct mlx4_wqe_atomic_seg); |
0fbfa6a9 | 3585 | |
225c7b1f RD |
3586 | size += (sizeof (struct mlx4_wqe_raddr_seg) + |
3587 | sizeof (struct mlx4_wqe_atomic_seg)) / 16; | |
6fa8f719 VS |
3588 | |
3589 | break; | |
3590 | ||
3591 | case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: | |
e622f2f4 CH |
3592 | set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, |
3593 | atomic_wr(wr)->rkey); | |
6fa8f719 VS |
3594 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
3595 | ||
e622f2f4 | 3596 | set_masked_atomic_seg(wqe, atomic_wr(wr)); |
6fa8f719 VS |
3597 | wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); |
3598 | ||
3599 | size += (sizeof (struct mlx4_wqe_raddr_seg) + | |
3600 | sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16; | |
225c7b1f RD |
3601 | |
3602 | break; | |
3603 | ||
3604 | case IB_WR_RDMA_READ: | |
3605 | case IB_WR_RDMA_WRITE: | |
3606 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
e622f2f4 CH |
3607 | set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, |
3608 | rdma_wr(wr)->rkey); | |
225c7b1f RD |
3609 | wqe += sizeof (struct mlx4_wqe_raddr_seg); |
3610 | size += sizeof (struct mlx4_wqe_raddr_seg) / 16; | |
225c7b1f | 3611 | break; |
95d04f07 RD |
3612 | |
3613 | case IB_WR_LOCAL_INV: | |
2ac6bf4d JM |
3614 | ctrl->srcrb_flags |= |
3615 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); | |
95d04f07 RD |
3616 | set_local_inv_seg(wqe, wr->ex.invalidate_rkey); |
3617 | wqe += sizeof (struct mlx4_wqe_local_inval_seg); | |
3618 | size += sizeof (struct mlx4_wqe_local_inval_seg) / 16; | |
3619 | break; | |
3620 | ||
1b2cd0fc SG |
3621 | case IB_WR_REG_MR: |
3622 | ctrl->srcrb_flags |= | |
3623 | cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); | |
3624 | set_reg_seg(wqe, reg_wr(wr)); | |
3625 | wqe += sizeof(struct mlx4_wqe_fmr_seg); | |
3626 | size += sizeof(struct mlx4_wqe_fmr_seg) / 16; | |
3627 | break; | |
3628 | ||
225c7b1f RD |
3629 | default: |
3630 | /* No extra segments required for sends */ | |
3631 | break; | |
3632 | } | |
3633 | break; | |
3634 | ||
1ffeb2eb | 3635 | case MLX4_IB_QPT_TUN_SMI_OWNER: |
915ec7ed LR |
3636 | err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, |
3637 | &seglen); | |
1ffeb2eb JM |
3638 | if (unlikely(err)) { |
3639 | *bad_wr = wr; | |
3640 | goto out; | |
3641 | } | |
3642 | wqe += seglen; | |
3643 | size += seglen / 16; | |
3644 | break; | |
3645 | case MLX4_IB_QPT_TUN_SMI: | |
3646 | case MLX4_IB_QPT_TUN_GSI: | |
3647 | /* this is a UD qp used in MAD responses to slaves. */ | |
e622f2f4 | 3648 | set_datagram_seg(wqe, ud_wr(wr)); |
1ffeb2eb JM |
3649 | /* set the forced-loopback bit in the data seg av */ |
3650 | *(__be32 *) wqe |= cpu_to_be32(0x80000000); | |
3651 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | |
3652 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | |
3653 | break; | |
3654 | case MLX4_IB_QPT_UD: | |
e622f2f4 | 3655 | set_datagram_seg(wqe, ud_wr(wr)); |
225c7b1f RD |
3656 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
3657 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | |
b832be1e EC |
3658 | |
3659 | if (wr->opcode == IB_WR_LSO) { | |
e622f2f4 CH |
3660 | err = build_lso_seg(wqe, ud_wr(wr), qp, &seglen, |
3661 | &lso_hdr_sz, &blh); | |
b832be1e EC |
3662 | if (unlikely(err)) { |
3663 | *bad_wr = wr; | |
3664 | goto out; | |
3665 | } | |
0fd7e1d8 | 3666 | lso_wqe = (__be32 *) wqe; |
b832be1e EC |
3667 | wqe += seglen; |
3668 | size += seglen / 16; | |
3669 | } | |
225c7b1f RD |
3670 | break; |
3671 | ||
1ffeb2eb | 3672 | case MLX4_IB_QPT_PROXY_SMI_OWNER: |
915ec7ed LR |
3673 | err = build_sriov_qp0_header(qp, ud_wr(wr), ctrl, |
3674 | &seglen); | |
1ffeb2eb JM |
3675 | if (unlikely(err)) { |
3676 | *bad_wr = wr; | |
3677 | goto out; | |
3678 | } | |
3679 | wqe += seglen; | |
3680 | size += seglen / 16; | |
3681 | /* to start tunnel header on a cache-line boundary */ | |
3682 | add_zero_len_inline(wqe); | |
3683 | wqe += 16; | |
3684 | size++; | |
e622f2f4 | 3685 | build_tunnel_header(ud_wr(wr), wqe, &seglen); |
1ffeb2eb JM |
3686 | wqe += seglen; |
3687 | size += seglen / 16; | |
3688 | break; | |
3689 | case MLX4_IB_QPT_PROXY_SMI: | |
1ffeb2eb JM |
3690 | case MLX4_IB_QPT_PROXY_GSI: |
3691 | /* If we are tunneling special qps, this is a UD qp. | |
3692 | * In this case we first add a UD segment targeting | |
3693 | * the tunnel qp, and then add a header with address | |
3694 | * information */ | |
e622f2f4 CH |
3695 | set_tunnel_datagram_seg(to_mdev(ibqp->device), wqe, |
3696 | ud_wr(wr), | |
97982f5a | 3697 | qp->mlx4_ib_qp_type); |
1ffeb2eb JM |
3698 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
3699 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | |
e622f2f4 | 3700 | build_tunnel_header(ud_wr(wr), wqe, &seglen); |
1ffeb2eb JM |
3701 | wqe += seglen; |
3702 | size += seglen / 16; | |
3703 | break; | |
3704 | ||
3705 | case MLX4_IB_QPT_SMI: | |
3706 | case MLX4_IB_QPT_GSI: | |
915ec7ed | 3707 | err = build_mlx_header(qp, ud_wr(wr), ctrl, &seglen); |
f438000f | 3708 | if (unlikely(err)) { |
225c7b1f RD |
3709 | *bad_wr = wr; |
3710 | goto out; | |
3711 | } | |
f438000f RD |
3712 | wqe += seglen; |
3713 | size += seglen / 16; | |
225c7b1f RD |
3714 | break; |
3715 | ||
3716 | default: | |
3717 | break; | |
3718 | } | |
3719 | ||
6e694ea3 JM |
3720 | /* |
3721 | * Write data segments in reverse order, so as to | |
3722 | * overwrite cacheline stamp last within each | |
3723 | * cacheline. This avoids issues with WQE | |
3724 | * prefetching. | |
3725 | */ | |
225c7b1f | 3726 | |
6e694ea3 JM |
3727 | dseg = wqe; |
3728 | dseg += wr->num_sge - 1; | |
3729 | size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16); | |
225c7b1f RD |
3730 | |
3731 | /* Add one more inline data segment for ICRC for MLX sends */ | |
1ffeb2eb JM |
3732 | if (unlikely(qp->mlx4_ib_qp_type == MLX4_IB_QPT_SMI || |
3733 | qp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI || | |
3734 | qp->mlx4_ib_qp_type & | |
3735 | (MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_TUN_SMI_OWNER))) { | |
6e694ea3 | 3736 | set_mlx_icrc_seg(dseg + 1); |
225c7b1f RD |
3737 | size += sizeof (struct mlx4_wqe_data_seg) / 16; |
3738 | } | |
3739 | ||
6e694ea3 JM |
3740 | for (i = wr->num_sge - 1; i >= 0; --i, --dseg) |
3741 | set_data_seg(dseg, wr->sg_list + i); | |
3742 | ||
0fd7e1d8 RD |
3743 | /* |
3744 | * Possibly overwrite stamping in cacheline with LSO | |
3745 | * segment only after making sure all data segments | |
3746 | * are written. | |
3747 | */ | |
3748 | wmb(); | |
3749 | *lso_wqe = lso_hdr_sz; | |
3750 | ||
224e92e0 BB |
3751 | ctrl->qpn_vlan.fence_size = (wr->send_flags & IB_SEND_FENCE ? |
3752 | MLX4_WQE_CTRL_FENCE : 0) | size; | |
225c7b1f RD |
3753 | |
3754 | /* | |
3755 | * Make sure descriptor is fully written before | |
3756 | * setting ownership bit (because HW can start | |
3757 | * executing as soon as we do). | |
3758 | */ | |
3759 | wmb(); | |
3760 | ||
59b0ed12 | 3761 | if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) { |
4ba6b8ea | 3762 | *bad_wr = wr; |
225c7b1f RD |
3763 | err = -EINVAL; |
3764 | goto out; | |
3765 | } | |
3766 | ||
3767 | ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | | |
417608c2 | 3768 | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; |
0e6e7416 RD |
3769 | |
3770 | /* | |
3771 | * We can improve latency by not stamping the last | |
3772 | * send queue WQE until after ringing the doorbell, so | |
3773 | * only stamp here if there are still more WQEs to post. | |
3774 | */ | |
f95ccffc JM |
3775 | if (wr->next) |
3776 | stamp_send_wqe(qp, ind + qp->sq_spare_wqes); | |
3777 | ind++; | |
225c7b1f RD |
3778 | } |
3779 | ||
3780 | out: | |
3781 | if (likely(nreq)) { | |
3782 | qp->sq.head += nreq; | |
3783 | ||
3784 | /* | |
3785 | * Make sure that descriptors are written before | |
3786 | * doorbell record. | |
3787 | */ | |
3788 | wmb(); | |
3789 | ||
97d82a48 SK |
3790 | writel_relaxed(qp->doorbell_qpn, |
3791 | to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL); | |
225c7b1f | 3792 | |
f95ccffc | 3793 | stamp_send_wqe(qp, ind + qp->sq_spare_wqes - 1); |
ea54b10c | 3794 | |
ea54b10c | 3795 | qp->sq_next_wqe = ind; |
225c7b1f RD |
3796 | } |
3797 | ||
96db0e03 | 3798 | spin_unlock_irqrestore(&qp->sq.lock, flags); |
225c7b1f RD |
3799 | |
3800 | return err; | |
3801 | } | |
3802 | ||
d34ac5cd BVA |
3803 | int mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, |
3804 | const struct ib_send_wr **bad_wr) | |
1975acd9 YH |
3805 | { |
3806 | return _mlx4_ib_post_send(ibqp, wr, bad_wr, false); | |
3807 | } | |
3808 | ||
d34ac5cd BVA |
3809 | static int _mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, |
3810 | const struct ib_recv_wr **bad_wr, bool drain) | |
225c7b1f RD |
3811 | { |
3812 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | |
3813 | struct mlx4_wqe_data_seg *scat; | |
3814 | unsigned long flags; | |
3815 | int err = 0; | |
3816 | int nreq; | |
3817 | int ind; | |
1ffeb2eb | 3818 | int max_gs; |
225c7b1f | 3819 | int i; |
35f05dab | 3820 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); |
225c7b1f | 3821 | |
1ffeb2eb | 3822 | max_gs = qp->rq.max_gs; |
225c7b1f RD |
3823 | spin_lock_irqsave(&qp->rq.lock, flags); |
3824 | ||
1975acd9 YH |
3825 | if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR && |
3826 | !drain) { | |
35f05dab YH |
3827 | err = -EIO; |
3828 | *bad_wr = wr; | |
3829 | nreq = 0; | |
3830 | goto out; | |
3831 | } | |
3832 | ||
0e6e7416 | 3833 | ind = qp->rq.head & (qp->rq.wqe_cnt - 1); |
225c7b1f RD |
3834 | |
3835 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
2b946077 | 3836 | if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
225c7b1f RD |
3837 | err = -ENOMEM; |
3838 | *bad_wr = wr; | |
3839 | goto out; | |
3840 | } | |
3841 | ||
3842 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
3843 | err = -EINVAL; | |
3844 | *bad_wr = wr; | |
3845 | goto out; | |
3846 | } | |
3847 | ||
3848 | scat = get_recv_wqe(qp, ind); | |
3849 | ||
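| /* |
|  * For proxy QPs, reserve the first scatter entry to receive the tunnel |
|  * header into the pre-mapped sqp_proxy_rcv buffer, leaving one fewer |
|  * SGE for the caller's list. |
|  */ |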
1ffeb2eb JM |
3850 | if (qp->mlx4_ib_qp_type & (MLX4_IB_QPT_PROXY_SMI_OWNER | |
3851 | MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI)) { | |
3852 | ib_dma_sync_single_for_device(ibqp->device, | |
3853 | qp->sqp_proxy_rcv[ind].map, | |
3854 | sizeof (struct mlx4_ib_proxy_sqp_hdr), | |
3855 | DMA_FROM_DEVICE); | |
3856 | scat->byte_count = | |
3857 | cpu_to_be32(sizeof (struct mlx4_ib_proxy_sqp_hdr)); | |
3858 | /* use dma lkey from upper layer entry */ | |
3859 | scat->lkey = cpu_to_be32(wr->sg_list->lkey); | |
3860 | scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); | |
3861 | scat++; | |
3862 | max_gs--; | |
3863 | } | |
3864 | ||
2242fa4f RD |
3865 | for (i = 0; i < wr->num_sge; ++i) |
3866 | __set_data_seg(scat + i, wr->sg_list + i); | |
225c7b1f | 3867 | |
1ffeb2eb | 3868 | if (i < max_gs) { |
225c7b1f RD |
3869 | scat[i].byte_count = 0; |
3870 | scat[i].lkey = cpu_to_be32(MLX4_INVALID_LKEY); | |
3871 | scat[i].addr = 0; | |
3872 | } | |
3873 | ||
3874 | qp->rq.wrid[ind] = wr->wr_id; | |
3875 | ||
0e6e7416 | 3876 | ind = (ind + 1) & (qp->rq.wqe_cnt - 1); |
225c7b1f RD |
3877 | } |
3878 | ||
3879 | out: | |
3880 | if (likely(nreq)) { | |
3881 | qp->rq.head += nreq; | |
3882 | ||
3883 | /* | |
3884 | * Make sure that descriptors are written before | |
3885 | * doorbell record. | |
3886 | */ | |
3887 | wmb(); | |
3888 | ||
3889 | *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); | |
3890 | } | |
3891 | ||
3892 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
3893 | ||
3894 | return err; | |
3895 | } | |
6a775e2b | 3896 | |
d34ac5cd BVA |
3897 | int mlx4_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, |
3898 | const struct ib_recv_wr **bad_wr) | |
1975acd9 YH |
3899 | { |
3900 | return _mlx4_ib_post_recv(ibqp, wr, bad_wr, false); | |
3901 | } | |
3902 | ||
6a775e2b JM |
3903 | static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state) |
3904 | { | |
3905 | switch (mlx4_state) { | |
3906 | case MLX4_QP_STATE_RST: return IB_QPS_RESET; | |
3907 | case MLX4_QP_STATE_INIT: return IB_QPS_INIT; | |
3908 | case MLX4_QP_STATE_RTR: return IB_QPS_RTR; | |
3909 | case MLX4_QP_STATE_RTS: return IB_QPS_RTS; | |
3910 | case MLX4_QP_STATE_SQ_DRAINING: | |
3911 | case MLX4_QP_STATE_SQD: return IB_QPS_SQD; | |
3912 | case MLX4_QP_STATE_SQER: return IB_QPS_SQE; | |
3913 | case MLX4_QP_STATE_ERR: return IB_QPS_ERR; | |
3914 | default: return -1; | |
3915 | } | |
3916 | } | |
3917 | ||
3918 | static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state) | |
3919 | { | |
3920 | switch (mlx4_mig_state) { | |
3921 | case MLX4_QP_PM_ARMED: return IB_MIG_ARMED; | |
3922 | case MLX4_QP_PM_REARM: return IB_MIG_REARM; | |
3923 | case MLX4_QP_PM_MIGRATED: return IB_MIG_MIGRATED; | |
3924 | default: return -1; | |
3925 | } | |
3926 | } | |
3927 | ||
3928 | static int to_ib_qp_access_flags(int mlx4_flags) | |
3929 | { | |
3930 | int ib_flags = 0; | |
3931 | ||
3932 | if (mlx4_flags & MLX4_QP_BIT_RRE) | |
3933 | ib_flags |= IB_ACCESS_REMOTE_READ; | |
3934 | if (mlx4_flags & MLX4_QP_BIT_RWE) | |
3935 | ib_flags |= IB_ACCESS_REMOTE_WRITE; | |
3936 | if (mlx4_flags & MLX4_QP_BIT_RAE) | |
3937 | ib_flags |= IB_ACCESS_REMOTE_ATOMIC; | |
3938 | ||
3939 | return ib_flags; | |
3940 | } | |
3941 | ||
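| /* |
|  * Translate a HW QP path (mlx4_qp_path) back into an rdma_ah_attr: port |
|  * and SL come from sched_queue (encoded differently for RoCE and IB), |
|  * plus LID, path bits, static rate and, when present, the GRH fields. |
|  */ |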
71d53ab4 | 3942 | static void to_rdma_ah_attr(struct mlx4_ib_dev *ibdev, |
d8966fcd | 3943 | struct rdma_ah_attr *ah_attr, |
71d53ab4 | 3944 | struct mlx4_qp_path *path) |
6a775e2b | 3945 | { |
4c3eb3ca | 3946 | struct mlx4_dev *dev = ibdev->dev; |
d8966fcd | 3947 | u8 port_num = path->sched_queue & 0x40 ? 2 : 1; |
4c3eb3ca | 3948 | |
d8966fcd | 3949 | memset(ah_attr, 0, sizeof(*ah_attr)); |
d8966fcd | 3950 | if (port_num == 0 || port_num > dev->caps.num_ports) |
6a775e2b | 3951 | return; |
f1228867 | 3952 | ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num); |
6a775e2b | 3953 | |
44c58487 | 3954 | if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) |
d8966fcd DC |
3955 | rdma_ah_set_sl(ah_attr, ((path->sched_queue >> 3) & 0x7) | |
3956 | ((path->sched_queue & 4) << 1)); | |
4c3eb3ca | 3957 | else |
d8966fcd | 3958 | rdma_ah_set_sl(ah_attr, (path->sched_queue >> 2) & 0xf); |
44c58487 | 3959 | rdma_ah_set_port_num(ah_attr, port_num); |
4c3eb3ca | 3960 | |
d8966fcd DC |
3961 | rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid)); |
3962 | rdma_ah_set_path_bits(ah_attr, path->grh_mylmc & 0x7f); | |
3963 | rdma_ah_set_static_rate(ah_attr, | |
3964 | path->static_rate ? path->static_rate - 5 : 0); | |
3965 | if (path->grh_mylmc & (1 << 7)) { | |
3966 | rdma_ah_set_grh(ah_attr, NULL, | |
3967 | be32_to_cpu(path->tclass_flowlabel) & 0xfffff, | |
3968 | path->mgid_index, | |
3969 | path->hop_limit, | |
3970 | (be32_to_cpu(path->tclass_flowlabel) | |
3971 | >> 20) & 0xff); | |
3972 | rdma_ah_set_dgid_raw(ah_attr, path->rgid); | |
6a775e2b JM |
3973 | } |
3974 | } | |
3975 | ||
3976 | int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, | |
3977 | struct ib_qp_init_attr *qp_init_attr) | |
3978 | { | |
3979 | struct mlx4_ib_dev *dev = to_mdev(ibqp->device); | |
3980 | struct mlx4_ib_qp *qp = to_mqp(ibqp); | |
3981 | struct mlx4_qp_context context; | |
3982 | int mlx4_state; | |
0df67030 DB |
3983 | int err = 0; |
3984 | ||
3078f5f1 GL |
3985 | if (ibqp->rwq_ind_tbl) |
3986 | return -EOPNOTSUPP; | |
3987 | ||
0df67030 | 3988 | mutex_lock(&qp->mutex); |
6a775e2b JM |
3989 | |
3990 | if (qp->state == IB_QPS_RESET) { | |
3991 | qp_attr->qp_state = IB_QPS_RESET; | |
3992 | goto done; | |
3993 | } | |
3994 | ||
3995 | err = mlx4_qp_query(dev->dev, &qp->mqp, &context); | |
0df67030 DB |
3996 | if (err) { |
3997 | err = -EINVAL; | |
3998 | goto out; | |
3999 | } | |
6a775e2b JM |
4000 | |
4001 | mlx4_state = be32_to_cpu(context.flags) >> 28; | |
4002 | ||
0df67030 DB |
4003 | qp->state = to_ib_qp_state(mlx4_state); |
4004 | qp_attr->qp_state = qp->state; | |
6a775e2b JM |
4005 | qp_attr->path_mtu = context.mtu_msgmax >> 5; |
4006 | qp_attr->path_mig_state = | |
4007 | to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3); | |
4008 | qp_attr->qkey = be32_to_cpu(context.qkey); | |
4009 | qp_attr->rq_psn = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff; | |
4010 | qp_attr->sq_psn = be32_to_cpu(context.next_send_psn) & 0xffffff; | |
4011 | qp_attr->dest_qp_num = be32_to_cpu(context.remote_qpn) & 0xffffff; | |
4012 | qp_attr->qp_access_flags = | |
4013 | to_ib_qp_access_flags(be32_to_cpu(context.params2)); | |
4014 | ||
8138a4c2 AH |
4015 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC || |
4016 | qp->ibqp.qp_type == IB_QPT_XRC_INI || | |
4017 | qp->ibqp.qp_type == IB_QPT_XRC_TGT) { | |
71d53ab4 DC |
4018 | to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path); |
4019 | to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path); | |
6a775e2b | 4020 | qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; |
d8966fcd DC |
4021 | qp_attr->alt_port_num = |
4022 | rdma_ah_get_port_num(&qp_attr->alt_ah_attr); | |
6a775e2b JM |
4023 | } |
4024 | ||
4025 | qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f; | |
1c27cb71 JM |
4026 | if (qp_attr->qp_state == IB_QPS_INIT) |
4027 | qp_attr->port_num = qp->port; | |
4028 | else | |
4029 | qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1; | |
6a775e2b JM |
4030 | |
4031 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ | |
4032 | qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING; | |
4033 | ||
4034 | qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7); | |
4035 | ||
4036 | qp_attr->max_dest_rd_atomic = | |
4037 | 1 << ((be32_to_cpu(context.params2) >> 21) & 0x7); | |
4038 | qp_attr->min_rnr_timer = | |
4039 | (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f; | |
4040 | qp_attr->timeout = context.pri_path.ackto >> 3; | |
4041 | qp_attr->retry_cnt = (be32_to_cpu(context.params1) >> 16) & 0x7; | |
4042 | qp_attr->rnr_retry = (be32_to_cpu(context.params1) >> 13) & 0x7; | |
4043 | qp_attr->alt_timeout = context.alt_path.ackto >> 3; | |
4044 | ||
4045 | done: | |
4046 | qp_attr->cur_qp_state = qp_attr->qp_state; | |
7f5eb9bb RD |
4047 | qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; |
4048 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | |
4049 | ||
6a775e2b | 4050 | if (!ibqp->uobject) { |
7f5eb9bb RD |
4051 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; |
4052 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | |
4053 | } else { | |
4054 | qp_attr->cap.max_send_wr = 0; | |
4055 | qp_attr->cap.max_send_sge = 0; | |
6a775e2b JM |
4056 | } |
4057 | ||
7f5eb9bb RD |
4058 | /* |
4059 | * We don't support inline sends for kernel QPs (yet), and we | |
4060 | * don't know what userspace's value should be. | |
4061 | */ | |
4062 | qp_attr->cap.max_inline_data = 0; | |
4063 | ||
4064 | qp_init_attr->cap = qp_attr->cap; | |
4065 | ||
521e575b RL |
4066 | qp_init_attr->create_flags = 0; |
4067 | if (qp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) | |
4068 | qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; | |
4069 | ||
4070 | if (qp->flags & MLX4_IB_QP_LSO) | |
4071 | qp_init_attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; | |
4072 | ||
c1c98501 MB |
4073 | if (qp->flags & MLX4_IB_QP_NETIF) |
4074 | qp_init_attr->create_flags |= IB_QP_CREATE_NETIF_QP; | |
4075 | ||
46db567d DB |
4076 | qp_init_attr->sq_sig_type = |
4077 | qp->sq_signal_bits == cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) ? | |
4078 | IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; | |
4079 | ||
0df67030 DB |
4080 | out: |
4081 | mutex_unlock(&qp->mutex); | |
4082 | return err; | |
6a775e2b JM |
4083 | } |
4084 | ||
400b1ebc GL |
4085 | struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd, |
4086 | struct ib_wq_init_attr *init_attr, | |
4087 | struct ib_udata *udata) | |
4088 | { | |
089b645d LR |
4089 | struct mlx4_dev *dev = to_mdev(pd->device)->dev; |
4090 | struct ib_qp_init_attr ib_qp_init_attr = {}; | |
400b1ebc GL |
4091 | struct mlx4_ib_qp *qp; |
4092 | struct mlx4_ib_create_wq ucmd; | |
4093 | int err, required_cmd_sz; | |
4094 | ||
e00b64f7 | 4095 | if (!udata) |
400b1ebc GL |
4096 | return ERR_PTR(-EINVAL); |
4097 | ||
078b3573 GL |
4098 | required_cmd_sz = offsetof(typeof(ucmd), comp_mask) + |
4099 | sizeof(ucmd.comp_mask); | |
400b1ebc GL |
4100 | if (udata->inlen < required_cmd_sz) { |
4101 | pr_debug("invalid inlen\n"); | |
4102 | return ERR_PTR(-EINVAL); | |
4103 | } | |
4104 | ||
4105 | if (udata->inlen > sizeof(ucmd) && | |
4106 | !ib_is_udata_cleared(udata, sizeof(ucmd), | |
4107 | udata->inlen - sizeof(ucmd))) { | |
4108 | pr_debug("inlen is not supported\n"); | |
4109 | return ERR_PTR(-EOPNOTSUPP); | |
4110 | } | |
4111 | ||
4112 | if (udata->outlen) | |
4113 | return ERR_PTR(-EOPNOTSUPP); | |
4114 | ||
400b1ebc GL |
4115 | if (init_attr->wq_type != IB_WQT_RQ) { |
4116 | pr_debug("unsupported wq type %d\n", init_attr->wq_type); | |
4117 | return ERR_PTR(-EOPNOTSUPP); | |
4118 | } | |
4119 | ||
089b645d LR |
4120 | if (init_attr->create_flags & ~IB_WQ_FLAGS_SCATTER_FCS || |
4121 | !(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) { | |
400b1ebc GL |
4122 | pr_debug("unsupported create_flags %u\n", |
4123 | init_attr->create_flags); | |
4124 | return ERR_PTR(-EOPNOTSUPP); | |
4125 | } | |
4126 | ||
4127 | qp = kzalloc(sizeof(*qp), GFP_KERNEL); | |
4128 | if (!qp) | |
4129 | return ERR_PTR(-ENOMEM); | |
4130 | ||
8fd3cd2a | 4131 | mutex_init(&qp->mutex); |
400b1ebc GL |
4132 | qp->pri.vid = 0xFFFF; |
4133 | qp->alt.vid = 0xFFFF; | |
4134 | ||
400b1ebc GL |
4135 | ib_qp_init_attr.qp_context = init_attr->wq_context; |
4136 | ib_qp_init_attr.qp_type = IB_QPT_RAW_PACKET; | |
4137 | ib_qp_init_attr.cap.max_recv_wr = init_attr->max_wr; | |
4138 | ib_qp_init_attr.cap.max_recv_sge = init_attr->max_sge; | |
4139 | ib_qp_init_attr.recv_cq = init_attr->cq; | |
4140 | ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */ | |
4141 | ||
6d06c9aa GL |
4142 | if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) |
4143 | ib_qp_init_attr.create_flags |= IB_QP_CREATE_SCATTER_FCS; | |
4144 | ||
089b645d | 4145 | err = create_rq(pd, &ib_qp_init_attr, udata, qp); |
400b1ebc GL |
4146 | if (err) { |
4147 | kfree(qp); | |
4148 | return ERR_PTR(err); | |
4149 | } | |
4150 | ||
4151 | qp->ibwq.event_handler = init_attr->event_handler; | |
4152 | qp->ibwq.wq_num = qp->mqp.qpn; | |
4153 | qp->ibwq.state = IB_WQS_RESET; | |
4154 | ||
4155 | return &qp->ibwq; | |
4156 | } | |
4157 | ||
4158 | static int ib_wq2qp_state(enum ib_wq_state state) | |
4159 | { | |
4160 | switch (state) { | |
4161 | case IB_WQS_RESET: | |
4162 | return IB_QPS_RESET; | |
4163 | case IB_WQS_RDY: | |
4164 | return IB_QPS_RTR; | |
4165 | default: | |
4166 | return IB_QPS_ERR; | |
4167 | } | |
4168 | } | |
4169 | ||
89944450 SR |
4170 | static int _mlx4_ib_modify_wq(struct ib_wq *ibwq, enum ib_wq_state new_state, |
4171 | struct ib_udata *udata) | |
400b1ebc GL |
4172 | { |
4173 | struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); | |
4174 | enum ib_qp_state qp_cur_state; | |
4175 | enum ib_qp_state qp_new_state; | |
4176 | int attr_mask; | |
4177 | int err; | |
4178 | ||
4179 | /* ib_qp.state represents the WQ HW state while ib_wq.state represents | |
4180 | * the WQ logical state. | |
4181 | */ | |
4182 | qp_cur_state = qp->state; | |
4183 | qp_new_state = ib_wq2qp_state(new_state); | |
4184 | ||
4185 | if (ib_wq2qp_state(new_state) == qp_cur_state) | |
4186 | return 0; | |
4187 | ||
4188 | if (new_state == IB_WQS_RDY) { | |
4189 | struct ib_qp_attr attr = {}; | |
4190 | ||
4191 | attr.port_num = qp->port; | |
4192 | attr_mask = IB_QP_PORT; | |
4193 | ||
4194 | err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, &attr, | |
89944450 SR |
4195 | attr_mask, IB_QPS_RESET, IB_QPS_INIT, |
4196 | udata); | |
400b1ebc GL |
4197 | if (err) { |
4198 | pr_debug("WQN=0x%06x failed to apply RST->INIT on the HW QP\n", | |
4199 | ibwq->wq_num); | |
4200 | return err; | |
4201 | } | |
4202 | ||
4203 | qp_cur_state = IB_QPS_INIT; | |
4204 | } | |
4205 | ||
4206 | attr_mask = 0; | |
4207 | err = __mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, attr_mask, | |
89944450 | 4208 | qp_cur_state, qp_new_state, udata); |
400b1ebc GL |
4209 | |
4210 | if (err && (qp_cur_state == IB_QPS_INIT)) { | |
4211 | qp_new_state = IB_QPS_RESET; | |
4212 | if (__mlx4_ib_modify_qp(ibwq, MLX4_IB_RWQ_SRC, NULL, | |
89944450 SR |
4213 | attr_mask, IB_QPS_INIT, IB_QPS_RESET, |
4214 | udata)) { | |
400b1ebc GL |
4215 | pr_warn("WQN=0x%06x failed to revert the HW QP to RESET\n", |
4216 | ibwq->wq_num); | |
4217 | qp_new_state = IB_QPS_INIT; | |
4218 | } | |
4219 | } | |
4220 | ||
4221 | qp->state = qp_new_state; | |
4222 | ||
4223 | return err; | |
4224 | } | |
4225 | ||
4226 | int mlx4_ib_modify_wq(struct ib_wq *ibwq, struct ib_wq_attr *wq_attr, | |
4227 | u32 wq_attr_mask, struct ib_udata *udata) | |
4228 | { | |
4229 | struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); | |
4230 | struct mlx4_ib_modify_wq ucmd = {}; | |
4231 | size_t required_cmd_sz; | |
4232 | enum ib_wq_state cur_state, new_state; | |
4233 | int err = 0; | |
4234 | ||
4235 | required_cmd_sz = offsetof(typeof(ucmd), reserved) + | |
4236 | sizeof(ucmd.reserved); | |
4237 | if (udata->inlen < required_cmd_sz) | |
4238 | return -EINVAL; | |
4239 | ||
4240 | if (udata->inlen > sizeof(ucmd) && | |
4241 | !ib_is_udata_cleared(udata, sizeof(ucmd), | |
4242 | udata->inlen - sizeof(ucmd))) | |
4243 | return -EOPNOTSUPP; | |
4244 | ||
4245 | if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) | |
4246 | return -EFAULT; | |
4247 | ||
4248 | if (ucmd.comp_mask || ucmd.reserved) | |
4249 | return -EOPNOTSUPP; | |
4250 | ||
4251 | if (wq_attr_mask & IB_WQ_FLAGS) | |
4252 | return -EOPNOTSUPP; | |
4253 | ||
f9744288 LR |
4254 | cur_state = wq_attr->curr_wq_state; |
4255 | new_state = wq_attr->wq_state; | |
400b1ebc GL |
4256 | |
4257 | if ((new_state == IB_WQS_RDY) && (cur_state == IB_WQS_ERR)) | |
4258 | return -EINVAL; | |
4259 | ||
4260 | if ((new_state == IB_WQS_ERR) && (cur_state == IB_WQS_RESET)) | |
4261 | return -EINVAL; | |
4262 | ||
3078f5f1 GL |
4263 | /* Need to protect against the parent RSS QP, which may also modify the |
4264 | * WQ state. | |
4265 | */ | |
4266 | mutex_lock(&qp->mutex); | |
4267 | ||
400b1ebc GL |
4268 | /* Can update HW state only if an RSS QP has already been associated with |
4269 | * this WQ, so we can apply its port to the WQ. | |
4270 | */ | |
4271 | if (qp->rss_usecnt) | |
89944450 | 4272 | err = _mlx4_ib_modify_wq(ibwq, new_state, udata); |
400b1ebc GL |
4273 | |
4274 | if (!err) | |
4275 | ibwq->state = new_state; | |
4276 | ||
3078f5f1 GL |
4277 | mutex_unlock(&qp->mutex); |
4278 | ||
400b1ebc GL |
4279 | return err; |
4280 | } | |
4281 | ||
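 | /* ib_wq destroy verb: release the QP counter (if one was assigned) and | 
 |  * the common QP resources backing this WQ. | 
 |  */ | 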
add53535 | 4282 | int mlx4_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata) |
400b1ebc GL |
4283 | { |
4284 | struct mlx4_ib_dev *dev = to_mdev(ibwq->device); | |
4285 | struct mlx4_ib_qp *qp = to_mqp((struct ib_qp *)ibwq); | |
4286 | ||
4287 | if (qp->counter_index) | |
4288 | mlx4_ib_free_qp_counter(dev, qp); | |
4289 | ||
bdeacabd | 4290 | destroy_qp_common(dev, qp, MLX4_IB_RWQ_SRC, udata); |
400b1ebc GL |
4291 | |
4292 | kfree(qp); | |
add53535 | 4293 | return 0; |
400b1ebc | 4294 | } |
b8d46ca0 | 4295 | |
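 | /* RSS indirection table create verb: no HW object is allocated here; the | 
 |  * WQNs are only validated to form a consecutive, size-aligned range, and | 
 |  * an optional response is returned to user space. | 
 |  */ | 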
c0a6b5ec LR |
4296 | int mlx4_ib_create_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table, |
4297 | struct ib_rwq_ind_table_init_attr *init_attr, | |
4298 | struct ib_udata *udata) | |
b8d46ca0 | 4299 | { |
b8d46ca0 GL |
4300 | struct mlx4_ib_create_rwq_ind_tbl_resp resp = {}; |
4301 | unsigned int ind_tbl_size = 1 << init_attr->log_ind_tbl_size; | |
c0a6b5ec | 4302 | struct ib_device *device = rwq_ind_table->device; |
b8d46ca0 GL |
4303 | unsigned int base_wqn; |
4304 | size_t min_resp_len; | |
c0a6b5ec | 4305 | int i, err = 0; |
b8d46ca0 GL |
4306 | |
4307 | if (udata->inlen > 0 && | |
4308 | !ib_is_udata_cleared(udata, 0, | |
4309 | udata->inlen)) | |
c0a6b5ec | 4310 | return -EOPNOTSUPP; |
b8d46ca0 GL |
4311 | |
4312 | min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); | |
4313 | if (udata->outlen && udata->outlen < min_resp_len) | |
c0a6b5ec | 4314 | return -EINVAL; |
b8d46ca0 GL |
4315 | |
4316 | if (ind_tbl_size > | |
4317 | device->attrs.rss_caps.max_rwq_indirection_table_size) { | |
4318 | pr_debug("log_ind_tbl_size = %d is bigger than supported = %d\n", | |
4319 | ind_tbl_size, | |
4320 | device->attrs.rss_caps.max_rwq_indirection_table_size); | |
c0a6b5ec | 4321 | return -EINVAL; |
b8d46ca0 GL |
4322 | } |
4323 | ||
4324 | base_wqn = init_attr->ind_tbl[0]->wq_num; | |
4325 | ||
4326 | if (base_wqn % ind_tbl_size) { | |
4327 | pr_debug("WQN=0x%x isn't aligned with indirection table size\n", | |
4328 | base_wqn); | |
c0a6b5ec | 4329 | return -EINVAL; |
b8d46ca0 GL |
4330 | } |
4331 | ||
4332 | for (i = 1; i < ind_tbl_size; i++) { | |
4333 | if (++base_wqn != init_attr->ind_tbl[i]->wq_num) { | |
4334 | pr_debug("indirection table's WQNs aren't consecutive\n"); | |
c0a6b5ec | 4335 | return -EINVAL; |
b8d46ca0 GL |
4336 | } |
4337 | } | |
4338 | ||
b8d46ca0 GL |
4339 | if (udata->outlen) { |
4340 | resp.response_length = offsetof(typeof(resp), response_length) + | |
4341 | sizeof(resp.response_length); | |
4342 | err = ib_copy_to_udata(udata, &resp, resp.response_length); | |
b8d46ca0 GL |
4343 | } |
4344 | ||
c0a6b5ec | 4345 | return err; |
b8d46ca0 | 4346 | } |
1975acd9 YH |
4347 | |
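 | /* Completion context for the marker WR posted while draining a queue; | 
 |  * mlx4_ib_drain_qp_done() completes 'done' once the WR's CQE is seen. | 
 |  */ | 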
4348 | struct mlx4_ib_drain_cqe { | |
4349 | struct ib_cqe cqe; | |
4350 | struct completion done; | |
4351 | }; | |
4352 | ||
4353 | static void mlx4_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc) | |
4354 | { | |
4355 | struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe, | |
4356 | struct mlx4_ib_drain_cqe, | |
4357 | cqe); | |
4358 | ||
4359 | complete(&cqe->done); | |
4360 | } | |
4361 | ||
4362 | /* This function returns only after the drain WR has completed */ | 
4363 | static void handle_drain_completion(struct ib_cq *cq, | |
4364 | struct mlx4_ib_drain_cqe *sdrain, | |
4365 | struct mlx4_ib_dev *dev) | |
4366 | { | |
4367 | struct mlx4_dev *mdev = dev->dev; | |
4368 | ||
4369 | if (cq->poll_ctx == IB_POLL_DIRECT) { | |
4370 | while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0) | |
4371 | ib_process_cq_direct(cq, -1); | |
4372 | return; | |
4373 | } | |
4374 | ||
4375 | if (mdev->persist->state == MLX4_DEVICE_STATE_INTERNAL_ERROR) { | |
4376 | struct mlx4_ib_cq *mcq = to_mcq(cq); | |
4377 | bool triggered = false; | |
4378 | unsigned long flags; | |
4379 | ||
4380 | spin_lock_irqsave(&dev->reset_flow_resource_lock, flags); | |
4381 | /* Make sure that the CQ handler won't run if it hasn't run yet */ | 
4382 | if (!mcq->mcq.reset_notify_added) | |
4383 | mcq->mcq.reset_notify_added = 1; | |
4384 | else | |
4385 | triggered = true; | |
4386 | spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags); | |
4387 | ||
4388 | if (triggered) { | |
4389 | /* Wait for any scheduled/running task to finish */ | 
4390 | switch (cq->poll_ctx) { | |
4391 | case IB_POLL_SOFTIRQ: | |
4392 | irq_poll_disable(&cq->iop); | |
4393 | irq_poll_enable(&cq->iop); | |
4394 | break; | |
4395 | case IB_POLL_WORKQUEUE: | |
4396 | cancel_work_sync(&cq->work); | |
4397 | break; | |
4398 | default: | |
4399 | WARN_ON_ONCE(1); | |
4400 | } | |
4401 | } | |
4402 | ||
4403 | /* Run the CQ handler - this makes sure that the drain WR will | 
4404 | * be processed if it wasn't processed yet. | 
4405 | */ | |
4406 | mcq->mcq.comp(&mcq->mcq); | |
4407 | } | |
4408 | ||
4409 | wait_for_completion(&sdrain->done); | |
4410 | } | |
4411 | ||
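 | /* Flush the send queue: move the QP to the error state, post a marker | 
 |  * RDMA_WRITE WR and wait for its completion, so that all previously | 
 |  * posted send WRs are guaranteed to have been flushed. | 
 |  */ | 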
4412 | void mlx4_ib_drain_sq(struct ib_qp *qp) | |
4413 | { | |
4414 | struct ib_cq *cq = qp->send_cq; | |
4415 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; | |
4416 | struct mlx4_ib_drain_cqe sdrain; | |
d34ac5cd | 4417 | const struct ib_send_wr *bad_swr; |
1975acd9 YH |
4418 | struct ib_rdma_wr swr = { |
4419 | .wr = { | |
4420 | .next = NULL, | |
4421 | { .wr_cqe = &sdrain.cqe, }, | |
4422 | .opcode = IB_WR_RDMA_WRITE, | |
4423 | }, | |
4424 | }; | |
4425 | int ret; | |
4426 | struct mlx4_ib_dev *dev = to_mdev(qp->device); | |
4427 | struct mlx4_dev *mdev = dev->dev; | |
4428 | ||
4429 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); | |
4430 | if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) { | |
4431 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); | |
4432 | return; | |
4433 | } | |
4434 | ||
4435 | sdrain.cqe.done = mlx4_ib_drain_qp_done; | |
4436 | init_completion(&sdrain.done); | |
4437 | ||
4438 | ret = _mlx4_ib_post_send(qp, &swr.wr, &bad_swr, true); | |
4439 | if (ret) { | |
4440 | WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); | |
4441 | return; | |
4442 | } | |
4443 | ||
4444 | handle_drain_completion(cq, &sdrain, dev); | |
4445 | } | |
4446 | ||
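 | /* Receive-queue counterpart of mlx4_ib_drain_sq(): post a marker recv WR | 
 |  * on the QP in the error state and wait until it completes. | 
 |  */ | 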
4447 | void mlx4_ib_drain_rq(struct ib_qp *qp) | |
4448 | { | |
4449 | struct ib_cq *cq = qp->recv_cq; | |
4450 | struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; | |
4451 | struct mlx4_ib_drain_cqe rdrain; | |
d34ac5cd BVA |
4452 | struct ib_recv_wr rwr = {}; |
4453 | const struct ib_recv_wr *bad_rwr; | |
1975acd9 YH |
4454 | int ret; |
4455 | struct mlx4_ib_dev *dev = to_mdev(qp->device); | |
4456 | struct mlx4_dev *mdev = dev->dev; | |
4457 | ||
4458 | ret = ib_modify_qp(qp, &attr, IB_QP_STATE); | |
4459 | if (ret && mdev->persist->state != MLX4_DEVICE_STATE_INTERNAL_ERROR) { | |
4460 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); | |
4461 | return; | |
4462 | } | |
4463 | ||
4464 | rwr.wr_cqe = &rdrain.cqe; | |
4465 | rdrain.cqe.done = mlx4_ib_drain_qp_done; | |
4466 | init_completion(&rdrain.done); | |
4467 | ||
4468 | ret = _mlx4_ib_post_recv(qp, &rwr, &bad_rwr, true); | |
4469 | if (ret) { | |
4470 | WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret); | |
4471 | return; | |
4472 | } | |
4473 | ||
4474 | handle_drain_completion(cq, &rdrain, dev); | |
4475 | } |