Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
80c8ec2c | 3 | * Copyright (c) 2005 Cisco Systems. All rights reserved. |
2a1d9b7f RD |
4 | * Copyright (c) 2005 Mellanox Technologies. All rights reserved. |
5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. | |
1da177e4 LT |
6 | * |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
34 | * | |
35 | * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ | |
36 | */ | |
37 | ||
38 | #include <linux/init.h> | |
39 | ||
a4d61e84 RD |
40 | #include <rdma/ib_verbs.h> |
41 | #include <rdma/ib_cache.h> | |
42 | #include <rdma/ib_pack.h> | |
1da177e4 LT |
43 | |
44 | #include "mthca_dev.h" | |
45 | #include "mthca_cmd.h" | |
46 | #include "mthca_memfree.h" | |
c04bc3d1 | 47 | #include "mthca_wqe.h" |
1da177e4 LT |
48 | |
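/*
 * Driver limits: WQE buffers up to MTHCA_MAX_DIRECT_QP_SIZE are kept in a
 * single contiguous ("direct") allocation (see mthca_alloc_wqe_buf());
 * ACK_REQ_FREQ and FLIGHT_LIMIT are programmed into params1 of the QP
 * context in mthca_modify_qp().
 */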
49 | enum { | |
50 | MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, | |
51 | MTHCA_ACK_REQ_FREQ = 10, | |
52 | MTHCA_FLIGHT_LIMIT = 9, | |
80c8ec2c RD |
53 | MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ |
54 | MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ | |
55 | MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ | |
1da177e4 LT |
56 | }; |
57 | ||
58 | enum { | |
59 | MTHCA_QP_STATE_RST = 0, | |
60 | MTHCA_QP_STATE_INIT = 1, | |
61 | MTHCA_QP_STATE_RTR = 2, | |
62 | MTHCA_QP_STATE_RTS = 3, | |
63 | MTHCA_QP_STATE_SQE = 4, | |
64 | MTHCA_QP_STATE_SQD = 5, | |
65 | MTHCA_QP_STATE_ERR = 6, | |
66 | MTHCA_QP_STATE_DRAINING = 7 | |
67 | }; | |
68 | ||
69 | enum { | |
70 | MTHCA_QP_ST_RC = 0x0, | |
71 | MTHCA_QP_ST_UC = 0x1, | |
72 | MTHCA_QP_ST_RD = 0x2, | |
73 | MTHCA_QP_ST_UD = 0x3, | |
74 | MTHCA_QP_ST_MLX = 0x7 | |
75 | }; | |
76 | ||
77 | enum { | |
78 | MTHCA_QP_PM_MIGRATED = 0x3, | |
79 | MTHCA_QP_PM_ARMED = 0x0, | |
80 | MTHCA_QP_PM_REARM = 0x1 | |
81 | }; | |
82 | ||
83 | enum { | |
84 | /* qp_context flags */ | |
85 | MTHCA_QP_BIT_DE = 1 << 8, | |
86 | /* params1 */ | |
87 | MTHCA_QP_BIT_SRE = 1 << 15, | |
88 | MTHCA_QP_BIT_SWE = 1 << 14, | |
89 | MTHCA_QP_BIT_SAE = 1 << 13, | |
90 | MTHCA_QP_BIT_SIC = 1 << 4, | |
91 | MTHCA_QP_BIT_SSC = 1 << 3, | |
92 | /* params2 */ | |
93 | MTHCA_QP_BIT_RRE = 1 << 15, | |
94 | MTHCA_QP_BIT_RWE = 1 << 14, | |
95 | MTHCA_QP_BIT_RAE = 1 << 13, | |
96 | MTHCA_QP_BIT_RIC = 1 << 4, | |
97 | MTHCA_QP_BIT_RSC = 1 << 3 | |
98 | }; | |
99 | ||
100 | struct mthca_qp_path { | |
97f52eb4 SH |
101 | __be32 port_pkey; |
102 | u8 rnr_retry; | |
103 | u8 g_mylmc; | |
104 | __be16 rlid; | |
105 | u8 ackto; | |
106 | u8 mgid_index; | |
107 | u8 static_rate; | |
108 | u8 hop_limit; | |
109 | __be32 sl_tclass_flowlabel; | |
110 | u8 rgid[16]; | |
1da177e4 LT |
111 | } __attribute__((packed)); |
112 | ||
113 | struct mthca_qp_context { | |
97f52eb4 SH |
114 | __be32 flags; |
115 | __be32 tavor_sched_queue; /* Reserved on Arbel */ | |
116 | u8 mtu_msgmax; | |
117 | u8 rq_size_stride; /* Reserved on Tavor */ | |
118 | u8 sq_size_stride; /* Reserved on Tavor */ | |
119 | u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ | |
120 | __be32 usr_page; | |
121 | __be32 local_qpn; | |
122 | __be32 remote_qpn; | |
123 | u32 reserved1[2]; | |
1da177e4 LT |
124 | struct mthca_qp_path pri_path; |
125 | struct mthca_qp_path alt_path; | |
97f52eb4 SH |
126 | __be32 rdd; |
127 | __be32 pd; | |
128 | __be32 wqe_base; | |
129 | __be32 wqe_lkey; | |
130 | __be32 params1; | |
131 | __be32 reserved2; | |
132 | __be32 next_send_psn; | |
133 | __be32 cqn_snd; | |
134 | __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ | |
135 | __be32 snd_db_index; /* (debugging only entries) */ | |
136 | __be32 last_acked_psn; | |
137 | __be32 ssn; | |
138 | __be32 params2; | |
139 | __be32 rnr_nextrecvpsn; | |
140 | __be32 ra_buff_indx; | |
141 | __be32 cqn_rcv; | |
142 | __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ | |
143 | __be32 rcv_db_index; /* (debugging only entries) */ | |
144 | __be32 qkey; | |
145 | __be32 srqn; | |
146 | __be32 rmsn; | |
147 | __be16 rq_wqe_counter; /* reserved on Tavor */ | |
148 | __be16 sq_wqe_counter; /* reserved on Tavor */ | |
149 | u32 reserved3[18]; | |
1da177e4 LT |
150 | } __attribute__((packed)); |
151 | ||
152 | struct mthca_qp_param { | |
97f52eb4 SH |
153 | __be32 opt_param_mask; |
154 | u32 reserved1; | |
1da177e4 | 155 | struct mthca_qp_context context; |
97f52eb4 | 156 | u32 reserved2[62]; |
1da177e4 LT |
157 | } __attribute__((packed)); |
158 | ||
159 | enum { | |
160 | MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, | |
161 | MTHCA_QP_OPTPAR_RRE = 1 << 1, | |
162 | MTHCA_QP_OPTPAR_RAE = 1 << 2, | |
163 | MTHCA_QP_OPTPAR_RWE = 1 << 3, | |
164 | MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, | |
165 | MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, | |
166 | MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, | |
167 | MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7, | |
168 | MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, | |
169 | MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, | |
170 | MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, | |
171 | MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, | |
172 | MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, | |
173 | MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, | |
174 | MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, | |
175 | MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, | |
176 | MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 | |
177 | }; | |
178 | ||
1da177e4 LT |
179 | static const u8 mthca_opcode[] = { |
180 | [IB_WR_SEND] = MTHCA_OPCODE_SEND, | |
181 | [IB_WR_SEND_WITH_IMM] = MTHCA_OPCODE_SEND_IMM, | |
182 | [IB_WR_RDMA_WRITE] = MTHCA_OPCODE_RDMA_WRITE, | |
183 | [IB_WR_RDMA_WRITE_WITH_IMM] = MTHCA_OPCODE_RDMA_WRITE_IMM, | |
184 | [IB_WR_RDMA_READ] = MTHCA_OPCODE_RDMA_READ, | |
185 | [IB_WR_ATOMIC_CMP_AND_SWP] = MTHCA_OPCODE_ATOMIC_CS, | |
186 | [IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA, | |
187 | }; | |
188 | ||
189 | static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) | |
190 | { | |
191 | return qp->qpn >= dev->qp_table.sqp_start && | |
192 | qp->qpn <= dev->qp_table.sqp_start + 3; | |
193 | } | |
194 | ||
195 | static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) | |
196 | { | |
197 | return qp->qpn >= dev->qp_table.sqp_start && | |
198 | qp->qpn <= dev->qp_table.sqp_start + 1; | |
199 | } | |
200 | ||
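/*
 * WQE buffers are either one contiguous ("direct") buffer or a list of
 * pages; in the indirect case the byte offset (n << wqe_shift) is split
 * into a page index and an offset within that page.
 */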
201 | static void *get_recv_wqe(struct mthca_qp *qp, int n) | |
202 | { | |
203 | if (qp->is_direct) | |
204 | return qp->queue.direct.buf + (n << qp->rq.wqe_shift); | |
205 | else | |
206 | return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + | |
207 | ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); | |
208 | } | |
209 | ||
210 | static void *get_send_wqe(struct mthca_qp *qp, int n) | |
211 | { | |
212 | if (qp->is_direct) | |
213 | return qp->queue.direct.buf + qp->send_wqe_offset + | |
214 | (n << qp->sq.wqe_shift); | |
215 | else | |
216 | return qp->queue.page_list[(qp->send_wqe_offset + | |
217 | (n << qp->sq.wqe_shift)) >> | |
218 | PAGE_SHIFT].buf + | |
219 | ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & | |
220 | (PAGE_SIZE - 1)); | |
221 | } | |
222 | ||
223 | void mthca_qp_event(struct mthca_dev *dev, u32 qpn, | |
224 | enum ib_event_type event_type) | |
225 | { | |
226 | struct mthca_qp *qp; | |
227 | struct ib_event event; | |
228 | ||
229 | spin_lock(&dev->qp_table.lock); | |
230 | qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); | |
231 | if (qp) | |
232 | atomic_inc(&qp->refcount); | |
233 | spin_unlock(&dev->qp_table.lock); | |
234 | ||
235 | if (!qp) { | |
236 | mthca_warn(dev, "Async event for bogus QP %08x\n", qpn); | |
237 | return; | |
238 | } | |
239 | ||
240 | event.device = &dev->ib_dev; | |
241 | event.event = event_type; | |
242 | event.element.qp = &qp->ibqp; | |
243 | if (qp->ibqp.event_handler) | |
244 | qp->ibqp.event_handler(&event, qp->ibqp.qp_context); | |
245 | ||
246 | if (atomic_dec_and_test(&qp->refcount)) | |
247 | wake_up(&qp->wait); | |
248 | } | |
249 | ||
250 | static int to_mthca_state(enum ib_qp_state ib_state) | |
251 | { | |
252 | switch (ib_state) { | |
253 | case IB_QPS_RESET: return MTHCA_QP_STATE_RST; | |
254 | case IB_QPS_INIT: return MTHCA_QP_STATE_INIT; | |
255 | case IB_QPS_RTR: return MTHCA_QP_STATE_RTR; | |
256 | case IB_QPS_RTS: return MTHCA_QP_STATE_RTS; | |
257 | case IB_QPS_SQD: return MTHCA_QP_STATE_SQD; | |
258 | case IB_QPS_SQE: return MTHCA_QP_STATE_SQE; | |
259 | case IB_QPS_ERR: return MTHCA_QP_STATE_ERR; | |
260 | default: return -1; | |
261 | } | |
262 | } | |
263 | ||
264 | enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; | |
265 | ||
266 | static int to_mthca_st(int transport) | |
267 | { | |
268 | switch (transport) { | |
269 | case RC: return MTHCA_QP_ST_RC; | |
270 | case UC: return MTHCA_QP_ST_UC; | |
271 | case UD: return MTHCA_QP_ST_UD; | |
272 | case RD: return MTHCA_QP_ST_RD; | |
273 | case MLX: return MTHCA_QP_ST_MLX; | |
274 | default: return -1; | |
275 | } | |
276 | } | |
277 | ||
278 | static const struct { | |
279 | int trans; | |
280 | u32 req_param[NUM_TRANS]; | |
281 | u32 opt_param[NUM_TRANS]; | |
282 | } state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { | |
283 | [IB_QPS_RESET] = { | |
284 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
285 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }, | |
286 | [IB_QPS_INIT] = { | |
287 | .trans = MTHCA_TRANS_RST2INIT, | |
288 | .req_param = { | |
289 | [UD] = (IB_QP_PKEY_INDEX | | |
290 | IB_QP_PORT | | |
291 | IB_QP_QKEY), | |
9e6970b5 RD |
292 | [UC] = (IB_QP_PKEY_INDEX | |
293 | IB_QP_PORT | | |
294 | IB_QP_ACCESS_FLAGS), | |
1da177e4 LT |
295 | [RC] = (IB_QP_PKEY_INDEX | |
296 | IB_QP_PORT | | |
297 | IB_QP_ACCESS_FLAGS), | |
298 | [MLX] = (IB_QP_PKEY_INDEX | | |
299 | IB_QP_QKEY), | |
300 | }, | |
301 | /* bug-for-bug compatibility with VAPI: */ | |
302 | .opt_param = { | |
303 | [MLX] = IB_QP_PORT | |
304 | } | |
305 | }, | |
306 | }, | |
307 | [IB_QPS_INIT] = { | |
308 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
309 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }, | |
310 | [IB_QPS_INIT] = { | |
311 | .trans = MTHCA_TRANS_INIT2INIT, | |
312 | .opt_param = { | |
313 | [UD] = (IB_QP_PKEY_INDEX | | |
314 | IB_QP_PORT | | |
315 | IB_QP_QKEY), | |
9e6970b5 RD |
316 | [UC] = (IB_QP_PKEY_INDEX | |
317 | IB_QP_PORT | | |
318 | IB_QP_ACCESS_FLAGS), | |
1da177e4 LT |
319 | [RC] = (IB_QP_PKEY_INDEX | |
320 | IB_QP_PORT | | |
321 | IB_QP_ACCESS_FLAGS), | |
322 | [MLX] = (IB_QP_PKEY_INDEX | | |
323 | IB_QP_QKEY), | |
324 | } | |
325 | }, | |
326 | [IB_QPS_RTR] = { | |
327 | .trans = MTHCA_TRANS_INIT2RTR, | |
328 | .req_param = { | |
9e6970b5 RD |
329 | [UC] = (IB_QP_AV | |
330 | IB_QP_PATH_MTU | | |
331 | IB_QP_DEST_QPN | | |
332 | IB_QP_RQ_PSN | | |
333 | IB_QP_MAX_DEST_RD_ATOMIC), | |
1da177e4 LT |
334 | [RC] = (IB_QP_AV | |
335 | IB_QP_PATH_MTU | | |
336 | IB_QP_DEST_QPN | | |
337 | IB_QP_RQ_PSN | | |
338 | IB_QP_MAX_DEST_RD_ATOMIC | | |
339 | IB_QP_MIN_RNR_TIMER), | |
340 | }, | |
341 | .opt_param = { | |
342 | [UD] = (IB_QP_PKEY_INDEX | | |
343 | IB_QP_QKEY), | |
9e6970b5 RD |
344 | [UC] = (IB_QP_ALT_PATH | |
345 | IB_QP_ACCESS_FLAGS | | |
346 | IB_QP_PKEY_INDEX), | |
1da177e4 LT |
347 | [RC] = (IB_QP_ALT_PATH | |
348 | IB_QP_ACCESS_FLAGS | | |
349 | IB_QP_PKEY_INDEX), | |
350 | [MLX] = (IB_QP_PKEY_INDEX | | |
351 | IB_QP_QKEY), | |
352 | } | |
353 | } | |
354 | }, | |
355 | [IB_QPS_RTR] = { | |
356 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
357 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }, | |
358 | [IB_QPS_RTS] = { | |
359 | .trans = MTHCA_TRANS_RTR2RTS, | |
360 | .req_param = { | |
361 | [UD] = IB_QP_SQ_PSN, | |
9e6970b5 RD |
362 | [UC] = (IB_QP_SQ_PSN | |
363 | IB_QP_MAX_QP_RD_ATOMIC), | |
1da177e4 LT |
364 | [RC] = (IB_QP_TIMEOUT | |
365 | IB_QP_RETRY_CNT | | |
366 | IB_QP_RNR_RETRY | | |
367 | IB_QP_SQ_PSN | | |
368 | IB_QP_MAX_QP_RD_ATOMIC), | |
369 | [MLX] = IB_QP_SQ_PSN, | |
370 | }, | |
371 | .opt_param = { | |
372 | [UD] = (IB_QP_CUR_STATE | | |
373 | IB_QP_QKEY), | |
9e6970b5 RD |
374 | [UC] = (IB_QP_CUR_STATE | |
375 | IB_QP_ALT_PATH | | |
376 | IB_QP_ACCESS_FLAGS | | |
377 | IB_QP_PKEY_INDEX | | |
378 | IB_QP_PATH_MIG_STATE), | |
1da177e4 LT |
379 | [RC] = (IB_QP_CUR_STATE | |
380 | IB_QP_ALT_PATH | | |
381 | IB_QP_ACCESS_FLAGS | | |
382 | IB_QP_PKEY_INDEX | | |
383 | IB_QP_MIN_RNR_TIMER | | |
384 | IB_QP_PATH_MIG_STATE), | |
385 | [MLX] = (IB_QP_CUR_STATE | | |
386 | IB_QP_QKEY), | |
387 | } | |
388 | } | |
389 | }, | |
390 | [IB_QPS_RTS] = { | |
391 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
392 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }, | |
393 | [IB_QPS_RTS] = { | |
394 | .trans = MTHCA_TRANS_RTS2RTS, | |
395 | .opt_param = { | |
396 | [UD] = (IB_QP_CUR_STATE | | |
397 | IB_QP_QKEY), | |
9e6970b5 RD |
398 | [UC] = (IB_QP_ACCESS_FLAGS | |
399 | IB_QP_ALT_PATH | | |
400 | IB_QP_PATH_MIG_STATE), | |
1da177e4 LT |
401 | [RC] = (IB_QP_ACCESS_FLAGS | |
402 | IB_QP_ALT_PATH | | |
403 | IB_QP_PATH_MIG_STATE | | |
404 | IB_QP_MIN_RNR_TIMER), | |
405 | [MLX] = (IB_QP_CUR_STATE | | |
406 | IB_QP_QKEY), | |
407 | } | |
408 | }, | |
409 | [IB_QPS_SQD] = { | |
410 | .trans = MTHCA_TRANS_RTS2SQD, | |
411 | }, | |
412 | }, | |
413 | [IB_QPS_SQD] = { | |
414 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
415 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }, | |
416 | [IB_QPS_RTS] = { | |
417 | .trans = MTHCA_TRANS_SQD2RTS, | |
418 | .opt_param = { | |
419 | [UD] = (IB_QP_CUR_STATE | | |
420 | IB_QP_QKEY), | |
9e6970b5 RD |
421 | [UC] = (IB_QP_CUR_STATE | |
422 | IB_QP_ALT_PATH | | |
423 | IB_QP_ACCESS_FLAGS | | |
424 | IB_QP_PATH_MIG_STATE), | |
1da177e4 LT |
425 | [RC] = (IB_QP_CUR_STATE | |
426 | IB_QP_ALT_PATH | | |
427 | IB_QP_ACCESS_FLAGS | | |
428 | IB_QP_MIN_RNR_TIMER | | |
429 | IB_QP_PATH_MIG_STATE), | |
430 | [MLX] = (IB_QP_CUR_STATE | | |
431 | IB_QP_QKEY), | |
432 | } | |
433 | }, | |
434 | [IB_QPS_SQD] = { | |
435 | .trans = MTHCA_TRANS_SQD2SQD, | |
436 | .opt_param = { | |
437 | [UD] = (IB_QP_PKEY_INDEX | | |
438 | IB_QP_QKEY), | |
9e6970b5 RD |
439 | [UC] = (IB_QP_AV | |
440 | IB_QP_MAX_QP_RD_ATOMIC | | |
441 | IB_QP_MAX_DEST_RD_ATOMIC | | |
442 | IB_QP_CUR_STATE | | |
443 | IB_QP_ALT_PATH | | |
444 | IB_QP_ACCESS_FLAGS | | |
445 | IB_QP_PKEY_INDEX | | |
446 | IB_QP_PATH_MIG_STATE), | |
1da177e4 LT |
447 | [RC] = (IB_QP_AV | |
448 | IB_QP_TIMEOUT | | |
449 | IB_QP_RETRY_CNT | | |
450 | IB_QP_RNR_RETRY | | |
451 | IB_QP_MAX_QP_RD_ATOMIC | | |
452 | IB_QP_MAX_DEST_RD_ATOMIC | | |
453 | IB_QP_CUR_STATE | | |
454 | IB_QP_ALT_PATH | | |
455 | IB_QP_ACCESS_FLAGS | | |
456 | IB_QP_PKEY_INDEX | | |
457 | IB_QP_MIN_RNR_TIMER | | |
458 | IB_QP_PATH_MIG_STATE), | |
459 | [MLX] = (IB_QP_PKEY_INDEX | | |
460 | IB_QP_QKEY), | |
461 | } | |
462 | } | |
463 | }, | |
464 | [IB_QPS_SQE] = { | |
465 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
466 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR }, | |
467 | [IB_QPS_RTS] = { | |
468 | .trans = MTHCA_TRANS_SQERR2RTS, | |
469 | .opt_param = { | |
470 | [UD] = (IB_QP_CUR_STATE | | |
471 | IB_QP_QKEY), | |
9e6970b5 | 472 | [UC] = (IB_QP_CUR_STATE), |
1da177e4 LT |
473 | [RC] = (IB_QP_CUR_STATE | |
474 | IB_QP_MIN_RNR_TIMER), | |
475 | [MLX] = (IB_QP_CUR_STATE | | |
476 | IB_QP_QKEY), | |
477 | } | |
478 | } | |
479 | }, | |
480 | [IB_QPS_ERR] = { | |
481 | [IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST }, | |
482 | [IB_QPS_ERR] = { .trans = MTHCA_TRANS_ANY2ERR } | |
483 | } | |
484 | }; | |
485 | ||
486 | static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr, | |
487 | int attr_mask) | |
488 | { | |
489 | if (attr_mask & IB_QP_PKEY_INDEX) | |
490 | sqp->pkey_index = attr->pkey_index; | |
491 | if (attr_mask & IB_QP_QKEY) | |
492 | sqp->qkey = attr->qkey; | |
493 | if (attr_mask & IB_QP_SQ_PSN) | |
494 | sqp->send_psn = attr->sq_psn; | |
495 | } | |
496 | ||
497 | static void init_port(struct mthca_dev *dev, int port) | |
498 | { | |
499 | int err; | |
500 | u8 status; | |
501 | struct mthca_init_ib_param param; | |
502 | ||
503 | memset(¶m, 0, sizeof param); | |
504 | ||
da6561c2 RD |
505 | param.port_width = dev->limits.port_width_cap; |
506 | param.vl_cap = dev->limits.vl_cap; | |
507 | param.mtu_cap = dev->limits.mtu_cap; | |
508 | param.gid_cap = dev->limits.gid_table_len; | |
509 | param.pkey_cap = dev->limits.pkey_table_len; | |
1da177e4 LT |
510 | |
511 | err = mthca_INIT_IB(dev, ¶m, port, &status); | |
512 | if (err) | |
513 | mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); | |
514 | if (status) | |
515 | mthca_warn(dev, "INIT_IB returned status %02x.\n", status); | |
516 | } | |
517 | ||
518 | int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) | |
519 | { | |
520 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
521 | struct mthca_qp *qp = to_mqp(ibqp); | |
522 | enum ib_qp_state cur_state, new_state; | |
ed878458 | 523 | struct mthca_mailbox *mailbox; |
1da177e4 LT |
524 | struct mthca_qp_param *qp_param; |
525 | struct mthca_qp_context *qp_context; | |
526 | u32 req_param, opt_param; | |
527 | u8 status; | |
528 | int err; | |
529 | ||
530 | if (attr_mask & IB_QP_CUR_STATE) { | |
531 | if (attr->cur_qp_state != IB_QPS_RTR && | |
532 | attr->cur_qp_state != IB_QPS_RTS && | |
533 | attr->cur_qp_state != IB_QPS_SQD && | |
534 | attr->cur_qp_state != IB_QPS_SQE) | |
535 | return -EINVAL; | |
536 | else | |
537 | cur_state = attr->cur_qp_state; | |
538 | } else { | |
539 | spin_lock_irq(&qp->sq.lock); | |
540 | spin_lock(&qp->rq.lock); | |
541 | cur_state = qp->state; | |
542 | spin_unlock(&qp->rq.lock); | |
543 | spin_unlock_irq(&qp->sq.lock); | |
544 | } | |
545 | ||
546 | if (attr_mask & IB_QP_STATE) { | |
547 | if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) | |
548 | return -EINVAL; | |
549 | new_state = attr->qp_state; | |
550 | } else | |
551 | new_state = cur_state; | |
552 | ||
553 | if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) { | |
554 | mthca_dbg(dev, "Illegal QP transition " | |
555 | "%d->%d\n", cur_state, new_state); | |
556 | return -EINVAL; | |
557 | } | |
558 | ||
559 | req_param = state_table[cur_state][new_state].req_param[qp->transport]; | |
560 | opt_param = state_table[cur_state][new_state].opt_param[qp->transport]; | |
561 | ||
562 | if ((req_param & attr_mask) != req_param) { | |
563 | mthca_dbg(dev, "QP transition " | |
564 | "%d->%d missing req attr 0x%08x\n", | |
565 | cur_state, new_state, | |
566 | req_param & ~attr_mask); | |
567 | return -EINVAL; | |
568 | } | |
569 | ||
570 | if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) { | |
571 | mthca_dbg(dev, "QP transition (transport %d) " | |
572 | "%d->%d has extra attr 0x%08x\n", | |
573 | qp->transport, | |
574 | cur_state, new_state, | |
575 | attr_mask & ~(req_param | opt_param | | |
576 | IB_QP_STATE)); | |
577 | return -EINVAL; | |
578 | } | |
579 | ||
ed878458 RD |
580 | mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); |
581 | if (IS_ERR(mailbox)) | |
582 | return PTR_ERR(mailbox); | |
583 | qp_param = mailbox->buf; | |
1da177e4 LT |
584 | qp_context = &qp_param->context; |
585 | memset(qp_param, 0, sizeof *qp_param); | |
586 | ||
587 | qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) | | |
588 | (to_mthca_st(qp->transport) << 16)); | |
589 | qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE); | |
590 | if (!(attr_mask & IB_QP_PATH_MIG_STATE)) | |
591 | qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); | |
592 | else { | |
593 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE); | |
594 | switch (attr->path_mig_state) { | |
595 | case IB_MIG_MIGRATED: | |
596 | qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11); | |
597 | break; | |
598 | case IB_MIG_REARM: | |
599 | qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11); | |
600 | break; | |
601 | case IB_MIG_ARMED: | |
602 | qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11); | |
603 | break; | |
604 | } | |
605 | } | |
606 | ||
607 | /* leave tavor_sched_queue as 0 */ | |
608 | ||
609 | if (qp->transport == MLX || qp->transport == UD) | |
610 | qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11; | |
611 | else if (attr_mask & IB_QP_PATH_MTU) | |
612 | qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; | |
613 | ||
d10ddbf6 | 614 | if (mthca_is_memfree(dev)) { |
ec34a922 RD |
615 | if (qp->rq.max) |
616 | qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; | |
617 | qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; | |
618 | ||
619 | if (qp->sq.max) | |
620 | qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; | |
621 | qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; | |
1da177e4 LT |
622 | } |
623 | ||
624 | /* leave arbel_sched_queue as 0 */ | |
625 | ||
80c8ec2c RD |
626 | if (qp->ibqp.uobject) |
627 | qp_context->usr_page = | |
628 | cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index); | |
629 | else | |
630 | qp_context->usr_page = cpu_to_be32(dev->driver_uar.index); | |
1da177e4 LT |
631 | qp_context->local_qpn = cpu_to_be32(qp->qpn); |
632 | if (attr_mask & IB_QP_DEST_QPN) { | |
633 | qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num); | |
634 | } | |
635 | ||
636 | if (qp->transport == MLX) | |
637 | qp_context->pri_path.port_pkey |= | |
638 | cpu_to_be32(to_msqp(qp)->port << 24); | |
639 | else { | |
640 | if (attr_mask & IB_QP_PORT) { | |
641 | qp_context->pri_path.port_pkey |= | |
642 | cpu_to_be32(attr->port_num << 24); | |
643 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM); | |
644 | } | |
645 | } | |
646 | ||
647 | if (attr_mask & IB_QP_PKEY_INDEX) { | |
648 | qp_context->pri_path.port_pkey |= | |
649 | cpu_to_be32(attr->pkey_index); | |
650 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX); | |
651 | } | |
652 | ||
653 | if (attr_mask & IB_QP_RNR_RETRY) { | |
654 | qp_context->pri_path.rnr_retry = attr->rnr_retry << 5; | |
655 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY); | |
656 | } | |
657 | ||
658 | if (attr_mask & IB_QP_AV) { | |
659 | qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f; | |
660 | qp_context->pri_path.rlid = cpu_to_be16(attr->ah_attr.dlid); | |
cd123d7f | 661 | qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate; |
1da177e4 LT |
662 | if (attr->ah_attr.ah_flags & IB_AH_GRH) { |
663 | qp_context->pri_path.g_mylmc |= 1 << 7; | |
664 | qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index; | |
665 | qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit; | |
666 | qp_context->pri_path.sl_tclass_flowlabel = | |
667 | cpu_to_be32((attr->ah_attr.sl << 28) | | |
668 | (attr->ah_attr.grh.traffic_class << 20) | | |
669 | (attr->ah_attr.grh.flow_label)); | |
670 | memcpy(qp_context->pri_path.rgid, | |
671 | attr->ah_attr.grh.dgid.raw, 16); | |
672 | } else { | |
673 | qp_context->pri_path.sl_tclass_flowlabel = | |
674 | cpu_to_be32(attr->ah_attr.sl << 28); | |
675 | } | |
676 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); | |
677 | } | |
678 | ||
679 | if (attr_mask & IB_QP_TIMEOUT) { | |
680 | qp_context->pri_path.ackto = attr->timeout; | |
681 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT); | |
682 | } | |
683 | ||
684 | /* XXX alt_path */ | |
685 | ||
686 | /* leave rdd as 0 */ | |
687 | qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num); | |
688 | /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */ | |
689 | qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); | |
690 | qp_context->params1 = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) | | |
691 | (MTHCA_FLIGHT_LIMIT << 24) | | |
692 | MTHCA_QP_BIT_SRE | | |
693 | MTHCA_QP_BIT_SWE | | |
694 | MTHCA_QP_BIT_SAE); | |
695 | if (qp->sq_policy == IB_SIGNAL_ALL_WR) | |
696 | qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC); | |
697 | if (attr_mask & IB_QP_RETRY_CNT) { | |
698 | qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16); | |
699 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT); | |
700 | } | |
701 | ||
34a4a753 RD |
702 | if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { |
703 | qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ? | |
704 | ffs(attr->max_rd_atomic) - 1 : 0, | |
1da177e4 LT |
705 | 7) << 21); |
706 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX); | |
707 | } | |
708 | ||
709 | if (attr_mask & IB_QP_SQ_PSN) | |
710 | qp_context->next_send_psn = cpu_to_be32(attr->sq_psn); | |
711 | qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn); | |
712 | ||
d10ddbf6 | 713 | if (mthca_is_memfree(dev)) { |
1da177e4 LT |
714 | qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); |
715 | qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); | |
716 | } | |
717 | ||
718 | if (attr_mask & IB_QP_ACCESS_FLAGS) { | |
719 | /* | |
720 | * Only enable RDMA/atomics if we have responder | |
721 | * resources set to a non-zero value. | |
722 | */ | |
723 | if (qp->resp_depth) { | |
724 | qp_context->params2 |= | |
725 | cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ? | |
726 | MTHCA_QP_BIT_RWE : 0); | |
727 | qp_context->params2 |= | |
728 | cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ? | |
729 | MTHCA_QP_BIT_RRE : 0); | |
730 | qp_context->params2 |= | |
731 | cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ? | |
732 | MTHCA_QP_BIT_RAE : 0); | |
733 | } | |
734 | ||
735 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | | |
736 | MTHCA_QP_OPTPAR_RRE | | |
737 | MTHCA_QP_OPTPAR_RAE); | |
738 | ||
739 | qp->atomic_rd_en = attr->qp_access_flags; | |
740 | } | |
741 | ||
34a4a753 | 742 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { |
1da177e4 LT |
743 | u8 rra_max; |
744 | ||
34a4a753 | 745 | if (qp->resp_depth && !attr->max_dest_rd_atomic) { |
1da177e4 LT |
746 | /* |
747 | * Lowering our responder resources to zero. | |
748 | * Turn off RDMA/atomics as responder. | |
749 | * (RWE/RRE/RAE in params2 already zero) | |
750 | */ | |
751 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | | |
752 | MTHCA_QP_OPTPAR_RRE | | |
753 | MTHCA_QP_OPTPAR_RAE); | |
754 | } | |
755 | ||
34a4a753 | 756 | if (!qp->resp_depth && attr->max_dest_rd_atomic) { |
1da177e4 LT |
757 | /* |
758 | * Increasing our responder resources from | |
759 | * zero. Turn on RDMA/atomics as appropriate. | |
760 | */ | |
761 | qp_context->params2 |= | |
762 | cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ? | |
763 | MTHCA_QP_BIT_RWE : 0); | |
764 | qp_context->params2 |= | |
765 | cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ? | |
766 | MTHCA_QP_BIT_RRE : 0); | |
767 | qp_context->params2 |= | |
768 | cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ? | |
769 | MTHCA_QP_BIT_RAE : 0); | |
770 | ||
771 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE | | |
772 | MTHCA_QP_OPTPAR_RRE | | |
773 | MTHCA_QP_OPTPAR_RAE); | |
774 | } | |
775 | ||
776 | for (rra_max = 0; | |
34a4a753 | 777 | 1 << rra_max < attr->max_dest_rd_atomic && |
1da177e4 LT |
778 | rra_max < dev->qp_table.rdb_shift; |
779 | ++rra_max) | |
780 | ; /* nothing */ | |
781 | ||
782 | qp_context->params2 |= cpu_to_be32(rra_max << 21); | |
783 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX); | |
784 | ||
34a4a753 | 785 | qp->resp_depth = attr->max_dest_rd_atomic; |
1da177e4 LT |
786 | } |
787 | ||
788 | qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); | |
789 | ||
ec34a922 RD |
790 | if (ibqp->srq) |
791 | qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); | |
792 | ||
1da177e4 LT |
793 | if (attr_mask & IB_QP_MIN_RNR_TIMER) { |
794 | qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); | |
795 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); | |
796 | } | |
797 | if (attr_mask & IB_QP_RQ_PSN) | |
798 | qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); | |
799 | ||
800 | qp_context->ra_buff_indx = | |
801 | cpu_to_be32(dev->qp_table.rdb_base + | |
802 | ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << | |
803 | dev->qp_table.rdb_shift)); | |
804 | ||
805 | qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn); | |
806 | ||
d10ddbf6 | 807 | if (mthca_is_memfree(dev)) |
1da177e4 LT |
808 | qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); |
809 | ||
810 | if (attr_mask & IB_QP_QKEY) { | |
811 | qp_context->qkey = cpu_to_be32(attr->qkey); | |
812 | qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); | |
813 | } | |
814 | ||
ec34a922 RD |
815 | if (ibqp->srq) |
816 | qp_context->srqn = cpu_to_be32(1 << 24 | | |
817 | to_msrq(ibqp->srq)->srqn); | |
818 | ||
1da177e4 | 819 | err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, |
ed878458 | 820 | qp->qpn, 0, mailbox, 0, &status); |
1da177e4 LT |
821 | if (status) { |
822 | mthca_warn(dev, "modify QP %d returned status %02x.\n", | |
823 | state_table[cur_state][new_state].trans, status); | |
824 | err = -EINVAL; | |
825 | } | |
826 | ||
827 | if (!err) | |
828 | qp->state = new_state; | |
829 | ||
ed878458 | 830 | mthca_free_mailbox(dev, mailbox); |
1da177e4 LT |
831 | |
832 | if (is_sqp(dev, qp)) | |
833 | store_attrs(to_msqp(qp), attr, attr_mask); | |
834 | ||
835 | /* | |
836 | * If we are moving QP0 to RTR, bring the IB link up; if we | |
837 | * are moving QP0 to RESET or ERROR, bring the link back down. | |
838 | */ | |
839 | if (is_qp0(dev, qp)) { | |
840 | if (cur_state != IB_QPS_RTR && | |
841 | new_state == IB_QPS_RTR) | |
842 | init_port(dev, to_msqp(qp)->port); | |
843 | ||
844 | if (cur_state != IB_QPS_RESET && | |
845 | cur_state != IB_QPS_ERR && | |
846 | (new_state == IB_QPS_RESET || | |
847 | new_state == IB_QPS_ERR)) | |
848 | mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status); | |
849 | } | |
850 | ||
851 | return err; | |
852 | } | |
853 | ||
854 | /* | |
855 | * Allocate and register buffer for WQEs. qp->rq.max, sq.max, | |
856 | * rq.max_gs and sq.max_gs must all be assigned. | |
857 | * mthca_alloc_wqe_buf will calculate rq.wqe_shift and | |
858 | * sq.wqe_shift (as well as send_wqe_offset, is_direct, and | |
859 | * queue) | |
860 | */ | |
861 | static int mthca_alloc_wqe_buf(struct mthca_dev *dev, | |
862 | struct mthca_pd *pd, | |
863 | struct mthca_qp *qp) | |
864 | { | |
865 | int size; | |
1da177e4 LT |
866 | int err = -ENOMEM; |
867 | ||
868 | size = sizeof (struct mthca_next_seg) + | |
869 | qp->rq.max_gs * sizeof (struct mthca_data_seg); | |
870 | ||
871 | for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; | |
872 | qp->rq.wqe_shift++) | |
873 | ; /* nothing */ | |
874 | ||
875 | size = sizeof (struct mthca_next_seg) + | |
876 | qp->sq.max_gs * sizeof (struct mthca_data_seg); | |
877 | switch (qp->transport) { | |
878 | case MLX: | |
879 | size += 2 * sizeof (struct mthca_data_seg); | |
880 | break; | |
881 | case UD: | |
d10ddbf6 | 882 | if (mthca_is_memfree(dev)) |
1da177e4 LT |
883 | size += sizeof (struct mthca_arbel_ud_seg); |
884 | else | |
885 | size += sizeof (struct mthca_tavor_ud_seg); | |
886 | break; | |
887 | default: | |
888 | /* bind seg is as big as atomic + raddr segs */ | |
889 | size += sizeof (struct mthca_bind_seg); | |
890 | } | |
891 | ||
892 | for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; | |
893 | qp->sq.wqe_shift++) | |
894 | ; /* nothing */ | |
895 | ||
896 | qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, | |
897 | 1 << qp->sq.wqe_shift); | |
80c8ec2c RD |
898 | |
899 | /* | |
900 | * If this is a userspace QP, we don't actually have to | |
901 | * allocate anything. All we need is to calculate the WQE | |
902 | * sizes and the send_wqe_offset, so we're done now. | |
903 | */ | |
904 | if (pd->ibpd.uobject) | |
905 | return 0; | |
906 | ||
1da177e4 LT |
907 | size = PAGE_ALIGN(qp->send_wqe_offset + |
908 | (qp->sq.max << qp->sq.wqe_shift)); | |
909 | ||
910 | qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), | |
911 | GFP_KERNEL); | |
912 | if (!qp->wrid) | |
913 | goto err_out; | |
914 | ||
87b81670 RD |
915 | err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, |
916 | &qp->queue, &qp->is_direct, pd, 0, &qp->mr); | |
1da177e4 | 917 | if (err) |
87b81670 | 918 | goto err_out; |
1da177e4 | 919 | |
1da177e4 LT |
920 | return 0; |
921 | ||
87b81670 | 922 | err_out: |
1da177e4 | 923 | kfree(qp->wrid); |
1da177e4 LT |
924 | return err; |
925 | } | |
926 | ||
80c8ec2c | 927 | static void mthca_free_wqe_buf(struct mthca_dev *dev, |
1da177e4 LT |
928 | struct mthca_qp *qp) |
929 | { | |
87b81670 RD |
930 | mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + |
931 | (qp->sq.max << qp->sq.wqe_shift)), | |
932 | &qp->queue, qp->is_direct, &qp->mr); | |
80c8ec2c RD |
933 | kfree(qp->wrid); |
934 | } | |
935 | ||
936 | static int mthca_map_memfree(struct mthca_dev *dev, | |
937 | struct mthca_qp *qp) | |
938 | { | |
939 | int ret; | |
1da177e4 | 940 | |
d10ddbf6 | 941 | if (mthca_is_memfree(dev)) { |
1da177e4 LT |
942 | ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); |
943 | if (ret) | |
944 | return ret; | |
945 | ||
946 | ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); | |
947 | if (ret) | |
948 | goto err_qpc; | |
949 | ||
80c8ec2c RD |
950 | ret = mthca_table_get(dev, dev->qp_table.rdb_table, |
951 | qp->qpn << dev->qp_table.rdb_shift); | |
952 | if (ret) | |
953 | goto err_eqpc; | |
1da177e4 | 954 | |
1da177e4 LT |
955 | } |
956 | ||
957 | return 0; | |
958 | ||
1da177e4 LT |
959 | err_eqpc: |
960 | mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); | |
961 | ||
962 | err_qpc: | |
963 | mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); | |
964 | ||
965 | return ret; | |
966 | } | |
967 | ||
80c8ec2c RD |
968 | static void mthca_unmap_memfree(struct mthca_dev *dev, |
969 | struct mthca_qp *qp) | |
970 | { | |
971 | mthca_table_put(dev, dev->qp_table.rdb_table, | |
972 | qp->qpn << dev->qp_table.rdb_shift); | |
973 | mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); | |
974 | mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); | |
975 | } | |
976 | ||
977 | static int mthca_alloc_memfree(struct mthca_dev *dev, | |
978 | struct mthca_qp *qp) | |
979 | { | |
980 | int ret = 0; | |
981 | ||
982 | if (mthca_is_memfree(dev)) { | |
983 | qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, | |
984 | qp->qpn, &qp->rq.db); | |
985 | if (qp->rq.db_index < 0) | |
986 | return ret; | |
987 | ||
988 | qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, | |
989 | qp->qpn, &qp->sq.db); | |
990 | if (qp->sq.db_index < 0) | |
991 | mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); | |
992 | } | |
993 | ||
994 | return ret; | |
995 | } | |
996 | ||
1da177e4 LT |
997 | static void mthca_free_memfree(struct mthca_dev *dev, |
998 | struct mthca_qp *qp) | |
999 | { | |
d10ddbf6 | 1000 | if (mthca_is_memfree(dev)) { |
1da177e4 LT |
1001 | mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); |
1002 | mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); | |
1da177e4 LT |
1003 | } |
1004 | } | |
1005 | ||
1006 | static void mthca_wq_init(struct mthca_wq* wq) | |
1007 | { | |
1008 | spin_lock_init(&wq->lock); | |
1009 | wq->next_ind = 0; | |
1010 | wq->last_comp = wq->max - 1; | |
1011 | wq->head = 0; | |
1012 | wq->tail = 0; | |
1013 | wq->last = NULL; | |
1014 | } | |
1015 | ||
1016 | static int mthca_alloc_qp_common(struct mthca_dev *dev, | |
1017 | struct mthca_pd *pd, | |
1018 | struct mthca_cq *send_cq, | |
1019 | struct mthca_cq *recv_cq, | |
1020 | enum ib_sig_type send_policy, | |
1021 | struct mthca_qp *qp) | |
1022 | { | |
1da177e4 LT |
1023 | int ret; |
1024 | int i; | |
1025 | ||
1026 | atomic_set(&qp->refcount, 1); | |
1027 | qp->state = IB_QPS_RESET; | |
1028 | qp->atomic_rd_en = 0; | |
1029 | qp->resp_depth = 0; | |
1030 | qp->sq_policy = send_policy; | |
1031 | mthca_wq_init(&qp->sq); | |
1032 | mthca_wq_init(&qp->rq); | |
1033 | ||
80c8ec2c | 1034 | ret = mthca_map_memfree(dev, qp); |
1da177e4 LT |
1035 | if (ret) |
1036 | return ret; | |
1037 | ||
1038 | ret = mthca_alloc_wqe_buf(dev, pd, qp); | |
1039 | if (ret) { | |
80c8ec2c RD |
1040 | mthca_unmap_memfree(dev, qp); |
1041 | return ret; | |
1042 | } | |
1043 | ||
1044 | /* | |
1045 | * If this is a userspace QP, we're done now. The doorbells | |
1046 | * will be allocated and buffers will be initialized in | |
1047 | * userspace. | |
1048 | */ | |
1049 | if (pd->ibpd.uobject) | |
1050 | return 0; | |
1051 | ||
1052 | ret = mthca_alloc_memfree(dev, qp); | |
1053 | if (ret) { | |
1054 | mthca_free_wqe_buf(dev, qp); | |
1055 | mthca_unmap_memfree(dev, qp); | |
1da177e4 LT |
1056 | return ret; |
1057 | } | |
1058 | ||
d10ddbf6 | 1059 | if (mthca_is_memfree(dev)) { |
ddf841f0 RD |
1060 | struct mthca_next_seg *next; |
1061 | struct mthca_data_seg *scatter; | |
1062 | int size = (sizeof (struct mthca_next_seg) + | |
1063 | qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; | |
1064 | ||
1da177e4 | 1065 | for (i = 0; i < qp->rq.max; ++i) { |
ddf841f0 RD |
1066 | next = get_recv_wqe(qp, i); |
1067 | next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << | |
1068 | qp->rq.wqe_shift); | |
1069 | next->ee_nds = cpu_to_be32(size); | |
1070 | ||
1071 | for (scatter = (void *) (next + 1); | |
1072 | (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); | |
1073 | ++scatter) | |
1074 | scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); | |
1da177e4 LT |
1075 | } |
1076 | ||
1077 | for (i = 0; i < qp->sq.max; ++i) { | |
ddf841f0 RD |
1078 | next = get_send_wqe(qp, i); |
1079 | next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << | |
1080 | qp->sq.wqe_shift) + | |
1081 | qp->send_wqe_offset); | |
1da177e4 LT |
1082 | } |
1083 | } | |
1084 | ||
1085 | return 0; | |
1086 | } | |
1087 | ||
80c8ec2c RD |
1088 | static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, |
1089 | struct mthca_qp *qp) | |
1da177e4 | 1090 | { |
80c8ec2c RD |
1091 | /* Sanity check QP size before proceeding */ |
1092 | if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 || | |
1093 | cap->max_send_sge > 64 || cap->max_recv_sge > 64) | |
1094 | return -EINVAL; | |
1da177e4 | 1095 | |
80c8ec2c RD |
1096 | if (mthca_is_memfree(dev)) { |
1097 | qp->rq.max = cap->max_recv_wr ? | |
1098 | roundup_pow_of_two(cap->max_recv_wr) : 0; | |
1099 | qp->sq.max = cap->max_send_wr ? | |
1100 | roundup_pow_of_two(cap->max_send_wr) : 0; | |
1101 | } else { | |
1102 | qp->rq.max = cap->max_recv_wr; | |
1103 | qp->sq.max = cap->max_send_wr; | |
1104 | } | |
1da177e4 | 1105 | |
80c8ec2c RD |
1106 | qp->rq.max_gs = cap->max_recv_sge; |
1107 | qp->sq.max_gs = max_t(int, cap->max_send_sge, | |
1108 | ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, | |
1109 | MTHCA_INLINE_CHUNK_SIZE) / | |
1110 | sizeof (struct mthca_data_seg)); | |
1da177e4 | 1111 | |
80c8ec2c RD |
1112 | /* |
1113 | * For MLX transport we need 2 extra S/G entries: | |
1114 | * one for the header and one for the checksum at the end | |
1115 | */ | |
1116 | if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) || | |
1117 | qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg) | |
1118 | return -EINVAL; | |
1da177e4 | 1119 | |
80c8ec2c | 1120 | return 0; |
1da177e4 LT |
1121 | } |
1122 | ||
1123 | int mthca_alloc_qp(struct mthca_dev *dev, | |
1124 | struct mthca_pd *pd, | |
1125 | struct mthca_cq *send_cq, | |
1126 | struct mthca_cq *recv_cq, | |
1127 | enum ib_qp_type type, | |
1128 | enum ib_sig_type send_policy, | |
80c8ec2c | 1129 | struct ib_qp_cap *cap, |
1da177e4 LT |
1130 | struct mthca_qp *qp) |
1131 | { | |
1132 | int err; | |
1133 | ||
80c8ec2c RD |
1134 | err = mthca_set_qp_size(dev, cap, qp); |
1135 | if (err) | |
1136 | return err; | |
1da177e4 LT |
1137 | |
1138 | switch (type) { | |
1139 | case IB_QPT_RC: qp->transport = RC; break; | |
1140 | case IB_QPT_UC: qp->transport = UC; break; | |
1141 | case IB_QPT_UD: qp->transport = UD; break; | |
1142 | default: return -EINVAL; | |
1143 | } | |
1144 | ||
1145 | qp->qpn = mthca_alloc(&dev->qp_table.alloc); | |
1146 | if (qp->qpn == -1) | |
1147 | return -ENOMEM; | |
1148 | ||
1149 | err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, | |
1150 | send_policy, qp); | |
1151 | if (err) { | |
1152 | mthca_free(&dev->qp_table.alloc, qp->qpn); | |
1153 | return err; | |
1154 | } | |
1155 | ||
1156 | spin_lock_irq(&dev->qp_table.lock); | |
1157 | mthca_array_set(&dev->qp_table.qp, | |
1158 | qp->qpn & (dev->limits.num_qps - 1), qp); | |
1159 | spin_unlock_irq(&dev->qp_table.lock); | |
1160 | ||
1161 | return 0; | |
1162 | } | |
1163 | ||
1164 | int mthca_alloc_sqp(struct mthca_dev *dev, | |
1165 | struct mthca_pd *pd, | |
1166 | struct mthca_cq *send_cq, | |
1167 | struct mthca_cq *recv_cq, | |
1168 | enum ib_sig_type send_policy, | |
80c8ec2c | 1169 | struct ib_qp_cap *cap, |
1da177e4 LT |
1170 | int qpn, |
1171 | int port, | |
1172 | struct mthca_sqp *sqp) | |
1173 | { | |
1da177e4 | 1174 | u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; |
80c8ec2c | 1175 | int err; |
1da177e4 | 1176 | |
80c8ec2c RD |
1177 | err = mthca_set_qp_size(dev, cap, &sqp->qp); |
1178 | if (err) | |
1179 | return err; | |
1da177e4 LT |
1180 | |
1181 | sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; | |
1182 | sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size, | |
1183 | &sqp->header_dma, GFP_KERNEL); | |
1184 | if (!sqp->header_buf) | |
1185 | return -ENOMEM; | |
1186 | ||
1187 | spin_lock_irq(&dev->qp_table.lock); | |
1188 | if (mthca_array_get(&dev->qp_table.qp, mqpn)) | |
1189 | err = -EBUSY; | |
1190 | else | |
1191 | mthca_array_set(&dev->qp_table.qp, mqpn, sqp); | |
1192 | spin_unlock_irq(&dev->qp_table.lock); | |
1193 | ||
1194 | if (err) | |
1195 | goto err_out; | |
1196 | ||
1197 | sqp->port = port; | |
1198 | sqp->qp.qpn = mqpn; | |
1199 | sqp->qp.transport = MLX; | |
1200 | ||
1201 | err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, | |
1202 | send_policy, &sqp->qp); | |
1203 | if (err) | |
1204 | goto err_out_free; | |
1205 | ||
1206 | atomic_inc(&pd->sqp_count); | |
1207 | ||
1208 | return 0; | |
1209 | ||
1210 | err_out_free: | |
1211 | /* | |
1212 | * Lock CQs here, so that CQ polling code can do QP lookup | |
1213 | * without taking a lock. | |
1214 | */ | |
1215 | spin_lock_irq(&send_cq->lock); | |
1216 | if (send_cq != recv_cq) | |
1217 | spin_lock(&recv_cq->lock); | |
1218 | ||
1219 | spin_lock(&dev->qp_table.lock); | |
1220 | mthca_array_clear(&dev->qp_table.qp, mqpn); | |
1221 | spin_unlock(&dev->qp_table.lock); | |
1222 | ||
1223 | if (send_cq != recv_cq) | |
1224 | spin_unlock(&recv_cq->lock); | |
1225 | spin_unlock_irq(&send_cq->lock); | |
1226 | ||
1227 | err_out: | |
1228 | dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size, | |
1229 | sqp->header_buf, sqp->header_dma); | |
1230 | ||
1231 | return err; | |
1232 | } | |
1233 | ||
1234 | void mthca_free_qp(struct mthca_dev *dev, | |
1235 | struct mthca_qp *qp) | |
1236 | { | |
1237 | u8 status; | |
1da177e4 LT |
1238 | struct mthca_cq *send_cq; |
1239 | struct mthca_cq *recv_cq; | |
1240 | ||
1241 | send_cq = to_mcq(qp->ibqp.send_cq); | |
1242 | recv_cq = to_mcq(qp->ibqp.recv_cq); | |
1243 | ||
1244 | /* | |
1245 | * Lock CQs here, so that CQ polling code can do QP lookup | |
1246 | * without taking a lock. | |
1247 | */ | |
1248 | spin_lock_irq(&send_cq->lock); | |
1249 | if (send_cq != recv_cq) | |
1250 | spin_lock(&recv_cq->lock); | |
1251 | ||
1252 | spin_lock(&dev->qp_table.lock); | |
1253 | mthca_array_clear(&dev->qp_table.qp, | |
1254 | qp->qpn & (dev->limits.num_qps - 1)); | |
1255 | spin_unlock(&dev->qp_table.lock); | |
1256 | ||
1257 | if (send_cq != recv_cq) | |
1258 | spin_unlock(&recv_cq->lock); | |
1259 | spin_unlock_irq(&send_cq->lock); | |
1260 | ||
1261 | atomic_dec(&qp->refcount); | |
1262 | wait_event(qp->wait, !atomic_read(&qp->refcount)); | |
1263 | ||
1264 | if (qp->state != IB_QPS_RESET) | |
1265 | mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status); | |
1266 | ||
80c8ec2c RD |
1267 | /* |
1268 | * If this is a userspace QP, the buffers, MR, CQs and so on | |
1269 | * will be cleaned up in userspace, so all we have to do is | |
1270 | * unref the mem-free tables and free the QPN in our table. | |
1271 | */ | |
1272 | if (!qp->ibqp.uobject) { | |
ec34a922 RD |
1273 | mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, |
1274 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | |
80c8ec2c | 1275 | if (qp->ibqp.send_cq != qp->ibqp.recv_cq) |
ec34a922 RD |
1276 | mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, |
1277 | qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); | |
1da177e4 | 1278 | |
80c8ec2c RD |
1279 | mthca_free_memfree(dev, qp); |
1280 | mthca_free_wqe_buf(dev, qp); | |
1da177e4 LT |
1281 | } |
1282 | ||
80c8ec2c | 1283 | mthca_unmap_memfree(dev, qp); |
1da177e4 LT |
1284 | |
1285 | if (is_sqp(dev, qp)) { | |
1286 | atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); | |
1287 | dma_free_coherent(&dev->pdev->dev, | |
1288 | to_msqp(qp)->header_buf_size, | |
1289 | to_msqp(qp)->header_buf, | |
1290 | to_msqp(qp)->header_dma); | |
1291 | } else | |
1292 | mthca_free(&dev->qp_table.alloc, qp->qpn); | |
1293 | } | |
1294 | ||
1295 | /* Create UD header for an MLX send and build a data segment for it */ | |
1296 | static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |
1297 | int ind, struct ib_send_wr *wr, | |
1298 | struct mthca_mlx_seg *mlx, | |
1299 | struct mthca_data_seg *data) | |
1300 | { | |
1301 | int header_size; | |
1302 | int err; | |
97f52eb4 | 1303 | u16 pkey; |
1da177e4 LT |
1304 | |
1305 | ib_ud_header_init(256, /* assume a MAD */ | |
1306 | sqp->ud_header.grh_present, | |
1307 | &sqp->ud_header); | |
1308 | ||
1309 | err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); | |
1310 | if (err) | |
1311 | return err; | |
1312 | mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1); | |
1313 | mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | | |
97f52eb4 SH |
1314 | (sqp->ud_header.lrh.destination_lid == |
1315 | IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) | | |
1da177e4 LT |
1316 | (sqp->ud_header.lrh.service_level << 8)); |
1317 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | |
1318 | mlx->vcrc = 0; | |
1319 | ||
1320 | switch (wr->opcode) { | |
1321 | case IB_WR_SEND: | |
1322 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; | |
1323 | sqp->ud_header.immediate_present = 0; | |
1324 | break; | |
1325 | case IB_WR_SEND_WITH_IMM: | |
1326 | sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; | |
1327 | sqp->ud_header.immediate_present = 1; | |
1328 | sqp->ud_header.immediate_data = wr->imm_data; | |
1329 | break; | |
1330 | default: | |
1331 | return -EINVAL; | |
1332 | } | |
1333 | ||
1334 | sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; | |
97f52eb4 SH |
1335 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) |
1336 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | |
1da177e4 LT |
1337 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); |
1338 | if (!sqp->qp.ibqp.qp_num) | |
1339 | ib_get_cached_pkey(&dev->ib_dev, sqp->port, | |
97f52eb4 | 1340 | sqp->pkey_index, &pkey); |
1da177e4 LT |
1341 | else |
1342 | ib_get_cached_pkey(&dev->ib_dev, sqp->port, | |
97f52eb4 SH |
1343 | wr->wr.ud.pkey_index, &pkey); |
1344 | sqp->ud_header.bth.pkey = cpu_to_be16(pkey); | |
1da177e4 LT |
1345 | sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn); |
1346 | sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1)); | |
1347 | sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ? | |
1348 | sqp->qkey : wr->wr.ud.remote_qkey); | |
1349 | sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num); | |
1350 | ||
1351 | header_size = ib_ud_header_pack(&sqp->ud_header, | |
1352 | sqp->header_buf + | |
1353 | ind * MTHCA_UD_HEADER_SIZE); | |
1354 | ||
1355 | data->byte_count = cpu_to_be32(header_size); | |
1356 | data->lkey = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); | |
1357 | data->addr = cpu_to_be64(sqp->header_dma + | |
1358 | ind * MTHCA_UD_HEADER_SIZE); | |
1359 | ||
1360 | return 0; | |
1361 | } | |
1362 | ||
1363 | static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, | |
1364 | struct ib_cq *ib_cq) | |
1365 | { | |
1366 | unsigned cur; | |
1367 | struct mthca_cq *cq; | |
1368 | ||
1369 | cur = wq->head - wq->tail; | |
1370 | if (likely(cur + nreq < wq->max)) | |
1371 | return 0; | |
1372 | ||
1373 | cq = to_mcq(ib_cq); | |
1374 | spin_lock(&cq->lock); | |
1375 | cur = wq->head - wq->tail; | |
1376 | spin_unlock(&cq->lock); | |
1377 | ||
1378 | return cur + nreq >= wq->max; | |
1379 | } | |
1380 | ||
1381 | int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1382 | struct ib_send_wr **bad_wr) | |
1383 | { | |
1384 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1385 | struct mthca_qp *qp = to_mqp(ibqp); | |
1386 | void *wqe; | |
1387 | void *prev_wqe; | |
1388 | unsigned long flags; | |
1389 | int err = 0; | |
1390 | int nreq; | |
1391 | int i; | |
1392 | int size; | |
1393 | int size0 = 0; | |
1394 | u32 f0 = 0; | |
1395 | int ind; | |
1396 | u8 op0 = 0; | |
1397 | ||
1398 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1399 | ||
1400 | /* XXX check that state is OK to post send */ | |
1401 | ||
1402 | ind = qp->sq.next_ind; | |
1403 | ||
1404 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
1405 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | |
1406 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1407 | " %d max, %d nreq)\n", qp->qpn, | |
1408 | qp->sq.head, qp->sq.tail, | |
1409 | qp->sq.max, nreq); | |
1410 | err = -ENOMEM; | |
1411 | *bad_wr = wr; | |
1412 | goto out; | |
1413 | } | |
1414 | ||
1415 | wqe = get_send_wqe(qp, ind); | |
1416 | prev_wqe = qp->sq.last; | |
1417 | qp->sq.last = wqe; | |
1418 | ||
1419 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1420 | ((struct mthca_next_seg *) wqe)->ee_nds = 0; | |
1421 | ((struct mthca_next_seg *) wqe)->flags = | |
1422 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1423 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1424 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1425 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1426 | cpu_to_be32(1); | |
1427 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1428 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1429 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1430 | |
1431 | wqe += sizeof (struct mthca_next_seg); | |
1432 | size = sizeof (struct mthca_next_seg) / 16; | |
1433 | ||
1434 | switch (qp->transport) { | |
1435 | case RC: | |
1436 | switch (wr->opcode) { | |
1437 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1438 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1439 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1440 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1441 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1442 | cpu_to_be32(wr->wr.atomic.rkey); | |
1443 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1444 | ||
1445 | wqe += sizeof (struct mthca_raddr_seg); | |
1446 | ||
1447 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1448 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1449 | cpu_to_be64(wr->wr.atomic.swap); | |
1450 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1451 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1452 | } else { | |
1453 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1454 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1455 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1456 | } | |
1457 | ||
1458 | wqe += sizeof (struct mthca_atomic_seg); | |
1459 | size += sizeof (struct mthca_raddr_seg) / 16 + | |
1460 | sizeof (struct mthca_atomic_seg); | |
1461 | break; | |
1462 | ||
1463 | case IB_WR_RDMA_WRITE: | |
1464 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1465 | case IB_WR_RDMA_READ: | |
1466 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1467 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1468 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1469 | cpu_to_be32(wr->wr.rdma.rkey); | |
1470 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1471 | wqe += sizeof (struct mthca_raddr_seg); | |
1472 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1473 | break; | |
1474 | ||
1475 | default: | |
1476 | /* No extra segments required for sends */ | |
1477 | break; | |
1478 | } | |
1479 | ||
1480 | break; | |
1481 | ||
9e6970b5 RD |
1482 | case UC: |
1483 | switch (wr->opcode) { | |
1484 | case IB_WR_RDMA_WRITE: | |
1485 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1486 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1487 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1488 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1489 | cpu_to_be32(wr->wr.rdma.rkey); | |
1490 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1491 | wqe += sizeof (struct mthca_raddr_seg); | |
1492 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1493 | break; | |
1494 | ||
1495 | default: | |
1496 | /* No extra segments required for sends */ | |
1497 | break; | |
1498 | } | |
1499 | ||
1500 | break; | |
1501 | ||
1da177e4 LT |
1502 | case UD: |
1503 | ((struct mthca_tavor_ud_seg *) wqe)->lkey = | |
1504 | cpu_to_be32(to_mah(wr->wr.ud.ah)->key); | |
1505 | ((struct mthca_tavor_ud_seg *) wqe)->av_addr = | |
1506 | cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma); | |
1507 | ((struct mthca_tavor_ud_seg *) wqe)->dqpn = | |
1508 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
1509 | ((struct mthca_tavor_ud_seg *) wqe)->qkey = | |
1510 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
1511 | ||
1512 | wqe += sizeof (struct mthca_tavor_ud_seg); | |
1513 | size += sizeof (struct mthca_tavor_ud_seg) / 16; | |
1514 | break; | |
1515 | ||
1516 | case MLX: | |
1517 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
1518 | wqe - sizeof (struct mthca_next_seg), | |
1519 | wqe); | |
1520 | if (err) { | |
1521 | *bad_wr = wr; | |
1522 | goto out; | |
1523 | } | |
1524 | wqe += sizeof (struct mthca_data_seg); | |
1525 | size += sizeof (struct mthca_data_seg) / 16; | |
1526 | break; | |
1527 | } | |
1528 | ||
1529 | if (wr->num_sge > qp->sq.max_gs) { | |
1530 | mthca_err(dev, "too many gathers\n"); | |
1531 | err = -EINVAL; | |
1532 | *bad_wr = wr; | |
1533 | goto out; | |
1534 | } | |
1535 | ||
1536 | for (i = 0; i < wr->num_sge; ++i) { | |
1537 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1538 | cpu_to_be32(wr->sg_list[i].length); | |
1539 | ((struct mthca_data_seg *) wqe)->lkey = | |
1540 | cpu_to_be32(wr->sg_list[i].lkey); | |
1541 | ((struct mthca_data_seg *) wqe)->addr = | |
1542 | cpu_to_be64(wr->sg_list[i].addr); | |
1543 | wqe += sizeof (struct mthca_data_seg); | |
1544 | size += sizeof (struct mthca_data_seg) / 16; | |
1545 | } | |
1546 | ||
1547 | /* Add one more inline data segment for ICRC */ | |
1548 | if (qp->transport == MLX) { | |
1549 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1550 | cpu_to_be32((1 << 31) | 4); | |
1551 | ((u32 *) wqe)[1] = 0; | |
1552 | wqe += sizeof (struct mthca_data_seg); | |
1553 | size += sizeof (struct mthca_data_seg) / 16; | |
1554 | } | |
1555 | ||
1556 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
1557 | ||
1558 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
1559 | mthca_err(dev, "opcode invalid\n"); | |
1560 | err = -EINVAL; | |
1561 | *bad_wr = wr; | |
1562 | goto out; | |
1563 | } | |
1564 | ||
1565 | if (prev_wqe) { | |
1566 | ((struct mthca_next_seg *) prev_wqe)->nda_op = | |
1567 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
1568 | qp->send_wqe_offset) | | |
1569 | mthca_opcode[wr->opcode]); | |
1570 | wmb(); | |
1571 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1572 | cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size); | |
1573 | } | |
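/*
 * The previous WQE is linked to the one just built: nda_op gets the
 * new WQE's offset in the send queue plus its opcode, then wmb()
 * orders that store before ee_nds is updated with the size (and the
 * DBD bit for the first WQE of this post), which is what lets the
 * HCA follow the chain into the new descriptor.
 */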
1574 | ||
1575 | if (!size0) { | |
1576 | size0 = size; | |
1577 | op0 = mthca_opcode[wr->opcode]; | |
1578 | } | |
1579 | ||
1580 | ++ind; | |
1581 | if (unlikely(ind >= qp->sq.max)) | |
1582 | ind -= qp->sq.max; | |
1583 | } | |
1584 | ||
1585 | out: | |
1586 | if (likely(nreq)) { | |
97f52eb4 | 1587 | __be32 doorbell[2]; |
1da177e4 LT |
1588 | |
1589 | doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) + | |
1590 | qp->send_wqe_offset) | f0 | op0); | |
1591 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1592 | ||
1593 | wmb(); | |
1594 | ||
1595 | mthca_write64(doorbell, | |
1596 | dev->kar + MTHCA_SEND_DOORBELL, | |
1597 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1598 | } | |
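/*
 * Tavor send doorbell format: word 0 carries the byte offset of the
 * first new WQE, the fence bit (f0) and its opcode (op0); word 1
 * carries the QP number and the first WQE's size in 16-byte chunks.
 * mthca_write64() posts both words as a single 64-bit MMIO write
 * where the platform allows it, otherwise as two 32-bit writes
 * protected by doorbell_lock.
 */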
1599 | ||
1600 | qp->sq.next_ind = ind; | |
1601 | qp->sq.head += nreq; | |
1602 | ||
1603 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
1604 | return err; | |
1605 | } | |
1606 | ||
1607 | int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
1608 | struct ib_recv_wr **bad_wr) | |
1609 | { | |
1610 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1611 | struct mthca_qp *qp = to_mqp(ibqp); | |
1612 | unsigned long flags; | |
1613 | int err = 0; | |
1614 | int nreq; | |
1615 | int i; | |
1616 | int size; | |
1617 | int size0 = 0; | |
1618 | int ind; | |
1619 | void *wqe; | |
1620 | void *prev_wqe; | |
1621 | ||
1622 | spin_lock_irqsave(&qp->rq.lock, flags); | |
1623 | ||
1624 | /* XXX check that state is OK to post receive */ | |
1625 | ||
1626 | ind = qp->rq.next_ind; | |
1627 | ||
1628 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
1629 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | |
1630 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
1631 | " %d max, %d nreq)\n", qp->qpn, | |
1632 | qp->rq.head, qp->rq.tail, | |
1633 | qp->rq.max, nreq); | |
1634 | err = -ENOMEM; | |
1635 | *bad_wr = wr; | |
1636 | goto out; | |
1637 | } | |
1638 | ||
1639 | wqe = get_recv_wqe(qp, ind); | |
1640 | prev_wqe = qp->rq.last; | |
1641 | qp->rq.last = wqe; | |
1642 | ||
1643 | ((struct mthca_next_seg *) wqe)->nda_op = 0; | |
1644 | ((struct mthca_next_seg *) wqe)->ee_nds = | |
1645 | cpu_to_be32(MTHCA_NEXT_DBD); | |
1646 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
1647 | ||
1648 | wqe += sizeof (struct mthca_next_seg); | |
1649 | size = sizeof (struct mthca_next_seg) / 16; | |
1650 | ||
1651 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
1652 | err = -EINVAL; | |
1653 | *bad_wr = wr; | |
1654 | goto out; | |
1655 | } | |
1656 | ||
1657 | for (i = 0; i < wr->num_sge; ++i) { | |
1658 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1659 | cpu_to_be32(wr->sg_list[i].length); | |
1660 | ((struct mthca_data_seg *) wqe)->lkey = | |
1661 | cpu_to_be32(wr->sg_list[i].lkey); | |
1662 | ((struct mthca_data_seg *) wqe)->addr = | |
1663 | cpu_to_be64(wr->sg_list[i].addr); | |
1664 | wqe += sizeof (struct mthca_data_seg); | |
1665 | size += sizeof (struct mthca_data_seg) / 16; | |
1666 | } | |
1667 | ||
1668 | qp->wrid[ind] = wr->wr_id; | |
1669 | ||
1670 | if (likely(prev_wqe)) { | |
1671 | ((struct mthca_next_seg *) prev_wqe)->nda_op = | |
1672 | cpu_to_be32((ind << qp->rq.wqe_shift) | 1); | |
1673 | wmb(); | |
1674 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1675 | cpu_to_be32(MTHCA_NEXT_DBD | size); | |
1676 | } | |
1677 | ||
1678 | if (!size0) | |
1679 | size0 = size; | |
1680 | ||
1681 | ++ind; | |
1682 | if (unlikely(ind >= qp->rq.max)) | |
1683 | ind -= qp->rq.max; | |
1684 | } | |
1685 | ||
1686 | out: | |
1687 | if (likely(nreq)) { | |
97f52eb4 | 1688 | __be32 doorbell[2]; |
1da177e4 LT |
1689 | |
1690 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | |
1691 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq); | |
1692 | ||
1693 | wmb(); | |
1694 | ||
1695 | mthca_write64(doorbell, | |
1696 | dev->kar + MTHCA_RECEIVE_DOORBELL, | |
1697 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1698 | } | |
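/*
 * The receive doorbell is analogous: word 0 holds the offset and
 * size of the first new receive WQE, word 1 the QP number and the
 * number of WQEs posted in this call.
 */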
1699 | ||
1700 | qp->rq.next_ind = ind; | |
1701 | qp->rq.head += nreq; | |
1702 | ||
1703 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
1704 | return err; | |
1705 | } | |
1706 | ||
1707 | int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |
1708 | struct ib_send_wr **bad_wr) | |
1709 | { | |
1710 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1711 | struct mthca_qp *qp = to_mqp(ibqp); | |
1712 | void *wqe; | |
1713 | void *prev_wqe; | |
1714 | unsigned long flags; | |
1715 | int err = 0; | |
1716 | int nreq; | |
1717 | int i; | |
1718 | int size; | |
1719 | int size0 = 0; | |
1720 | u32 f0 = 0; | |
1721 | int ind; | |
1722 | u8 op0 = 0; | |
1723 | ||
1724 | spin_lock_irqsave(&qp->sq.lock, flags); | |
1725 | ||
1726 | /* XXX check that state is OK to post send */ | |
1727 | ||
1728 | ind = qp->sq.head & (qp->sq.max - 1); | |
1729 | ||
1730 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
1731 | if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | |
1732 | mthca_err(dev, "SQ %06x full (%u head, %u tail," | |
1733 | " %d max, %d nreq)\n", qp->qpn, | |
1734 | qp->sq.head, qp->sq.tail, | |
1735 | qp->sq.max, nreq); | |
1736 | err = -ENOMEM; | |
1737 | *bad_wr = wr; | |
1738 | goto out; | |
1739 | } | |
1740 | ||
1741 | wqe = get_send_wqe(qp, ind); | |
1742 | prev_wqe = qp->sq.last; | |
1743 | qp->sq.last = wqe; | |
1744 | ||
1745 | ((struct mthca_next_seg *) wqe)->flags = | |
1746 | ((wr->send_flags & IB_SEND_SIGNALED) ? | |
1747 | cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) | | |
1748 | ((wr->send_flags & IB_SEND_SOLICITED) ? | |
1749 | cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) | | |
1750 | cpu_to_be32(1); | |
1751 | if (wr->opcode == IB_WR_SEND_WITH_IMM || | |
1752 | wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) | |
3fba2317 | 1753 | ((struct mthca_next_seg *) wqe)->imm = wr->imm_data; |
1da177e4 LT |
1754 | |
1755 | wqe += sizeof (struct mthca_next_seg); | |
1756 | size = sizeof (struct mthca_next_seg) / 16; | |
1757 | ||
1758 | switch (qp->transport) { | |
ddb934e0 RD |
1759 | case RC: |
1760 | switch (wr->opcode) { | |
1761 | case IB_WR_ATOMIC_CMP_AND_SWP: | |
1762 | case IB_WR_ATOMIC_FETCH_AND_ADD: | |
1763 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1764 | cpu_to_be64(wr->wr.atomic.remote_addr); | |
1765 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1766 | cpu_to_be32(wr->wr.atomic.rkey); | |
1767 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1768 | ||
1769 | wqe += sizeof (struct mthca_raddr_seg); | |
1770 | ||
1771 | if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { | |
1772 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1773 | cpu_to_be64(wr->wr.atomic.swap); | |
1774 | ((struct mthca_atomic_seg *) wqe)->compare = | |
1775 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1776 | } else { | |
1777 | ((struct mthca_atomic_seg *) wqe)->swap_add = | |
1778 | cpu_to_be64(wr->wr.atomic.compare_add); | |
1779 | ((struct mthca_atomic_seg *) wqe)->compare = 0; | |
1780 | } | |
1781 | ||
1782 | wqe += sizeof (struct mthca_atomic_seg); | |
1783 | size += sizeof (struct mthca_raddr_seg) / 16 + | |
1784 | sizeof (struct mthca_atomic_seg) / 16; | |
1785 | break; | |
1786 | ||
9e6970b5 RD |
1787 | case IB_WR_RDMA_READ: |
1788 | case IB_WR_RDMA_WRITE: | |
1789 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
1790 | ((struct mthca_raddr_seg *) wqe)->raddr = | |
1791 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1792 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1793 | cpu_to_be32(wr->wr.rdma.rkey); | |
1794 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1795 | wqe += sizeof (struct mthca_raddr_seg); | |
1796 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1797 | break; | |
1798 | ||
1799 | default: | |
1800 | /* No extra segments required for sends */ | |
1801 | break; | |
1802 | } | |
1803 | ||
1804 | break; | |
1805 | ||
1806 | case UC: | |
1807 | switch (wr->opcode) { | |
ddb934e0 RD |
1808 | case IB_WR_RDMA_WRITE: |
1809 | case IB_WR_RDMA_WRITE_WITH_IMM: | |
ddb934e0 RD |
1810 | ((struct mthca_raddr_seg *) wqe)->raddr = |
1811 | cpu_to_be64(wr->wr.rdma.remote_addr); | |
1812 | ((struct mthca_raddr_seg *) wqe)->rkey = | |
1813 | cpu_to_be32(wr->wr.rdma.rkey); | |
1814 | ((struct mthca_raddr_seg *) wqe)->reserved = 0; | |
1815 | wqe += sizeof (struct mthca_raddr_seg); | |
1816 | size += sizeof (struct mthca_raddr_seg) / 16; | |
1817 | break; | |
1818 | ||
1819 | default: | |
1820 | /* No extra segments required for sends */ | |
1821 | break; | |
1822 | } | |
1823 | ||
1824 | break; | |
1825 | ||
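/*
 * In mem-free (Arbel) mode the full address vector is copied into
 * the UD segment of the WQE (MTHCA_AV_SIZE bytes); the Tavor path
 * above only stores the AV's lkey and DMA address.
 */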
1da177e4 LT |
1826 | case UD: |
1827 | memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, | |
1828 | to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE); | |
1829 | ((struct mthca_arbel_ud_seg *) wqe)->dqpn = | |
1830 | cpu_to_be32(wr->wr.ud.remote_qpn); | |
1831 | ((struct mthca_arbel_ud_seg *) wqe)->qkey = | |
1832 | cpu_to_be32(wr->wr.ud.remote_qkey); | |
1833 | ||
1834 | wqe += sizeof (struct mthca_arbel_ud_seg); | |
1835 | size += sizeof (struct mthca_arbel_ud_seg) / 16; | |
1836 | break; | |
1837 | ||
1838 | case MLX: | |
1839 | err = build_mlx_header(dev, to_msqp(qp), ind, wr, | |
1840 | wqe - sizeof (struct mthca_next_seg), | |
1841 | wqe); | |
1842 | if (err) { | |
1843 | *bad_wr = wr; | |
1844 | goto out; | |
1845 | } | |
1846 | wqe += sizeof (struct mthca_data_seg); | |
1847 | size += sizeof (struct mthca_data_seg) / 16; | |
1848 | break; | |
1849 | } | |
1850 | ||
1851 | if (wr->num_sge > qp->sq.max_gs) { | |
1852 | mthca_err(dev, "too many gathers\n"); | |
1853 | err = -EINVAL; | |
1854 | *bad_wr = wr; | |
1855 | goto out; | |
1856 | } | |
1857 | ||
1858 | for (i = 0; i < wr->num_sge; ++i) { | |
1859 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1860 | cpu_to_be32(wr->sg_list[i].length); | |
1861 | ((struct mthca_data_seg *) wqe)->lkey = | |
1862 | cpu_to_be32(wr->sg_list[i].lkey); | |
1863 | ((struct mthca_data_seg *) wqe)->addr = | |
1864 | cpu_to_be64(wr->sg_list[i].addr); | |
1865 | wqe += sizeof (struct mthca_data_seg); | |
1866 | size += sizeof (struct mthca_data_seg) / 16; | |
1867 | } | |
1868 | ||
1869 | /* Add one more inline data segment for ICRC */ | |
1870 | if (qp->transport == MLX) { | |
1871 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1872 | cpu_to_be32((1 << 31) | 4); | |
1873 | ((u32 *) wqe)[1] = 0; | |
1874 | wqe += sizeof (struct mthca_data_seg); | |
1875 | size += sizeof (struct mthca_data_seg) / 16; | |
1876 | } | |
1877 | ||
1878 | qp->wrid[ind + qp->rq.max] = wr->wr_id; | |
1879 | ||
1880 | if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) { | |
1881 | mthca_err(dev, "opcode invalid\n"); | |
1882 | err = -EINVAL; | |
1883 | *bad_wr = wr; | |
1884 | goto out; | |
1885 | } | |
1886 | ||
1887 | if (likely(prev_wqe)) { | |
1888 | ((struct mthca_next_seg *) prev_wqe)->nda_op = | |
1889 | cpu_to_be32(((ind << qp->sq.wqe_shift) + | |
1890 | qp->send_wqe_offset) | | |
1891 | mthca_opcode[wr->opcode]); | |
1892 | wmb(); | |
1893 | ((struct mthca_next_seg *) prev_wqe)->ee_nds = | |
1894 | cpu_to_be32(MTHCA_NEXT_DBD | size); | |
1895 | } | |
1896 | ||
1897 | if (!size0) { | |
1898 | size0 = size; | |
1899 | op0 = mthca_opcode[wr->opcode]; | |
1900 | } | |
1901 | ||
1902 | ++ind; | |
1903 | if (unlikely(ind >= qp->sq.max)) | |
1904 | ind -= qp->sq.max; | |
1905 | } | |
1906 | ||
1907 | out: | |
1908 | if (likely(nreq)) { | |
97f52eb4 | 1909 | __be32 doorbell[2]; |
1da177e4 LT |
1910 | |
1911 | doorbell[0] = cpu_to_be32((nreq << 24) | | |
1912 | ((qp->sq.head & 0xffff) << 8) | | |
1913 | f0 | op0); | |
1914 | doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0); | |
1915 | ||
1916 | qp->sq.head += nreq; | |
1917 | ||
1918 | /* | |
1919 | * Make sure that descriptors are written before | |
1920 | * doorbell record. | |
1921 | */ | |
1922 | wmb(); | |
1923 | *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); | |
1924 | ||
1925 | /* | |
1926 | * Make sure doorbell record is written before we | |
1927 | * write MMIO send doorbell. | |
1928 | */ | |
1929 | wmb(); | |
1930 | mthca_write64(doorbell, | |
1931 | dev->kar + MTHCA_SEND_DOORBELL, | |
1932 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | |
1933 | } | |
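/*
 * Arbel-mode posting uses both mechanisms above: the doorbell
 * record (*qp->sq.db) tells the HCA the new producer index, and the
 * MMIO doorbell kicks processing.  Word 0 of the MMIO doorbell
 * packs the request count, the low 16 bits of the head at which the
 * first new WQE was placed, the fence bit (f0) and its opcode
 * (op0); word 1 packs the QP number and the first WQE's size in
 * 16-byte chunks.
 */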
1934 | ||
1935 | spin_unlock_irqrestore(&qp->sq.lock, flags); | |
1936 | return err; | |
1937 | } | |
1938 | ||
1939 | int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |
1940 | struct ib_recv_wr **bad_wr) | |
1941 | { | |
1942 | struct mthca_dev *dev = to_mdev(ibqp->device); | |
1943 | struct mthca_qp *qp = to_mqp(ibqp); | |
1944 | unsigned long flags; | |
1945 | int err = 0; | |
1946 | int nreq; | |
1947 | int ind; | |
1948 | int i; | |
1949 | void *wqe; | |
1950 | ||
1951 | spin_lock_irqsave(&qp->rq.lock, flags); | |
1952 | ||
1953 | /* XXX check that state is OK to post receive */ | |
1954 | ||
1955 | ind = qp->rq.head & (qp->rq.max - 1); | |
1956 | ||
1957 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | |
1958 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | |
1959 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | |
1960 | " %d max, %d nreq)\n", qp->qpn, | |
1961 | qp->rq.head, qp->rq.tail, | |
1962 | qp->rq.max, nreq); | |
1963 | err = -ENOMEM; | |
1964 | *bad_wr = wr; | |
1965 | goto out; | |
1966 | } | |
1967 | ||
1968 | wqe = get_recv_wqe(qp, ind); | |
1969 | ||
1970 | ((struct mthca_next_seg *) wqe)->flags = 0; | |
1971 | ||
1972 | wqe += sizeof (struct mthca_next_seg); | |
1973 | ||
1974 | if (unlikely(wr->num_sge > qp->rq.max_gs)) { | |
1975 | err = -EINVAL; | |
1976 | *bad_wr = wr; | |
1977 | goto out; | |
1978 | } | |
1979 | ||
1980 | for (i = 0; i < wr->num_sge; ++i) { | |
1981 | ((struct mthca_data_seg *) wqe)->byte_count = | |
1982 | cpu_to_be32(wr->sg_list[i].length); | |
1983 | ((struct mthca_data_seg *) wqe)->lkey = | |
1984 | cpu_to_be32(wr->sg_list[i].lkey); | |
1985 | ((struct mthca_data_seg *) wqe)->addr = | |
1986 | cpu_to_be64(wr->sg_list[i].addr); | |
1987 | wqe += sizeof (struct mthca_data_seg); | |
1988 | } | |
1989 | ||
1990 | if (i < qp->rq.max_gs) { | |
1991 | ((struct mthca_data_seg *) wqe)->byte_count = 0; | |
ddf841f0 | 1992 | ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); |
1da177e4 LT |
1993 | ((struct mthca_data_seg *) wqe)->addr = 0; |
1994 | } | |
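/*
 * If fewer than max_gs segments were used, the scatter list is
 * terminated with a zero-length segment using the invalid lkey so
 * the HCA can tell where the list ends, since mem-free receive WQEs
 * carry no explicit size field for it to consult.
 */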
1995 | ||
1996 | qp->wrid[ind] = wr->wr_id; | |
1997 | ||
1998 | ++ind; | |
1999 | if (unlikely(ind >= qp->rq.max)) | |
2000 | ind -= qp->rq.max; | |
2001 | } | |
2002 | out: | |
2003 | if (likely(nreq)) { | |
2004 | qp->rq.head += nreq; | |
2005 | ||
2006 | /* | |
2007 | * Make sure that descriptors are written before | |
2008 | * doorbell record. | |
2009 | */ | |
2010 | wmb(); | |
2011 | *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); | |
2012 | } | |
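/*
 * Unlike the send side, mem-free receive posting needs no MMIO
 * doorbell: updating the doorbell record with the new head counter
 * is all the HCA requires to see the new WQEs.
 */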
2013 | ||
2014 | spin_unlock_irqrestore(&qp->rq.lock, flags); | |
2015 | return err; | |
2016 | } | |
2017 | ||
2018 | int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, | |
97f52eb4 | 2019 | int index, int *dbd, __be32 *new_wqe) |
1da177e4 LT |
2020 | { |
2021 | struct mthca_next_seg *next; | |
2022 | ||
ec34a922 RD |
2023 | /* |
2024 | * For SRQs, all WQEs generate a CQE, so we're always at the | |
2025 | * end of the doorbell chain. | |
2026 | */ | |
2027 | if (qp->ibqp.srq) { | |
2028 | *new_wqe = 0; | |
2029 | return 0; | |
2030 | } | |
2031 | ||
1da177e4 LT |
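/*
 * Report whether the failed WQE owned a doorbell (DBD bit) and, if
 * its ee_nds size field shows a valid next descriptor, rebuild the
 * nda_op word (next address plus 6-bit size) so the CQ error
 * handling can link past it.
 */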
2032 | if (is_send) |
2033 | next = get_send_wqe(qp, index); | |
2034 | else | |
2035 | next = get_recv_wqe(qp, index); | |
2036 | ||
288bdeb4 | 2037 | *dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD)); |
1da177e4 LT |
2038 | if (next->ee_nds & cpu_to_be32(0x3f)) |
2039 | *new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) | | |
2040 | (next->ee_nds & cpu_to_be32(0x3f)); | |
2041 | else | |
2042 | *new_wqe = 0; | |
2043 | ||
2044 | return 0; | |
2045 | } | |
2046 | ||
2047 | int __devinit mthca_init_qp_table(struct mthca_dev *dev) | |
2048 | { | |
2049 | int err; | |
2050 | u8 status; | |
2051 | int i; | |
2052 | ||
2053 | spin_lock_init(&dev->qp_table.lock); | |
2054 | ||
2055 | /* | |
2056 | * We reserve 2 extra QPs per port for the special QPs. The | |
2057 | * special QP for port 1 has to be even, so round up. | |
2058 | */ | |
2059 | dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; | |
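/*
 * Example: with 8 reserved QPs this evaluates to (8 + 1) & ~1UL == 8,
 * with 9 it rounds up to 10.  The four QPNs starting at sqp_start are
 * then kept out of the general allocator by the mthca_alloc_init()
 * call below and used for the special QPs of both ports.
 */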
2060 | err = mthca_alloc_init(&dev->qp_table.alloc, | |
2061 | dev->limits.num_qps, | |
2062 | (1 << 24) - 1, | |
2063 | dev->qp_table.sqp_start + | |
2064 | MTHCA_MAX_PORTS * 2); | |
2065 | if (err) | |
2066 | return err; | |
2067 | ||
2068 | err = mthca_array_init(&dev->qp_table.qp, | |
2069 | dev->limits.num_qps); | |
2070 | if (err) { | |
2071 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2072 | return err; | |
2073 | } | |
2074 | ||
2075 | for (i = 0; i < 2; ++i) { | |
2076 | err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, | |
2077 | dev->qp_table.sqp_start + i * 2, | |
2078 | &status); | |
2079 | if (err) | |
2080 | goto err_out; | |
2081 | if (status) { | |
2082 | mthca_warn(dev, "CONF_SPECIAL_QP returned " | |
2083 | "status %02x, aborting.\n", | |
2084 | status); | |
2085 | err = -EINVAL; | |
2086 | goto err_out; | |
2087 | } | |
2088 | } | |
2089 | return 0; | |
2090 | ||
2091 | err_out: | |
2092 | for (i = 0; i < 2; ++i) | |
2093 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2094 | ||
2095 | mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); | |
2096 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2097 | ||
2098 | return err; | |
2099 | } | |
2100 | ||
2101 | void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev) | |
2102 | { | |
2103 | int i; | |
2104 | u8 status; | |
2105 | ||
2106 | for (i = 0; i < 2; ++i) | |
2107 | mthca_CONF_SPECIAL_QP(dev, i, 0, &status); | |
2108 | ||
2109 | mthca_alloc_cleanup(&dev->qp_table.alloc); | |
2110 | } |