Commit | Line | Data |
---|---|---|
225c7b1f RD |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | |
51a379d0 | 4 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
225c7b1f RD |
5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. |
6 | * | |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
34 | */ | |
35 | ||
5a0e3ad6 | 36 | #include <linux/gfp.h> |
225c7b1f RD |
37 | #include <linux/mlx4/cmd.h> |
38 | #include <linux/mlx4/qp.h> | |
39 | ||
40 | #include "mlx4.h" | |
41 | #include "icm.h" | |
42 | ||
43 | void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) | |
44 | { | |
45 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | |
46 | struct mlx4_qp *qp; | |
47 | ||
48 | spin_lock(&qp_table->lock); | |
49 | ||
50 | qp = __mlx4_qp_lookup(dev, qpn); | |
51 | if (qp) | |
52 | atomic_inc(&qp->refcount); | |
53 | ||
54 | spin_unlock(&qp_table->lock); | |
55 | ||
56 | if (!qp) { | |
57 | mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); | |
58 | return; | |
59 | } | |
60 | ||
61 | qp->event(qp, event_type); | |
62 | ||
63 | if (atomic_dec_and_test(&qp->refcount)) | |
64 | complete(&qp->free); | |
65 | } | |
66 | ||
/*
 * mlx4_qp_modify - post a QP state-transition command to firmware
 * @dev:	mlx4 device
 * @mtt:	MTT for the QP's buffer; consulted only on the RST->INIT
 *		transition to fill in the context's MTT address fields
 * @cur_state:	state the QP is currently in
 * @new_state:	state to move the QP to
 * @context:	software copy of the QP context handed to firmware
 * @optpar:	mask of optional context parameters to apply
 * @sqd_event:	nonzero to request an SQ-drained event (encoded in bit 31
 *		of the command's input modifier)
 * @qp:		the QP being modified
 *
 * Returns 0 on success, -EINVAL for a transition the op[][] table does
 * not define, or the error from the firmware command.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	/*
	 * Firmware opcode for each legal (current, next) state pair.
	 * Entries left zero mark illegal transitions.
	 */
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;

	/* Reject out-of-range states and undefined transitions. */
	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	/* 2RST takes no context, so skip the mailbox entirely. */
	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
		return mlx4_cmd(dev, 0, qp->qpn, 2,
				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* On RST->INIT, point the context at the QP's MTT. */
	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	/* Mailbox layout: optpar mask at offset 0, QP context at offset 8. */
	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	/* Firmware reads the QP number from the context copy itself. */
	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	/*
	 * Input modifier is the QPN with bit 31 requesting an SQD event;
	 * op_modifier 2 is used when the target state is RST.
	 */
	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
148 | ||
a3cdcbfa YP |
149 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) |
150 | { | |
151 | struct mlx4_priv *priv = mlx4_priv(dev); | |
152 | struct mlx4_qp_table *qp_table = &priv->qp_table; | |
153 | int qpn; | |
154 | ||
155 | qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); | |
156 | if (qpn == -1) | |
157 | return -ENOMEM; | |
158 | ||
159 | *base = qpn; | |
160 | return 0; | |
161 | } | |
162 | EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); | |
163 | ||
164 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | |
165 | { | |
166 | struct mlx4_priv *priv = mlx4_priv(dev); | |
167 | struct mlx4_qp_table *qp_table = &priv->qp_table; | |
168 | if (base_qpn < dev->caps.sqp_start + 8) | |
169 | return; | |
170 | ||
171 | mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); | |
172 | } | |
173 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); | |
174 | ||
/*
 * mlx4_qp_alloc - make QP number @qpn usable: pin the ICM backing for
 * every per-QP firmware table, then publish the QP in the radix tree
 * used by the async-event lookup path.
 *
 * Returns 0 on success, -EINVAL for qpn == 0, or the error from the
 * first table get / radix-tree insert that fails; earlier gets are
 * unwound in reverse order via the goto ladder.
 */
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	/* Pin ICM for each table; order matters for the unwind below. */
	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
	if (err)
		goto err_put_rdmarc;

	/* Publish the QP for lookup by mlx4_qp_event(). */
	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_put_cmpt;

	/* Initial reference, dropped by mlx4_qp_free(). */
	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_put_cmpt:
	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

err_out:
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
236 | ||
237 | void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) | |
238 | { | |
239 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | |
240 | unsigned long flags; | |
241 | ||
242 | spin_lock_irqsave(&qp_table->lock, flags); | |
243 | radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); | |
244 | spin_unlock_irqrestore(&qp_table->lock, flags); | |
245 | } | |
246 | EXPORT_SYMBOL_GPL(mlx4_qp_remove); | |
247 | ||
248 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) | |
249 | { | |
250 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | |
251 | ||
252 | if (atomic_dec_and_test(&qp->refcount)) | |
253 | complete(&qp->free); | |
254 | wait_for_completion(&qp->free); | |
255 | ||
256 | mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); | |
257 | mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); | |
258 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); | |
259 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); | |
260 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | |
225c7b1f RD |
261 | } |
262 | EXPORT_SYMBOL_GPL(mlx4_qp_free); | |
263 | ||
/*
 * Tell firmware where the block of special QPs starts.
 * mlx4_cleanup_qp_table() passes a base of 0 at teardown.
 */
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B);
}
269 | ||
/*
 * mlx4_init_qp_table - set up the QP number space for the device:
 * initialize the lookup structures, lay out the reserved QP regions,
 * create the allocation bitmap, and configure the special QP block in
 * firmware.
 *
 * Returns 0 on success or the error from bitmap init / the
 * CONF_SPECIAL_QP command.
 */
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 */
	dev->caps.sqp_start =
		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j, tmp;
		int last_base = dev->caps.num_qps;

		/*
		 * Bubble-sort regions 1..MLX4_NUM_QP_REGION-1 into
		 * descending order of reserved QP count.  Region 0 (FW)
		 * is excluded — it stays at the bottom of the QP range —
		 * so sort[0] is never used.
		 */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
			for (j = 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
					tmp = sort[j];
					sort[j] = sort[j - 1];
					sort[j - 1] = tmp;
				}
			}
		}

		/*
		 * Carve the sorted regions out of the top of the QP
		 * range, recording each region's base address and the
		 * total count reserved from the top.
		 */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}

	}

	/*
	 * Reserve everything below the special QP block (inclusive of
	 * its 8 QPs) at the bottom, plus reserved_from_top QPs carved
	 * out above.
	 * NOTE(review): the (1 << 23) - 1 mask presumably matches the
	 * hardware QPN width — confirm against the PRM.
	 */
	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
			       (1 << 23) - 1, dev->caps.sqp_start + 8,
			       reserved_from_top);
	if (err)
		return err;

	return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
}
323 | ||
324 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev) | |
325 | { | |
326 | mlx4_CONF_SPECIAL_QP(dev, 0); | |
327 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); | |
328 | } | |
6a775e2b JM |
329 | |
330 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | |
331 | struct mlx4_qp_context *context) | |
332 | { | |
333 | struct mlx4_cmd_mailbox *mailbox; | |
334 | int err; | |
335 | ||
336 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
337 | if (IS_ERR(mailbox)) | |
338 | return PTR_ERR(mailbox); | |
339 | ||
340 | err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, | |
341 | MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A); | |
342 | if (!err) | |
343 | memcpy(context, mailbox->buf + 8, sizeof *context); | |
344 | ||
345 | mlx4_free_cmd_mailbox(dev, mailbox); | |
346 | return err; | |
347 | } | |
348 | EXPORT_SYMBOL_GPL(mlx4_qp_query); | |
349 | ||
ed4d3c10 YP |
350 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
351 | struct mlx4_qp_context *context, | |
352 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) | |
353 | { | |
354 | int err; | |
355 | int i; | |
356 | enum mlx4_qp_state states[] = { | |
357 | MLX4_QP_STATE_RST, | |
358 | MLX4_QP_STATE_INIT, | |
359 | MLX4_QP_STATE_RTR, | |
360 | MLX4_QP_STATE_RTS | |
361 | }; | |
362 | ||
363 | for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { | |
364 | context->flags &= cpu_to_be32(~(0xf << 28)); | |
365 | context->flags |= cpu_to_be32(states[i + 1] << 28); | |
366 | err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], | |
367 | context, 0, 0, qp); | |
368 | if (err) { | |
369 | mlx4_err(dev, "Failed to bring QP to state: " | |
370 | "%d with error: %d\n", | |
371 | states[i + 1], err); | |
372 | return err; | |
373 | } | |
374 | ||
375 | *qp_state = states[i + 1]; | |
376 | } | |
377 | ||
378 | return 0; | |
379 | } | |
380 | EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); |