Commit | Line | Data |
---|---|---|
225c7b1f RD |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
3 | * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. | |
51a379d0 | 4 | * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. |
225c7b1f RD |
5 | * Copyright (c) 2004 Voltaire, Inc. All rights reserved. |
6 | * | |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
34 | */ | |
35 | ||
225c7b1f RD |
36 | #include <linux/mlx4/cmd.h> |
37 | #include <linux/mlx4/qp.h> | |
38 | ||
39 | #include "mlx4.h" | |
40 | #include "icm.h" | |
41 | ||
42 | void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) | |
43 | { | |
44 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | |
45 | struct mlx4_qp *qp; | |
46 | ||
47 | spin_lock(&qp_table->lock); | |
48 | ||
49 | qp = __mlx4_qp_lookup(dev, qpn); | |
50 | if (qp) | |
51 | atomic_inc(&qp->refcount); | |
52 | ||
53 | spin_unlock(&qp_table->lock); | |
54 | ||
55 | if (!qp) { | |
56 | mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn); | |
57 | return; | |
58 | } | |
59 | ||
60 | qp->event(qp, event_type); | |
61 | ||
62 | if (atomic_dec_and_test(&qp->refcount)) | |
63 | complete(&qp->free); | |
64 | } | |
65 | ||
/*
 * Move @qp from @cur_state to @new_state by issuing the matching
 * firmware command (RST2INIT, RTR2RTS, ...), carrying @context and the
 * optional-parameter mask @optpar in a command mailbox.
 *
 * Returns 0 on success, -EINVAL for a transition the hardware does not
 * support, or the mailbox allocation / command error.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	/*
	 * Firmware opcode for each legal (cur_state, new_state) pair.
	 * Entries left zero mark transitions the hardware cannot perform.
	 */
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT] = MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT] = MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR] = MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD] = MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD] = MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS] = MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST] = MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR] = MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;

	/* Reject out-of-range states and transitions with no opcode. */
	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	/*
	 * 2RST needs no context, so skip the mailbox.  The same
	 * op_modifier 2 is used for the mailbox 2RST path below —
	 * presumably "don't return the QP context"; confirm against the
	 * Mellanox PRM.
	 */
	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
		return mlx4_cmd(dev, 0, qp->qpn, 2,
				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/*
	 * On the first transition out of reset, program the MTT base
	 * address and log page size into the context.
	 */
	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	/* Mailbox layout: optpar mask at offset 0, QP context at offset 8. */
	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	/* Patch the QPN into the copied context (big-endian on the wire). */
	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	/* Bit 31 of the in_modifier requests a "send queue drained" event. */
	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
147 | ||
a3cdcbfa YP |
148 | int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) |
149 | { | |
150 | struct mlx4_priv *priv = mlx4_priv(dev); | |
151 | struct mlx4_qp_table *qp_table = &priv->qp_table; | |
152 | int qpn; | |
153 | ||
154 | qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align); | |
155 | if (qpn == -1) | |
156 | return -ENOMEM; | |
157 | ||
158 | *base = qpn; | |
159 | return 0; | |
160 | } | |
161 | EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range); | |
162 | ||
163 | void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) | |
164 | { | |
165 | struct mlx4_priv *priv = mlx4_priv(dev); | |
166 | struct mlx4_qp_table *qp_table = &priv->qp_table; | |
167 | if (base_qpn < dev->caps.sqp_start + 8) | |
168 | return; | |
169 | ||
170 | mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt); | |
171 | } | |
172 | EXPORT_SYMBOL_GPL(mlx4_qp_release_range); | |
173 | ||
/*
 * Make QP number @qpn usable: take a reference on each ICM table
 * backing it (QPC, auxc, altc, rdmarc and cmpt entries) and insert the
 * QP into the radix tree that mlx4_qp_event() uses for lookup.
 *
 * On any failure all references taken so far are dropped via the
 * reverse-order goto unwind below.  Returns 0 or a negative errno.
 */
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	/* QPN 0 is never handed out to users. */
	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
	if (err)
		goto err_put_rdmarc;

	/* The tree is keyed by the low bits of the QPN only. */
	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_put_cmpt;

	/* Initial reference; dropped by mlx4_qp_free(). */
	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_put_cmpt:
	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

err_out:
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
235 | ||
236 | void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) | |
237 | { | |
238 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | |
239 | unsigned long flags; | |
240 | ||
241 | spin_lock_irqsave(&qp_table->lock, flags); | |
242 | radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1)); | |
243 | spin_unlock_irqrestore(&qp_table->lock, flags); | |
244 | } | |
245 | EXPORT_SYMBOL_GPL(mlx4_qp_remove); | |
246 | ||
247 | void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) | |
248 | { | |
249 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | |
250 | ||
251 | if (atomic_dec_and_test(&qp->refcount)) | |
252 | complete(&qp->free); | |
253 | wait_for_completion(&qp->free); | |
254 | ||
255 | mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn); | |
256 | mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn); | |
257 | mlx4_table_put(dev, &qp_table->altc_table, qp->qpn); | |
258 | mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn); | |
259 | mlx4_table_put(dev, &qp_table->qp_table, qp->qpn); | |
225c7b1f RD |
260 | } |
261 | EXPORT_SYMBOL_GPL(mlx4_qp_free); | |
262 | ||
/*
 * Tell the firmware where the block of special QPs begins.  Cleanup
 * calls this with base_qpn == 0 — presumably disabling special QP
 * handling; confirm against the CONF_SPECIAL_QP command description.
 */
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B);
}
268 | ||
/*
 * Initialize the QP table: lookup tree, special-QP placement, reserved
 * region layout at the top of the QPN space, and the QPN allocator
 * bitmap.  Finishes by telling firmware where the special QPs live.
 */
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	/* Total QPs carved out of the top of the QPN range, see below. */
	int reserved_from_top = 0;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 */
	dev->caps.sqp_start =
		ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j, tmp;
		int last_base = dev->caps.num_qps;

		/*
		 * Bubble-sort the non-FW regions (indices 1 ..
		 * MLX4_NUM_QP_REGION - 1) by descending reserved count.
		 * sort[0] — the FW region, already placed at the bottom
		 * via sqp_start above — is intentionally never touched:
		 * the inner loop starts at j = 2, so it only compares
		 * sort[1] and up.
		 */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
			for (j = 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]]) {
					tmp = sort[j];
					sort[j] = sort[j - 1];
					sort[j - 1] = tmp;
				}
			}
		}

		/*
		 * Pack the sorted regions downward from num_qps: the
		 * largest region gets the highest base.  Accumulate the
		 * total so the allocator can exclude the whole top chunk.
		 */
		for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}

	}

	/*
	 * Allocator covers num_qps QPNs; (1 << 23) - 1 is presumably the
	 * hardware QPN mask (24-bit QPNs) — confirm against the PRM.
	 * The bottom sqp_start + 8 QPNs (FW region plus the special-QP
	 * block) and reserved_from_top QPNs at the top stay reserved.
	 */
	err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
			       (1 << 23) - 1, dev->caps.sqp_start + 8,
			       reserved_from_top);
	if (err)
		return err;

	return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
}
322 | ||
323 | void mlx4_cleanup_qp_table(struct mlx4_dev *dev) | |
324 | { | |
325 | mlx4_CONF_SPECIAL_QP(dev, 0); | |
326 | mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap); | |
327 | } | |
6a775e2b JM |
328 | |
329 | int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, | |
330 | struct mlx4_qp_context *context) | |
331 | { | |
332 | struct mlx4_cmd_mailbox *mailbox; | |
333 | int err; | |
334 | ||
335 | mailbox = mlx4_alloc_cmd_mailbox(dev); | |
336 | if (IS_ERR(mailbox)) | |
337 | return PTR_ERR(mailbox); | |
338 | ||
339 | err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0, | |
340 | MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A); | |
341 | if (!err) | |
342 | memcpy(context, mailbox->buf + 8, sizeof *context); | |
343 | ||
344 | mlx4_free_cmd_mailbox(dev, mailbox); | |
345 | return err; | |
346 | } | |
347 | EXPORT_SYMBOL_GPL(mlx4_qp_query); | |
348 | ||
ed4d3c10 YP |
349 | int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, |
350 | struct mlx4_qp_context *context, | |
351 | struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) | |
352 | { | |
353 | int err; | |
354 | int i; | |
355 | enum mlx4_qp_state states[] = { | |
356 | MLX4_QP_STATE_RST, | |
357 | MLX4_QP_STATE_INIT, | |
358 | MLX4_QP_STATE_RTR, | |
359 | MLX4_QP_STATE_RTS | |
360 | }; | |
361 | ||
362 | for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { | |
363 | context->flags &= cpu_to_be32(~(0xf << 28)); | |
364 | context->flags |= cpu_to_be32(states[i + 1] << 28); | |
365 | err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], | |
366 | context, 0, 0, qp); | |
367 | if (err) { | |
368 | mlx4_err(dev, "Failed to bring QP to state: " | |
369 | "%d with error: %d\n", | |
370 | states[i + 1], err); | |
371 | return err; | |
372 | } | |
373 | ||
374 | *qp_state = states[i + 1]; | |
375 | } | |
376 | ||
377 | return 0; | |
378 | } | |
379 | EXPORT_SYMBOL_GPL(mlx4_qp_to_ready); |