/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"
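
/*
 * QPs, RQs and SQs tracked by this file share a single radix tree, keyed
 * by the resource number with the resource type folded into the upper
 * bits.  A successful lookup takes a reference on the resource; callers
 * release it with mlx5_core_put_rsc().
 */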
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
        case MLX5_RES_RQ:
        case MLX5_RES_SQ:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
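/*
 * Decode a page-fault EQE raised for an on-demand-paging (ODP) QP and
 * hand it to the QP's pfault_handler.  The reference taken by
 * mlx5_get_rsc() is dropped before returning.
 */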
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
        int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
        struct mlx5_core_qp *qp =
                container_of(common, struct mlx5_core_qp, common);
        struct mlx5_pagefault pfault;

        if (!common) {
                mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
                               qpn);
                return;
        }

        pfault.event_subtype = eqe->sub_type;
        pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
                (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
        pfault.bytes_committed = be32_to_cpu(
                pf_eqe->bytes_committed);

        mlx5_core_dbg(dev,
                      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
                      eqe->sub_type, pfault.flags);

        switch (eqe->sub_type) {
        case MLX5_PFAULT_SUBTYPE_RDMA:
                /* RDMA based event */
                pfault.rdma.r_key =
                        be32_to_cpu(pf_eqe->rdma.r_key);
                pfault.rdma.packet_size =
                        be16_to_cpu(pf_eqe->rdma.packet_length);
                pfault.rdma.rdma_op_len =
                        be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                pfault.rdma.rdma_va =
                        be64_to_cpu(pf_eqe->rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
                              qpn, pfault.rdma.r_key);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
                              pfault.rdma.rdma_op_len);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_va: 0x%016llx,\n",
                              pfault.rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        case MLX5_PFAULT_SUBTYPE_WQE:
                /* WQE based event */
                pfault.wqe.wqe_index =
                        be16_to_cpu(pf_eqe->wqe.wqe_index);
                pfault.wqe.packet_size =
                        be16_to_cpu(pf_eqe->wqe.packet_length);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
                              qpn, pfault.wqe.wqe_index);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        default:
                mlx5_core_warn(dev,
                               "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
                               eqe->sub_type, qpn);
                /* Unsupported page faults should still be resolved by the
                 * page fault handler
                 */
        }

        if (qp->pfault_handler) {
                qp->pfault_handler(qp, &pfault);
        } else {
                mlx5_core_err(dev,
                              "ODP event for QP %08x, without a fault handler in QP\n",
                              qpn);
                /* Page fault will remain unresolved. QP will hang until it is
                 * destroyed
                 */
        }

        mlx5_core_put_rsc(common);
}
#endif
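
/*
 * Common bookkeeping for QPs and for tracked RQs/SQs: insert the resource
 * into the QP table radix tree, take the initial reference and record the
 * owning pid.  destroy_qprqsq_common() reverses this and waits until the
 * last reference is dropped.
 */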
static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp,
                                int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree,
                                qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
                                qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = current->pid;

        return 0;
}

static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        int err;

        mlx5_debug_qp_remove(dev, qp);

        destroy_qprqsq_common(dev, qp);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
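
/*
 * mlx5_core_qp_modify() maps a (current state, new state) pair to the
 * firmware opcode that performs the transition, e.g. INIT -> RTR uses
 * MLX5_CMD_OP_INIT2RTR_QP.  Pairs with no entry in the table are invalid
 * and are rejected before any command is issued.
 */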
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
                        enum mlx5_qp_state new_state,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
                [MLX5_QP_STATE_RST] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
                },
                [MLX5_QP_STATE_INIT] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
                        [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
                },
                [MLX5_QP_STATE_RTR] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
                },
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
                },
                [MLX5_QP_STATE_SQD] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                },
                [MLX5_QP_STATE_SQER] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
                },
                [MLX5_QP_STATE_ERR] = {
                        [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
                }
        };

        struct mlx5_modify_qp_mbox_out out;
        int err = 0;
        u16 op;

        if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
            !optab[cur_state][new_state])
                return -EINVAL;

        memset(&out, 0, sizeof(out));
        op = optab[cur_state][new_state];
        in->hdr.opcode = cpu_to_be16(op);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);

void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        struct mlx5_alloc_xrcd_mbox_in in;
        struct mlx5_alloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
                *xrcdn = be32_to_cpu(out.xrcdn);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        struct mlx5_dealloc_xrcd_mbox_in in;
        struct mlx5_dealloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
        in.xrcdn = cpu_to_be32(xrcdn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
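/*
 * Ask the device to resume (or, when 'error' is set, fail) the operation
 * that triggered an ODP page fault on the given QP.
 */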
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 flags, int error)
{
        struct mlx5_page_fault_resume_mbox_in in;
        struct mlx5_page_fault_resume_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
        in.hdr.opmod = 0;
        flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
                  MLX5_PAGE_FAULT_RESUME_WRITE |
                  MLX5_PAGE_FAULT_RESUME_RDMA);
        flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
        in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
                                   (flags << MLX5_QPN_BITS));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif
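
/*
 * "Tracked" RQs and SQs are transport objects that are also registered in
 * the QP table, so asynchronous events can be dispatched to them through
 * mlx5_rsc_event() like ordinary QPs.
 */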
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;
        u32 rqn;

        err = mlx5_core_create_rq(dev, in, inlen, &rqn);
        if (err)
                return err;

        rq->qpn = rqn;
        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;
        u32 sqn;

        err = mlx5_core_create_sq(dev, in, inlen, &sqn);
        if (err)
                return err;

        sq->qpn = sqn;
        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                goto err_destroy_sq;

        return 0;

err_destroy_sq:
        mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);