net/mlx5_core: Add RQ and SQ event handling
[linux-2.6-block.git] drivers/net/ethernet/mellanox/mlx5/core/qp.c
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */


#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/transobj.h>

#include "mlx5_core.h"

static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
                                                 u32 rsn)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        struct mlx5_core_rsc_common *common;

        spin_lock(&table->lock);

        common = radix_tree_lookup(&table->tree, rsn);
        if (common)
                atomic_inc(&common->refcount);

        spin_unlock(&table->lock);

        if (!common) {
                mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
                               rsn);
                return NULL;
        }
        return common;
}

void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
        if (atomic_dec_and_test(&common->refcount))
                complete(&common->free);
}

void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
        struct mlx5_core_qp *qp;

        if (!common)
                return;

        switch (common->res) {
        case MLX5_RES_QP:
        case MLX5_RES_RQ:
        case MLX5_RES_SQ:
                qp = (struct mlx5_core_qp *)common;
                qp->event(qp, event_type);
                break;

        default:
                mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
        }

        mlx5_core_put_rsc(common);
}
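
/*
 * Editorial note on lifetime: mlx5_get_rsc() takes a reference under
 * table->lock, and mlx5_core_put_rsc() completes common->free once the
 * refcount drops to zero.  destroy_qprqsq_common() below drops the initial
 * reference and then waits on that completion, so an event handler that is
 * still running keeps the QP/RQ/SQ alive until it calls mlx5_core_put_rsc().
 *
 * A minimal sketch of an owner-side callback wired in through qp->event
 * (illustrative only; my_qp and my_qp_teardown are not part of this file):
 *
 *      static void my_qp_event(struct mlx5_core_qp *qp, int event_type)
 *      {
 *              struct my_qp *mqp = container_of(qp, struct my_qp, mqp);
 *
 *              if (event_type == MLX5_EVENT_TYPE_WQ_CATAS_ERROR)
 *                      my_qp_teardown(mqp);
 *      }
 */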

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
        struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
        int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
        struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
        struct mlx5_core_qp *qp =
                container_of(common, struct mlx5_core_qp, common);
        struct mlx5_pagefault pfault;

        if (!qp) {
                mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
                               qpn);
                return;
        }

        pfault.event_subtype = eqe->sub_type;
        pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
                (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
        pfault.bytes_committed = be32_to_cpu(
                pf_eqe->bytes_committed);

        mlx5_core_dbg(dev,
                      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
                      eqe->sub_type, pfault.flags);

        switch (eqe->sub_type) {
        case MLX5_PFAULT_SUBTYPE_RDMA:
                /* RDMA based event */
                pfault.rdma.r_key =
                        be32_to_cpu(pf_eqe->rdma.r_key);
                pfault.rdma.packet_size =
                        be16_to_cpu(pf_eqe->rdma.packet_length);
                pfault.rdma.rdma_op_len =
                        be32_to_cpu(pf_eqe->rdma.rdma_op_len);
                pfault.rdma.rdma_va =
                        be64_to_cpu(pf_eqe->rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
                              qpn, pfault.rdma.r_key);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
                              pfault.rdma.rdma_op_len);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: rdma_va: 0x%016llx,\n",
                              pfault.rdma.rdma_va);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        case MLX5_PFAULT_SUBTYPE_WQE:
                /* WQE based event */
                pfault.wqe.wqe_index =
                        be16_to_cpu(pf_eqe->wqe.wqe_index);
                pfault.wqe.packet_size =
                        be16_to_cpu(pf_eqe->wqe.packet_length);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
                              qpn, pfault.wqe.wqe_index);
                mlx5_core_dbg(dev,
                              "PAGE_FAULT: bytes_committed: 0x%06x\n",
                              pfault.bytes_committed);
                break;

        default:
                mlx5_core_warn(dev,
                               "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
                               eqe->sub_type, qpn);
                /* Unsupported page faults should still be resolved by the
                 * page fault handler
                 */
        }

        if (qp->pfault_handler) {
                qp->pfault_handler(qp, &pfault);
        } else {
                mlx5_core_err(dev,
                              "ODP event for QP %08x, without a fault handler in QP\n",
                              qpn);
                /* Page fault will remain unresolved. QP will hang until it is
                 * destroyed
                 */
        }

        mlx5_core_put_rsc(common);
}
#endif
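
/*
 * Editorial note on ODP page faults: a page-fault EQE carries either
 * RDMA-side information (r_key, virtual address, operation length) or
 * WQE-side information (wqe_index, packet size).  mlx5_eq_pagefault()
 * above unpacks whichever form arrived and hands the result to
 * qp->pfault_handler; that handler is expected to make the faulting pages
 * present and eventually call mlx5_core_page_fault_resume() (defined later
 * in this file) so the hardware can retry the request.
 */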

static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp,
                                int rsc_type)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        int err;

        qp->common.res = rsc_type;
        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree,
                                qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
                                qp);
        spin_unlock_irq(&table->lock);
        if (err)
                return err;

        atomic_set(&qp->common.refcount, 1);
        init_completion(&qp->common.free);
        qp->pid = current->pid;

        return 0;
}

static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *qp)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;
        unsigned long flags;

        spin_lock_irqsave(&table->lock, flags);
        radix_tree_delete(&table->tree,
                          qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
        spin_unlock_irqrestore(&table->lock, flags);
        mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
        wait_for_completion(&qp->common.free);
}
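
/*
 * Editorial note on keying: QPs, RQs and SQs share the one radix tree in
 * dev->priv.qp_table, indexed by a "resource serial number" that packs the
 * resource type above the 24-bit object number:
 *
 *      rsn = qpn | (rsc_type << MLX5_USER_INDEX_LEN);
 *
 * so two different resource types may reuse the same 24-bit number without
 * colliding.  The EQ code is expected to compose the same key before
 * calling mlx5_rsc_event().
 */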

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
                        struct mlx5_core_qp *qp,
                        struct mlx5_create_qp_mbox_in *in,
                        int inlen)
{
        struct mlx5_create_qp_mbox_out out;
        struct mlx5_destroy_qp_mbox_in din;
        struct mlx5_destroy_qp_mbox_out dout;
        int err;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "ret %d\n", err);
                return err;
        }

        if (out.hdr.status) {
                mlx5_core_warn(dev, "current num of QPs 0x%x\n",
                               atomic_read(&dev->num_qps));
                return mlx5_cmd_status_to_err(&out.hdr);
        }

        qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
        mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

        err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
        if (err)
                goto err_cmd;

        err = mlx5_debug_qp_add(dev, qp);
        if (err)
                mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
                              qp->qpn);

        atomic_inc(&dev->num_qps);

        return 0;

err_cmd:
        memset(&din, 0, sizeof(din));
        memset(&dout, 0, sizeof(dout));
        din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        din.qpn = cpu_to_be32(qp->qpn);
        mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);

int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
                         struct mlx5_core_qp *qp)
{
        struct mlx5_destroy_qp_mbox_in in;
        struct mlx5_destroy_qp_mbox_out out;
        int err;

        mlx5_debug_qp_remove(dev, qp);

        destroy_qprqsq_common(dev, qp);

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        atomic_dec(&dev->num_qps);
        return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);

int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
                        enum mlx5_qp_state new_state,
                        struct mlx5_modify_qp_mbox_in *in, int sqd_event,
                        struct mlx5_core_qp *qp)
{
        static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
                [MLX5_QP_STATE_RST] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_RST2INIT_QP,
                },
                [MLX5_QP_STATE_INIT]  = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_INIT]    = MLX5_CMD_OP_INIT2INIT_QP,
                        [MLX5_QP_STATE_RTR]     = MLX5_CMD_OP_INIT2RTR_QP,
                },
                [MLX5_QP_STATE_RTR]   = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTR2RTS_QP,
                },
                [MLX5_QP_STATE_RTS]   = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_RTS2RTS_QP,
                },
                [MLX5_QP_STATE_SQD] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                },
                [MLX5_QP_STATE_SQER] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                        [MLX5_QP_STATE_RTS]     = MLX5_CMD_OP_SQERR2RTS_QP,
                },
                [MLX5_QP_STATE_ERR] = {
                        [MLX5_QP_STATE_RST]     = MLX5_CMD_OP_2RST_QP,
                        [MLX5_QP_STATE_ERR]     = MLX5_CMD_OP_2ERR_QP,
                }
        };

        struct mlx5_modify_qp_mbox_out out;
        int err = 0;
        u16 op;

        if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
            !optab[cur_state][new_state])
                return -EINVAL;

        memset(&out, 0, sizeof(out));
        op = optab[cur_state][new_state];
        in->hdr.opcode = cpu_to_be16(op);
        in->qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (err)
                return err;

        return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
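
/*
 * Usage sketch (editorial, not part of this file): callers bring a QP up
 * one optab hop at a time, refilling the modify mailbox before each hop.
 * Transitions missing from optab are rejected with -EINVAL before any
 * command is issued.  Roughly:
 *
 *      err = mlx5_core_qp_modify(dev, MLX5_QP_STATE_RST,
 *                                MLX5_QP_STATE_INIT, in, 0, qp);
 *      if (!err)
 *              err = mlx5_core_qp_modify(dev, MLX5_QP_STATE_INIT,
 *                                        MLX5_QP_STATE_RTR, in, 0, qp);
 *      if (!err)
 *              err = mlx5_core_qp_modify(dev, MLX5_QP_STATE_RTR,
 *                                        MLX5_QP_STATE_RTS, in, 0, qp);
 */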

void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
        struct mlx5_qp_table *table = &dev->priv.qp_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
        mlx5_qp_debugfs_init(dev);
}

void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
        mlx5_qp_debugfs_cleanup(dev);
}

int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
                       struct mlx5_query_qp_mbox_out *out, int outlen)
{
        struct mlx5_query_qp_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
        in.qpn = cpu_to_be32(qp->qpn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                return mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
        struct mlx5_alloc_xrcd_mbox_in in;
        struct mlx5_alloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);
        else
                *xrcdn = be32_to_cpu(out.xrcdn);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);

int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
        struct mlx5_dealloc_xrcd_mbox_in in;
        struct mlx5_dealloc_xrcd_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
        in.xrcdn = cpu_to_be32(xrcdn);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
                                u8 flags, int error)
{
        struct mlx5_page_fault_resume_mbox_in in;
        struct mlx5_page_fault_resume_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
        in.hdr.opmod = 0;
        flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
                  MLX5_PAGE_FAULT_RESUME_WRITE     |
                  MLX5_PAGE_FAULT_RESUME_RDMA);
        flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
        in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
                                   (flags << MLX5_QPN_BITS));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif

int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
{
        int err;
        u32 rqn;

        err = mlx5_core_create_rq(dev, in, inlen, &rqn);
        if (err)
                return err;

        rq->qpn = rqn;
        err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
        if (err)
                goto err_destroy_rq;

        return 0;

err_destroy_rq:
        mlx5_core_destroy_rq(dev, rq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_rq_tracked);

void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *rq)
{
        destroy_qprqsq_common(dev, rq);
        mlx5_core_destroy_rq(dev, rq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);

int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *sq)
{
        int err;
        u32 sqn;

        err = mlx5_core_create_sq(dev, in, inlen, &sqn);
        if (err)
                return err;

        sq->qpn = sqn;
        err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
        if (err)
                goto err_destroy_sq;

        return 0;

err_destroy_sq:
        mlx5_core_destroy_sq(dev, sq->qpn);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_sq_tracked);

void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
                                  struct mlx5_core_qp *sq)
{
        destroy_qprqsq_common(dev, sq);
        mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
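
/*
 * Usage sketch (editorial, not part of this file): a consumer such as
 * mlx5_ib or mlx5e tracks an RQ so that its asynchronous events are
 * dispatched through mlx5_rsc_event() above.  my_rq, mrq and my_rq_event
 * below are hypothetical names, and setting ->event before creation is
 * one reasonable ordering since events may fire as soon as the entry is
 * in the radix tree:
 *
 *      struct mlx5_core_qp *rq = &my_rq->mrq;
 *
 *      rq->event = my_rq_event;
 *      err = mlx5_core_create_rq_tracked(dev, in, inlen, rq);
 *      if (err)
 *              return err;
 *      ...
 *      mlx5_core_destroy_rq_tracked(dev, rq);
 *
 * Because create_qprqsq_common() registers the RQ in the shared radix
 * tree, teardown must go through mlx5_core_destroy_rq_tracked() (not
 * mlx5_core_destroy_rq() alone) so the entry is removed and in-flight
 * event handlers are waited for.  The SQ helpers follow the same pattern.
 */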