IB/mlx5: Add page fault handler for DC initiator WQE
drivers/infiniband/hw/mlx5/odp.c
1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <rdma/ib_umem.h>
34 #include <rdma/ib_umem_odp.h>
35 #include <linux/kernel.h>
36
37 #include "mlx5_ib.h"
38 #include "cmd.h"
39
40 #include <linux/mlx5/eq.h>
41
42 /* Contains the details of a pagefault. */
43 struct mlx5_pagefault {
44         u32                     bytes_committed;
45         u32                     token;
46         u8                      event_subtype;
47         u8                      type;
48         union {
49                 /* Initiator or send message responder pagefault details. */
50                 struct {
51                         /* Received packet size, only valid for responders. */
52                         u32     packet_size;
53                         /*
54                          * Number of the resource (QP or SRQ) holding the WQE; depends on type.
55                          */
56                         u32     wq_num;
57                         /*
58                          * WQE index. Refers to either the send queue or
59                          * receive queue, according to event_subtype.
60                          */
61                         u16     wqe_index;
62                 } wqe;
63                 /* RDMA responder pagefault details */
64                 struct {
65                         u32     r_key;
66                         /*
67                          * Received packet size; the minimal page fault
68                          * resolution required for forward progress.
69                          */
70                         u32     packet_size;
71                         u32     rdma_op_len;
72                         u64     rdma_va;
73                 } rdma;
74         };
75
76         struct mlx5_ib_pf_eq    *eq;
77         struct work_struct      work;
78 };
79
80 #define MAX_PREFETCH_LEN (4*1024*1024U)
81
82 /* Timeout in ms to wait for an active mmu notifier to complete when handling
83  * a pagefault. */
84 #define MMU_NOTIFIER_TIMEOUT 1000
85
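/*
 * Geometry of an implicit (indirect) ODP MR: MLX5_IMR_MTT_SHIFT works out to
 * 30, so each leaf MTT MR covers 1 GB of virtual address space regardless of
 * PAGE_SIZE, and MLX5_IMR_MTT_ENTRIES is the number of pages in one leaf.
 */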
86 #define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
87 #define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
88 #define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
89 #define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
90 #define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
91
92 #define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
93
94 static u64 mlx5_imr_ksm_entries;
95
96 static int check_parent(struct ib_umem_odp *odp,
97                                struct mlx5_ib_mr *parent)
98 {
99         struct mlx5_ib_mr *mr = odp->private;
100
101         return mr && mr->parent == parent && !odp->dying;
102 }
103
104 static struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
105 {
106         if (WARN_ON(!mr || !is_odp_mr(mr)))
107                 return NULL;
108
109         return to_ib_umem_odp(mr->umem)->per_mm;
110 }
111
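/*
 * Return the next live leaf umem (in address order) that belongs to the same
 * parent implicit MR, or NULL if there is none.
 */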
112 static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
113 {
114         struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
115         struct ib_ucontext_per_mm *per_mm = odp->per_mm;
116         struct rb_node *rb;
117
118         down_read(&per_mm->umem_rwsem);
119         while (1) {
120                 rb = rb_next(&odp->interval_tree.rb);
121                 if (!rb)
122                         goto not_found;
123                 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
124                 if (check_parent(odp, parent))
125                         goto end;
126         }
127 not_found:
128         odp = NULL;
129 end:
130         up_read(&per_mm->umem_rwsem);
131         return odp;
132 }
133
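/*
 * Find the lowest-addressed live leaf umem of @parent that overlaps the range
 * [start, start + length), or NULL if the range is not backed by any leaf.
 */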
134 static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
135                                       struct mlx5_ib_mr *parent)
136 {
137         struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
138         struct ib_umem_odp *odp;
139         struct rb_node *rb;
140
141         down_read(&per_mm->umem_rwsem);
142         odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
143         if (!odp)
144                 goto end;
145
146         while (1) {
147                 if (check_parent(odp, parent))
148                         goto end;
149                 rb = rb_next(&odp->interval_tree.rb);
150                 if (!rb)
151                         goto not_found;
152                 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
153                 if (ib_umem_start(odp) > start + length)
154                         goto not_found;
155         }
156 not_found:
157         odp = NULL;
158 end:
159         up_read(&per_mm->umem_rwsem);
160         return odp;
161 }
162
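/*
 * Fill @nentries KSM/KLM entries of the parent implicit MR, starting at slot
 * @offset. A slot backed by a live leaf MR gets that leaf's lkey; every other
 * slot (and every slot when MLX5_IB_UPD_XLT_ZAP is set) points at the device's
 * null mkey, so unpopulated ranges keep generating page faults on access.
 */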
163 void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
164                            size_t nentries, struct mlx5_ib_mr *mr, int flags)
165 {
166         struct ib_pd *pd = mr->ibmr.pd;
167         struct mlx5_ib_dev *dev = to_mdev(pd->device);
168         struct ib_umem_odp *odp;
169         unsigned long va;
170         int i;
171
172         if (flags & MLX5_IB_UPD_XLT_ZAP) {
173                 for (i = 0; i < nentries; i++, pklm++) {
174                         pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
175                         pklm->key = cpu_to_be32(dev->null_mkey);
176                         pklm->va = 0;
177                 }
178                 return;
179         }
180
181         odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
182                          nentries * MLX5_IMR_MTT_SIZE, mr);
183
184         for (i = 0; i < nentries; i++, pklm++) {
185                 pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
186                 va = (offset + i) * MLX5_IMR_MTT_SIZE;
187                 if (odp && odp->umem.address == va) {
188                         struct mlx5_ib_mr *mtt = odp->private;
189
190                         pklm->key = cpu_to_be32(mtt->ibmr.lkey);
191                         odp = odp_next(odp);
192                 } else {
193                         pklm->key = cpu_to_be32(dev->null_mkey);
194                 }
195                 mlx5_ib_dbg(dev, "[%d] va %lx key %x\n",
196                             i, va, be32_to_cpu(pklm->key));
197         }
198 }
199
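/*
 * Deferred teardown of one implicit-MR leaf: detach it from its parent,
 * refresh the parent's KSM entry for that slot (which now falls back to the
 * null mkey), and return the leaf MR to the cache.
 */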
200 static void mr_leaf_free_action(struct work_struct *work)
201 {
202         struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
203         int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
204         struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
205
206         mr->parent = NULL;
207         synchronize_srcu(&mr->dev->mr_srcu);
208
209         ib_umem_release(&odp->umem);
210         if (imr->live)
211                 mlx5_ib_update_xlt(imr, idx, 1, 0,
212                                    MLX5_IB_UPD_XLT_INDIRECT |
213                                    MLX5_IB_UPD_XLT_ATOMIC);
214         mlx5_mr_cache_free(mr->dev, mr);
215
216         if (atomic_dec_and_test(&imr->num_leaf_free))
217                 wake_up(&imr->q_leaf_free);
218 }
219
220 void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
221                               unsigned long end)
222 {
223         struct mlx5_ib_mr *mr;
224         const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
225                                     sizeof(struct mlx5_mtt)) - 1;
226         u64 idx = 0, blk_start_idx = 0;
227         int in_block = 0;
228         u64 addr;
229
230         if (!umem_odp) {
231                 pr_err("invalidation called on NULL umem or non-ODP umem\n");
232                 return;
233         }
234
235         mr = umem_odp->private;
236
237         if (!mr || !mr->ibmr.pd)
238                 return;
239
240         start = max_t(u64, ib_umem_start(umem_odp), start);
241         end = min_t(u64, ib_umem_end(umem_odp), end);
242
243         /*
244          * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
245          * while we are doing the invalidation, no page fault will attempt to
246          * overwrite the same MTTs. Concurrent invalidations might race us,
247          * but they will write 0s as well, so no difference in the end result.
248          */
249
250         for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
251                 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
252                 /*
253                  * Strive to write the MTTs in chunks, but avoid overwriting
254                  * non-existent MTTs. The heuristic here can be improved to
255                  * estimate the cost of another UMR vs. the cost of a bigger
256                  * UMR.
257                  */
258                 if (umem_odp->dma_list[idx] &
259                     (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
260                         if (!in_block) {
261                                 blk_start_idx = idx;
262                                 in_block = 1;
263                         }
264                 } else {
265                         u64 umr_offset = idx & umr_block_mask;
266
267                         if (in_block && umr_offset == 0) {
268                                 mlx5_ib_update_xlt(mr, blk_start_idx,
269                                                    idx - blk_start_idx, 0,
270                                                    MLX5_IB_UPD_XLT_ZAP |
271                                                    MLX5_IB_UPD_XLT_ATOMIC);
272                                 in_block = 0;
273                         }
274                 }
275         }
276         if (in_block)
277                 mlx5_ib_update_xlt(mr, blk_start_idx,
278                                    idx - blk_start_idx + 1, 0,
279                                    MLX5_IB_UPD_XLT_ZAP |
280                                    MLX5_IB_UPD_XLT_ATOMIC);
281         /*
282          * We are now sure that the device will not access the
283          * memory. We can safely unmap it, and mark it as dirty if
284          * needed.
285          */
286
287         ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
288
289         if (unlikely(!umem_odp->npages && mr->parent &&
290                      !umem_odp->dying)) {
291                 WRITE_ONCE(umem_odp->dying, 1);
292                 atomic_inc(&mr->parent->num_leaf_free);
293                 schedule_work(&umem_odp->work);
294         }
295 }
296
297 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
298 {
299         struct ib_odp_caps *caps = &dev->odp_caps;
300
301         memset(caps, 0, sizeof(*caps));
302
303         if (!MLX5_CAP_GEN(dev->mdev, pg))
304                 return;
305
306         caps->general_caps = IB_ODP_SUPPORT;
307
308         if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
309                 dev->odp_max_size = U64_MAX;
310         else
311                 dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
312
313         if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
314                 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
315
316         if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
317                 caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
318
319         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
320                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
321
322         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
323                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
324
325         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
326                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
327
328         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
329                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
330
331         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
332                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
333
334         if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
335                 caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
336
337         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
338                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
339
340         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
341                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
342
343         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
344                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
345
346         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
347                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
348
349         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
350                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
351
352         if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
353                 caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
354
355         if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
356             MLX5_CAP_GEN(dev->mdev, null_mkey) &&
357             MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
358                 caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
359
360         return;
361 }
362
363 static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
364                                       struct mlx5_pagefault *pfault,
365                                       int error)
366 {
367         int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
368                      pfault->wqe.wq_num : pfault->token;
369         u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
370         u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = { };
371         int err;
372
373         MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
374         MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
375         MLX5_SET(page_fault_resume_in, in, token, pfault->token);
376         MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
377         MLX5_SET(page_fault_resume_in, in, error, !!error);
378
379         err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
380         if (err)
381                 mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
382                             wq_num, err);
383 }
384
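/*
 * Allocate either the parent (indirect KSM) MR or a 1 GB leaf (MTT) MR of an
 * implicit ODP region from the MR cache, and enable it with a fully zapped
 * translation table.
 */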
385 static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
386                                             struct ib_umem *umem,
387                                             bool ksm, int access_flags)
388 {
389         struct mlx5_ib_dev *dev = to_mdev(pd->device);
390         struct mlx5_ib_mr *mr;
391         int err;
392
393         mr = mlx5_mr_cache_alloc(dev, ksm ? MLX5_IMR_KSM_CACHE_ENTRY :
394                                             MLX5_IMR_MTT_CACHE_ENTRY);
395
396         if (IS_ERR(mr))
397                 return mr;
398
399         mr->ibmr.pd = pd;
400
401         mr->dev = dev;
402         mr->access_flags = access_flags;
403         mr->mmkey.iova = 0;
404         mr->umem = umem;
405
406         if (ksm) {
407                 err = mlx5_ib_update_xlt(mr, 0,
408                                          mlx5_imr_ksm_entries,
409                                          MLX5_KSM_PAGE_SHIFT,
410                                          MLX5_IB_UPD_XLT_INDIRECT |
411                                          MLX5_IB_UPD_XLT_ZAP |
412                                          MLX5_IB_UPD_XLT_ENABLE);
413
414         } else {
415                 err = mlx5_ib_update_xlt(mr, 0,
416                                          MLX5_IMR_MTT_ENTRIES,
417                                          PAGE_SHIFT,
418                                          MLX5_IB_UPD_XLT_ZAP |
419                                          MLX5_IB_UPD_XLT_ENABLE |
420                                          MLX5_IB_UPD_XLT_ATOMIC);
421         }
422
423         if (err)
424                 goto fail;
425
426         mr->ibmr.lkey = mr->mmkey.key;
427         mr->ibmr.rkey = mr->mmkey.key;
428
429         mr->live = 1;
430
431         mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
432                     mr->mmkey.key, dev->mdev, mr);
433
434         return mr;
435
436 fail:
437         mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
438         mlx5_mr_cache_free(dev, mr);
439
440         return ERR_PTR(err);
441 }
442
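/*
 * Make sure a leaf umem/MR exists for every 1 GB slot touched by
 * [io_virt, io_virt + bcnt), allocating missing leaves and updating the
 * parent's KSM entries as needed. Returns the leaf covering io_virt, or an
 * ERR_PTR on failure.
 */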
443 static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
444                                                 u64 io_virt, size_t bcnt)
445 {
446         struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
447         struct ib_umem_odp *odp, *result = NULL;
448         struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
449         u64 addr = io_virt & MLX5_IMR_MTT_MASK;
450         int nentries = 0, start_idx = 0, ret;
451         struct mlx5_ib_mr *mtt;
452
453         mutex_lock(&odp_mr->umem_mutex);
454         odp = odp_lookup(addr, 1, mr);
455
456         mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
457                     io_virt, bcnt, addr, odp);
458
459 next_mr:
460         if (likely(odp)) {
461                 if (nentries)
462                         nentries++;
463         } else {
464                 odp = ib_alloc_odp_umem(odp_mr, addr,
465                                         MLX5_IMR_MTT_SIZE);
466                 if (IS_ERR(odp)) {
467                         mutex_unlock(&odp_mr->umem_mutex);
468                         return ERR_CAST(odp);
469                 }
470
471                 mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
472                                         mr->access_flags);
473                 if (IS_ERR(mtt)) {
474                         mutex_unlock(&odp_mr->umem_mutex);
475                         ib_umem_release(&odp->umem);
476                         return ERR_CAST(mtt);
477                 }
478
479                 odp->private = mtt;
480                 mtt->umem = &odp->umem;
481                 mtt->mmkey.iova = addr;
482                 mtt->parent = mr;
483                 INIT_WORK(&odp->work, mr_leaf_free_action);
484
485                 if (!nentries)
486                         start_idx = addr >> MLX5_IMR_MTT_SHIFT;
487                 nentries++;
488         }
489
490         /* Return the first odp if the region is not covered by a single one */
491         if (likely(!result))
492                 result = odp;
493
494         addr += MLX5_IMR_MTT_SIZE;
495         if (unlikely(addr < io_virt + bcnt)) {
496                 odp = odp_next(odp);
497                 if (odp && odp->umem.address != addr)
498                         odp = NULL;
499                 goto next_mr;
500         }
501
502         if (unlikely(nentries)) {
503                 ret = mlx5_ib_update_xlt(mr, start_idx, nentries, 0,
504                                          MLX5_IB_UPD_XLT_INDIRECT |
505                                          MLX5_IB_UPD_XLT_ATOMIC);
506                 if (ret) {
507                         mlx5_ib_err(dev, "Failed to update PAS\n");
508                         result = ERR_PTR(ret);
509                 }
510         }
511
512         mutex_unlock(&odp_mr->umem_mutex);
513         return result;
514 }
515
516 struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
517                                              struct ib_udata *udata,
518                                              int access_flags)
519 {
520         struct mlx5_ib_mr *imr;
521         struct ib_umem *umem;
522
523         umem = ib_umem_get(udata, 0, 0, access_flags, 0);
524         if (IS_ERR(umem))
525                 return ERR_CAST(umem);
526
527         imr = implicit_mr_alloc(&pd->ibpd, umem, 1, access_flags);
528         if (IS_ERR(imr)) {
529                 ib_umem_release(umem);
530                 return ERR_CAST(imr);
531         }
532
533         imr->umem = umem;
534         init_waitqueue_head(&imr->q_leaf_free);
535         atomic_set(&imr->num_leaf_free, 0);
536         atomic_set(&imr->num_pending_prefetch, 0);
537
538         return imr;
539 }
540
541 static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
542                         void *cookie)
543 {
544         struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
545
546         if (mr->parent != imr)
547                 return 0;
548
549         ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
550                                     ib_umem_end(umem_odp));
551
552         if (umem_odp->dying)
553                 return 0;
554
555         WRITE_ONCE(umem_odp->dying, 1);
556         atomic_inc(&imr->num_leaf_free);
557         schedule_work(&umem_odp->work);
558
559         return 0;
560 }
561
562 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
563 {
564         struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
565
566         down_read(&per_mm->umem_rwsem);
567         rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
568                                       mr_leaf_free, true, imr);
569         up_read(&per_mm->umem_rwsem);
570
571         wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
572 }
573
574 #define MLX5_PF_FLAGS_PREFETCH  BIT(0)
575 #define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
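/*
 * Resolve a fault on [io_virt, io_virt + bcnt) of @mr: pin the pages with
 * ib_umem_odp_map_dma_pages() under a notifier-sequence check and then push
 * the translations to hardware with mlx5_ib_update_xlt(). For an implicit MR
 * this walks (and, via implicit_mr_get_data(), creates) the covering leaves.
 * Returns the number of OS pages mapped, -EAGAIN on a race with an
 * invalidation, or another negative errno on failure.
 */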
576 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
577                         u64 io_virt, size_t bcnt, u32 *bytes_mapped,
578                         u32 flags)
579 {
580         int npages = 0, current_seq, page_shift, ret, np;
581         bool implicit = false;
582         struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
583         bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
584         bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
585         u64 access_mask;
586         u64 start_idx, page_mask;
587         struct ib_umem_odp *odp;
588         size_t size;
589
590         if (!odp_mr->page_list) {
591                 odp = implicit_mr_get_data(mr, io_virt, bcnt);
592
593                 if (IS_ERR(odp))
594                         return PTR_ERR(odp);
595                 mr = odp->private;
596                 implicit = true;
597         } else {
598                 odp = odp_mr;
599         }
600
601 next_mr:
602         size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
603
604         page_shift = odp->page_shift;
605         page_mask = ~(BIT(page_shift) - 1);
606         start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
607         access_mask = ODP_READ_ALLOWED_BIT;
608
609         if (prefetch && !downgrade && !mr->umem->writable) {
610                 /*
611                  * A prefetch that requests write access requires a writable MR.
612                  */
613                 ret = -EINVAL;
614                 goto out;
615         }
616
617         if (mr->umem->writable && !downgrade)
618                 access_mask |= ODP_WRITE_ALLOWED_BIT;
619
620         current_seq = READ_ONCE(odp->notifiers_seq);
621         /*
622          * Ensure the sequence number is valid for some time before we call
623          * gup.
624          */
625         smp_rmb();
626
627         ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
628                                         access_mask, current_seq);
629
630         if (ret < 0)
631                 goto out;
632
633         np = ret;
634
635         mutex_lock(&odp->umem_mutex);
636         if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
637                                         current_seq)) {
638                 /*
639                  * No need to check whether the MTTs really belong to
640                  * this MR, since ib_umem_odp_map_dma_pages already
641                  * checks this.
642                  */
643                 ret = mlx5_ib_update_xlt(mr, start_idx, np,
644                                          page_shift, MLX5_IB_UPD_XLT_ATOMIC);
645         } else {
646                 ret = -EAGAIN;
647         }
648         mutex_unlock(&odp->umem_mutex);
649
650         if (ret < 0) {
651                 if (ret != -EAGAIN)
652                         mlx5_ib_err(dev, "Failed to update mkey page tables\n");
653                 goto out;
654         }
655
656         if (bytes_mapped) {
657                 u32 new_mappings = (np << page_shift) -
658                         (io_virt - round_down(io_virt, 1 << page_shift));
659                 *bytes_mapped += min_t(u32, new_mappings, size);
660         }
661
662         npages += np << (page_shift - PAGE_SHIFT);
663         bcnt -= size;
664
665         if (unlikely(bcnt)) {
666                 struct ib_umem_odp *next;
667
668                 io_virt += size;
669                 next = odp_next(odp);
670                 if (unlikely(!next || next->umem.address != io_virt)) {
671                         mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
672                                     io_virt, next);
673                         return -EAGAIN;
674                 }
675                 odp = next;
676                 mr = odp->private;
677                 goto next_mr;
678         }
679
680         return npages;
681
682 out:
683         if (ret == -EAGAIN) {
684                 if (implicit || !odp->dying) {
685                         unsigned long timeout =
686                                 msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
687
688                         if (!wait_for_completion_timeout(
689                                         &odp->notifier_completion,
690                                         timeout)) {
691                                 mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
692                                              current_seq, odp->notifiers_seq, odp->notifiers_count);
693                         }
694                 } else {
695                         /* The MR is being killed, kill the QP as well. */
696                         ret = -EFAULT;
697                 }
698         }
699
700         return ret;
701 }
702
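/*
 * One pending data segment while walking an indirect (KLM/KSM) mkey. Frames
 * form a singly linked stack so that the walk in
 * pagefault_single_data_segment() is iterative, bounded by the device's
 * max_indirection capability rather than by kernel stack depth.
 */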
703 struct pf_frame {
704         struct pf_frame *next;
705         u32 key;
706         u64 io_virt;
707         size_t bcnt;
708         int depth;
709 };
710
711 static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
712 {
713         if (!mmkey)
714                 return false;
715         if (mmkey->type == MLX5_MKEY_MW)
716                 return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
717         return mmkey->key == key;
718 }
719
720 static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
721 {
722         struct mlx5_ib_mw *mw;
723         struct mlx5_ib_devx_mr *devx_mr;
724
725         if (mmkey->type == MLX5_MKEY_MW) {
726                 mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
727                 return mw->ndescs;
728         }
729
730         devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
731                                mmkey);
732         return devx_mr->ndescs;
733 }
734
735 /*
736  * Handle a single data segment in a page-fault WQE or RDMA region.
737  *
738  * Returns number of OS pages retrieved on success. The caller may continue to
739  * the next data segment.
740  * Can return the following error codes:
741  * -EAGAIN to designate a temporary error. The caller will abort handling the
742  *  page fault and resolve it.
743  * -EFAULT when there's an error mapping the requested pages. The caller will
744  *  abort the page fault handling.
745  */
746 static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
747                                          struct ib_pd *pd, u32 key,
748                                          u64 io_virt, size_t bcnt,
749                                          u32 *bytes_committed,
750                                          u32 *bytes_mapped, u32 flags)
751 {
752         int npages = 0, srcu_key, ret, i, outlen, cur_outlen = 0, depth = 0;
753         bool prefetch = flags & MLX5_PF_FLAGS_PREFETCH;
754         struct pf_frame *head = NULL, *frame;
755         struct mlx5_core_mkey *mmkey;
756         struct mlx5_ib_mr *mr;
757         struct mlx5_klm *pklm;
758         u32 *out = NULL;
759         size_t offset;
760         int ndescs;
761
762         srcu_key = srcu_read_lock(&dev->mr_srcu);
763
764         io_virt += *bytes_committed;
765         bcnt -= *bytes_committed;
766
767 next_mr:
768         mmkey = xa_load(&dev->mdev->priv.mkey_table, mlx5_base_mkey(key));
769         if (!mkey_is_eq(mmkey, key)) {
770                 mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
771                 ret = -EFAULT;
772                 goto srcu_unlock;
773         }
774
775         if (prefetch && mmkey->type != MLX5_MKEY_MR) {
776                 mlx5_ib_dbg(dev, "prefetch is allowed only for MR\n");
777                 ret = -EINVAL;
778                 goto srcu_unlock;
779         }
780
781         switch (mmkey->type) {
782         case MLX5_MKEY_MR:
783                 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
784                 if (!mr->live || !mr->ibmr.pd) {
785                         mlx5_ib_dbg(dev, "got dead MR\n");
786                         ret = -EFAULT;
787                         goto srcu_unlock;
788                 }
789
790                 if (prefetch) {
791                         if (!is_odp_mr(mr) ||
792                             mr->ibmr.pd != pd) {
793                                 mlx5_ib_dbg(dev, "Invalid prefetch request: %s\n",
794                                             is_odp_mr(mr) ?  "MR is not ODP" :
795                                             "PD is not of the MR");
796                                 ret = -EINVAL;
797                                 goto srcu_unlock;
798                         }
799                 }
800
801                 if (!is_odp_mr(mr)) {
802                         mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
803                                     key);
804                         if (bytes_mapped)
805                                 *bytes_mapped += bcnt;
806                         ret = 0;
807                         goto srcu_unlock;
808                 }
809
810                 ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped, flags);
811                 if (ret < 0)
812                         goto srcu_unlock;
813
814                 npages += ret;
815                 ret = 0;
816                 break;
817
818         case MLX5_MKEY_MW:
819         case MLX5_MKEY_INDIRECT_DEVX:
820                 ndescs = get_indirect_num_descs(mmkey);
821
822                 if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
823                         mlx5_ib_dbg(dev, "indirection level exceeded\n");
824                         ret = -EFAULT;
825                         goto srcu_unlock;
826                 }
827
828                 outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
829                         sizeof(*pklm) * (ndescs - 2);
830
831                 if (outlen > cur_outlen) {
832                         kfree(out);
833                         out = kzalloc(outlen, GFP_KERNEL);
834                         if (!out) {
835                                 ret = -ENOMEM;
836                                 goto srcu_unlock;
837                         }
838                         cur_outlen = outlen;
839                 }
840
841                 pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
842                                                        bsf0_klm0_pas_mtt0_1);
843
844                 ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
845                 if (ret)
846                         goto srcu_unlock;
847
848                 offset = io_virt - MLX5_GET64(query_mkey_out, out,
849                                               memory_key_mkey_entry.start_addr);
850
851                 for (i = 0; bcnt && i < ndescs; i++, pklm++) {
852                         if (offset >= be32_to_cpu(pklm->bcount)) {
853                                 offset -= be32_to_cpu(pklm->bcount);
854                                 continue;
855                         }
856
857                         frame = kzalloc(sizeof(*frame), GFP_KERNEL);
858                         if (!frame) {
859                                 ret = -ENOMEM;
860                                 goto srcu_unlock;
861                         }
862
863                         frame->key = be32_to_cpu(pklm->key);
864                         frame->io_virt = be64_to_cpu(pklm->va) + offset;
865                         frame->bcnt = min_t(size_t, bcnt,
866                                             be32_to_cpu(pklm->bcount) - offset);
867                         frame->depth = depth + 1;
868                         frame->next = head;
869                         head = frame;
870
871                         bcnt -= frame->bcnt;
872                         offset = 0;
873                 }
874                 break;
875
876         default:
877                 mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
878                 ret = -EFAULT;
879                 goto srcu_unlock;
880         }
881
882         if (head) {
883                 frame = head;
884                 head = frame->next;
885
886                 key = frame->key;
887                 io_virt = frame->io_virt;
888                 bcnt = frame->bcnt;
889                 depth = frame->depth;
890                 kfree(frame);
891
892                 goto next_mr;
893         }
894
895 srcu_unlock:
896         while (head) {
897                 frame = head;
898                 head = frame->next;
899                 kfree(frame);
900         }
901         kfree(out);
902
903         srcu_read_unlock(&dev->mr_srcu, srcu_key);
904         *bytes_committed = 0;
905         return ret ? ret : npages;
906 }
907
908 /**
909  * pagefault_data_segments() - Parse the data segments of a faulting WQE.
910  *
911  * @pfault: contains page fault information.
912  * @wqe: points at the first data segment in the WQE.
913  * @wqe_end: points after the end of the WQE.
914  * @bytes_mapped: receives the number of bytes that the function was able to
915  *                map. This allows the caller to decide intelligently whether
916  *                enough memory was mapped to resolve the page fault
917  *                successfully (e.g. enough for the next MTU, or the entire
918  *                WQE).
919  * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
920  *                   the committed bytes).
921  *
922  * Returns the number of pages loaded if positive, zero for an empty WQE, or a
923  * negative error code.
924  */
925 static int pagefault_data_segments(struct mlx5_ib_dev *dev,
926                                    struct mlx5_pagefault *pfault,
927                                    void *wqe,
928                                    void *wqe_end, u32 *bytes_mapped,
929                                    u32 *total_wqe_bytes, bool receive_queue)
930 {
931         int ret = 0, npages = 0;
932         u64 io_virt;
933         u32 key;
934         u32 byte_count;
935         size_t bcnt;
936         int inline_segment;
937
938         if (bytes_mapped)
939                 *bytes_mapped = 0;
940         if (total_wqe_bytes)
941                 *total_wqe_bytes = 0;
942
943         while (wqe < wqe_end) {
944                 struct mlx5_wqe_data_seg *dseg = wqe;
945
946                 io_virt = be64_to_cpu(dseg->addr);
947                 key = be32_to_cpu(dseg->lkey);
948                 byte_count = be32_to_cpu(dseg->byte_count);
949                 inline_segment = !!(byte_count &  MLX5_INLINE_SEG);
950                 bcnt           = byte_count & ~MLX5_INLINE_SEG;
951
952                 if (inline_segment) {
953                         bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
954                         wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
955                                      16);
956                 } else {
957                         wqe += sizeof(*dseg);
958                 }
959
960                 /* Receive WQE: end of the scatter/gather list. */
961                 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
962                     io_virt == 0)
963                         break;
964
965                 if (!inline_segment && total_wqe_bytes) {
966                         *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
967                                         pfault->bytes_committed);
968                 }
969
970                 /* A zero length data segment designates a length of 2GB. */
971                 if (bcnt == 0)
972                         bcnt = 1U << 31;
973
974                 if (inline_segment || bcnt <= pfault->bytes_committed) {
975                         pfault->bytes_committed -=
976                                 min_t(size_t, bcnt,
977                                       pfault->bytes_committed);
978                         continue;
979                 }
980
981                 ret = pagefault_single_data_segment(dev, NULL, key,
982                                                     io_virt, bcnt,
983                                                     &pfault->bytes_committed,
984                                                     bytes_mapped, 0);
985                 if (ret < 0)
986                         break;
987                 npages += ret;
988         }
989
990         return ret < 0 ? ret : npages;
991 }
992
993 /*
994  * Parse an initiator WQE. Advances the wqe pointer to point at the
995  * scatter-gather list, and sets wqe_end to the end of the WQE.
996  */
997 static int mlx5_ib_mr_initiator_pfault_handler(
998         struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
999         struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
1000 {
1001         struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
1002         u16 wqe_index = pfault->wqe.wqe_index;
1003         struct mlx5_base_av *av;
1004         unsigned ds, opcode;
1005 #if defined(DEBUG)
1006         u32 ctrl_wqe_index, ctrl_qpn;
1007 #endif
1008         u32 qpn = qp->trans_qp.base.mqp.qpn;
1009
1010         ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
1011         if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
1012                 mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, wqe_length = 0x%x\n",
1013                             ds, wqe_length);
1014                 return -EFAULT;
1015         }
1016
1017         if (ds == 0) {
1018                 mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
1019                             wqe_index, qpn);
1020                 return -EFAULT;
1021         }
1022
1023 #if defined(DEBUG)
1024         ctrl_wqe_index = (be32_to_cpu(ctrl->opmod_idx_opcode) &
1025                         MLX5_WQE_CTRL_WQE_INDEX_MASK) >>
1026                         MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
1027         if (wqe_index != ctrl_wqe_index) {
1028                 mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
1029                             wqe_index, qpn,
1030                             ctrl_wqe_index);
1031                 return -EFAULT;
1032         }
1033
1034         ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
1035                 MLX5_WQE_CTRL_QPN_SHIFT;
1036         if (qpn != ctrl_qpn) {
1037                 mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
1038                             wqe_index, qpn,
1039                             ctrl_qpn);
1040                 return -EFAULT;
1041         }
1042 #endif /* DEBUG */
1043
1044         *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
1045         *wqe += sizeof(*ctrl);
1046
1047         opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
1048                  MLX5_WQE_CTRL_OPCODE_MASK;
1049
1050         if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
1051                 *wqe += sizeof(struct mlx5_wqe_xrc_seg);
1052
1053         if (qp->ibqp.qp_type == IB_QPT_UD ||
1054             qp->qp_sub_type == MLX5_IB_QPT_DCI) {
1055                 av = *wqe;
1056                 if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
1057                         *wqe += sizeof(struct mlx5_av);
1058                 else
1059                         *wqe += sizeof(struct mlx5_base_av);
1060         }
1061
1062         switch (opcode) {
1063         case MLX5_OPCODE_RDMA_WRITE:
1064         case MLX5_OPCODE_RDMA_WRITE_IMM:
1065         case MLX5_OPCODE_RDMA_READ:
1066                 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1067                 break;
1068         case MLX5_OPCODE_ATOMIC_CS:
1069         case MLX5_OPCODE_ATOMIC_FA:
1070                 *wqe += sizeof(struct mlx5_wqe_raddr_seg);
1071                 *wqe += sizeof(struct mlx5_wqe_atomic_seg);
1072                 break;
1073         }
1074
1075         return 0;
1076 }
1077
1078 /*
1079  * Parse responder WQE and set wqe_end to the end of the WQE.
1080  */
1081 static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
1082                                                    struct mlx5_ib_srq *srq,
1083                                                    void **wqe, void **wqe_end,
1084                                                    int wqe_length)
1085 {
1086         int wqe_size = 1 << srq->msrq.wqe_shift;
1087
1088         if (wqe_size > wqe_length) {
1089                 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1090                 return -EFAULT;
1091         }
1092
1093         *wqe_end = *wqe + wqe_size;
1094         *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
1095
1096         return 0;
1097 }
1098
1099 static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
1100                                                   struct mlx5_ib_qp *qp,
1101                                                   void *wqe, void **wqe_end,
1102                                                   int wqe_length)
1103 {
1104         struct mlx5_ib_wq *wq = &qp->rq;
1105         int wqe_size = 1 << wq->wqe_shift;
1106
1107         if (qp->wq_sig) {
1108                 mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
1109                 return -EFAULT;
1110         }
1111
1112         if (wqe_size > wqe_length) {
1113                 mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
1114                 return -EFAULT;
1115         }
1116
1117         *wqe_end = wqe + wqe_size;
1118
1119         return 0;
1120 }
1121
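/*
 * Translate the faulting WQ number into its QP or SRQ and take a reference on
 * it; the caller releases the reference with mlx5_core_res_put().
 */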
1122 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
1123                                                        u32 wq_num, int pf_type)
1124 {
1125         struct mlx5_core_rsc_common *common = NULL;
1126         struct mlx5_core_srq *srq;
1127
1128         switch (pf_type) {
1129         case MLX5_WQE_PF_TYPE_RMP:
1130                 srq = mlx5_cmd_get_srq(dev, wq_num);
1131                 if (srq)
1132                         common = &srq->common;
1133                 break;
1134         case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
1135         case MLX5_WQE_PF_TYPE_RESP:
1136         case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
1137                 common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
1138                 break;
1139         default:
1140                 break;
1141         }
1142
1143         return common;
1144 }
1145
1146 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
1147 {
1148         struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
1149
1150         return to_mibqp(mqp);
1151 }
1152
1153 static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
1154 {
1155         struct mlx5_core_srq *msrq =
1156                 container_of(res, struct mlx5_core_srq, common);
1157
1158         return to_mibsrq(msrq);
1159 }
1160
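/*
 * Handle a WQE-type page fault: copy the faulting WQE out of the user's queue
 * buffer, parse it according to whether it came from an SQ, an RQ or an SRQ,
 * fault in its data segments, and finally resume the hardware, setting the
 * error bit if parsing or mapping failed.
 */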
1161 static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
1162                                           struct mlx5_pagefault *pfault)
1163 {
1164         bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
1165         u16 wqe_index = pfault->wqe.wqe_index;
1166         void *wqe = NULL, *wqe_end = NULL;
1167         u32 bytes_mapped, total_wqe_bytes;
1168         struct mlx5_core_rsc_common *res;
1169         int resume_with_error = 1;
1170         struct mlx5_ib_qp *qp;
1171         size_t bytes_copied;
1172         int ret = 0;
1173
1174         res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
1175         if (!res) {
1176                 mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
1177                 return;
1178         }
1179
1180         if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
1181             res->res != MLX5_RES_XSRQ) {
1182                 mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
1183                             pfault->type);
1184                 goto resolve_page_fault;
1185         }
1186
1187         wqe = (void *)__get_free_page(GFP_KERNEL);
1188         if (!wqe) {
1189                 mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
1190                 goto resolve_page_fault;
1191         }
1192
1193         qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
1194         if (qp && sq) {
1195                 ret = mlx5_ib_read_user_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
1196                                                &bytes_copied);
1197                 if (ret)
1198                         goto read_user;
1199                 ret = mlx5_ib_mr_initiator_pfault_handler(
1200                         dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
1201         } else if (qp && !sq) {
1202                 ret = mlx5_ib_read_user_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
1203                                                &bytes_copied);
1204                 if (ret)
1205                         goto read_user;
1206                 ret = mlx5_ib_mr_responder_pfault_handler_rq(
1207                         dev, qp, wqe, &wqe_end, bytes_copied);
1208         } else if (!qp) {
1209                 struct mlx5_ib_srq *srq = res_to_srq(res);
1210
1211                 ret = mlx5_ib_read_user_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
1212                                                 &bytes_copied);
1213                 if (ret)
1214                         goto read_user;
1215                 ret = mlx5_ib_mr_responder_pfault_handler_srq(
1216                         dev, srq, &wqe, &wqe_end, bytes_copied);
1217         }
1218
1219         if (ret < 0 || wqe >= wqe_end)
1220                 goto resolve_page_fault;
1221
1222         ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
1223                                       &total_wqe_bytes, !sq);
1224         if (ret == -EAGAIN)
1225                 goto out;
1226
1227         if (ret < 0 || total_wqe_bytes > bytes_mapped)
1228                 goto resolve_page_fault;
1229
1230 out:
1231         ret = 0;
1232         resume_with_error = 0;
1233
1234 read_user:
1235         if (ret)
1236                 mlx5_ib_err(
1237                         dev,
1238                         "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
1239                         ret, wqe_index, pfault->token);
1240
1241 resolve_page_fault:
1242         mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
1243         mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
1244                     pfault->wqe.wq_num, resume_with_error,
1245                     pfault->type);
1246         mlx5_core_res_put(res);
1247         free_page((unsigned long)wqe);
1248 }
1249
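/*
 * Number of OS pages touched by [address, address + length). For example,
 * with 4 KB pages, address = 0x1ffc and length = 8 straddles a page boundary
 * and yields 2.
 */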
1250 static int pages_in_range(u64 address, u32 length)
1251 {
1252         return (ALIGN(address + length, PAGE_SIZE) -
1253                 (address & PAGE_MASK)) >> PAGE_SHIFT;
1254 }
1255
1256 static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
1257                                            struct mlx5_pagefault *pfault)
1258 {
1259         u64 address;
1260         u32 length;
1261         u32 prefetch_len = pfault->bytes_committed;
1262         int prefetch_activated = 0;
1263         u32 rkey = pfault->rdma.r_key;
1264         int ret;
1265
1266         /* The RDMA responder handler handles the page fault in two parts.
1267          * First it brings the necessary pages for the current packet
1268          * (and uses the pfault context), and then (after resuming the QP)
1269          * prefetches more pages. The second operation cannot use the pfault
1270          * context and therefore uses a separate, zero-initialized
1271          * bytes_committed counter for the prefetch below. */
1272         pfault->rdma.rdma_va += pfault->bytes_committed;
1273         pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
1274                                          pfault->rdma.rdma_op_len);
1275         pfault->bytes_committed = 0;
1276
1277         address = pfault->rdma.rdma_va;
1278         length  = pfault->rdma.rdma_op_len;
1279
1280         /* For some operations, the hardware cannot tell the exact message
1281          * length, and in those cases it reports zero. Use prefetch
1282          * logic. */
1283         if (length == 0) {
1284                 prefetch_activated = 1;
1285                 length = pfault->rdma.packet_size;
1286                 prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
1287         }
1288
1289         ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
1290                                             &pfault->bytes_committed, NULL,
1291                                             0);
1292         if (ret == -EAGAIN) {
1293                 /* We're racing with an invalidation, don't prefetch */
1294                 prefetch_activated = 0;
1295         } else if (ret < 0 || pages_in_range(address, length) > ret) {
1296                 mlx5_ib_page_fault_resume(dev, pfault, 1);
1297                 if (ret != -ENOENT)
1298                         mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
1299                                     ret, pfault->token, pfault->type);
1300                 return;
1301         }
1302
1303         mlx5_ib_page_fault_resume(dev, pfault, 0);
1304         mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
1305                     pfault->token, pfault->type,
1306                     prefetch_activated);
1307
1308         /* At this point, there might be a new pagefault already arriving in
1309          * the eq, so the prefetch below keeps its own bytes_committed for
1310          * the rest of the processing. We're still OK with the objects being
1311          * alive as the work-queue is being fenced. */
1312
1313         if (prefetch_activated) {
1314                 u32 bytes_committed = 0;
1315
1316                 ret = pagefault_single_data_segment(dev, NULL, rkey, address,
1317                                                     prefetch_len,
1318                                                     &bytes_committed, NULL,
1319                                                     0);
1320                 if (ret < 0 && ret != -EAGAIN) {
1321                         mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
1322                                     ret, pfault->token, address, prefetch_len);
1323                 }
1324         }
1325 }
1326
1327 static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
1328 {
1329         u8 event_subtype = pfault->event_subtype;
1330
1331         switch (event_subtype) {
1332         case MLX5_PFAULT_SUBTYPE_WQE:
1333                 mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
1334                 break;
1335         case MLX5_PFAULT_SUBTYPE_RDMA:
1336                 mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
1337                 break;
1338         default:
1339                 mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
1340                             event_subtype);
1341                 mlx5_ib_page_fault_resume(dev, pfault, 1);
1342         }
1343 }
1344
1345 static void mlx5_ib_eqe_pf_action(struct work_struct *work)
1346 {
1347         struct mlx5_pagefault *pfault = container_of(work,
1348                                                      struct mlx5_pagefault,
1349                                                      work);
1350         struct mlx5_ib_pf_eq *eq = pfault->eq;
1351
1352         mlx5_ib_pfault(eq->dev, pfault);
1353         mempool_free(pfault, eq->pool);
1354 }
1355
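/*
 * Drain the page-fault EQ: decode each EQE into a struct mlx5_pagefault from
 * the mempool and queue it on the per-EQ workqueue. If the pool runs dry, the
 * remaining EQEs are left for mlx5_ib_eq_pf_action(), which refills the pool
 * and calls back into this function.
 */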
1356 static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
1357 {
1358         struct mlx5_eqe_page_fault *pf_eqe;
1359         struct mlx5_pagefault *pfault;
1360         struct mlx5_eqe *eqe;
1361         int cc = 0;
1362
1363         while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
1364                 pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
1365                 if (!pfault) {
1366                         schedule_work(&eq->work);
1367                         break;
1368                 }
1369
1370                 pf_eqe = &eqe->data.page_fault;
1371                 pfault->event_subtype = eqe->sub_type;
1372                 pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
1373
1374                 mlx5_ib_dbg(eq->dev,
1375                             "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
1376                             eqe->sub_type, pfault->bytes_committed);
1377
1378                 switch (eqe->sub_type) {
1379                 case MLX5_PFAULT_SUBTYPE_RDMA:
1380                         /* RDMA based event */
1381                         pfault->type =
1382                                 be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
1383                         pfault->token =
1384                                 be32_to_cpu(pf_eqe->rdma.pftype_token) &
1385                                 MLX5_24BIT_MASK;
1386                         pfault->rdma.r_key =
1387                                 be32_to_cpu(pf_eqe->rdma.r_key);
1388                         pfault->rdma.packet_size =
1389                                 be16_to_cpu(pf_eqe->rdma.packet_length);
1390                         pfault->rdma.rdma_op_len =
1391                                 be32_to_cpu(pf_eqe->rdma.rdma_op_len);
1392                         pfault->rdma.rdma_va =
1393                                 be64_to_cpu(pf_eqe->rdma.rdma_va);
1394                         mlx5_ib_dbg(eq->dev,
1395                                     "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
1396                                     pfault->type, pfault->token,
1397                                     pfault->rdma.r_key);
1398                         mlx5_ib_dbg(eq->dev,
1399                                     "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
1400                                     pfault->rdma.rdma_op_len,
1401                                     pfault->rdma.rdma_va);
1402                         break;
1403
1404                 case MLX5_PFAULT_SUBTYPE_WQE:
1405                         /* WQE based event */
1406                         pfault->type =
1407                                 (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
1408                         pfault->token =
1409                                 be32_to_cpu(pf_eqe->wqe.token);
1410                         pfault->wqe.wq_num =
1411                                 be32_to_cpu(pf_eqe->wqe.pftype_wq) &
1412                                 MLX5_24BIT_MASK;
1413                         pfault->wqe.wqe_index =
1414                                 be16_to_cpu(pf_eqe->wqe.wqe_index);
1415                         pfault->wqe.packet_size =
1416                                 be16_to_cpu(pf_eqe->wqe.packet_length);
1417                         mlx5_ib_dbg(eq->dev,
1418                                     "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
1419                                     pfault->type, pfault->token,
1420                                     pfault->wqe.wq_num,
1421                                     pfault->wqe.wqe_index);
1422                         break;
1423
1424                 default:
1425                         mlx5_ib_warn(eq->dev,
1426                                      "Unsupported page fault event sub-type: 0x%02hhx\n",
1427                                      eqe->sub_type);
1428                         /* Unsupported subtypes are still queued so that the
1429                          * page fault handler can resume them with an error.
1430                          */
1431                 }
1432
1433                 pfault->eq = eq;
1434                 INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
1435                 queue_work(eq->wq, &pfault->work);
1436
1437                 cc = mlx5_eq_update_cc(eq->core, ++cc);
1438         }
1439
1440         mlx5_eq_update_ci(eq->core, cc, 1);
1441 }
1442
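/* EQ interrupt notifier.  Try to drain the EQ directly in atomic context;
 * if the lock is already held, defer to the work handler instead of spinning.
 */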
1443 static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
1444                              void *data)
1445 {
1446         struct mlx5_ib_pf_eq *eq =
1447                 container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
1448         unsigned long flags;
1449
1450         if (spin_trylock_irqsave(&eq->lock, flags)) {
1451                 mlx5_ib_eq_pf_process(eq);
1452                 spin_unlock_irqrestore(&eq->lock, flags);
1453         } else {
1454                 schedule_work(&eq->work);
1455         }
1456
1457         return IRQ_HANDLED;
1458 }
1459
1460 /* A generic mempool_refill() helper was proposed upstream but was not
1461  * accepted: http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
1462  * Open-code a cheap equivalent here instead.
1463  */
1464 static void mempool_refill(mempool_t *pool)
1465 {
1466         while (pool->curr_nr < pool->min_nr)
1467                 mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
1468 }
1469
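/* Process-context work handler: refill the pagefault mempool (which may
 * sleep) and then drain the EQ under the lock, picking up any events that
 * could not be handled from atomic context.
 */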
1470 static void mlx5_ib_eq_pf_action(struct work_struct *work)
1471 {
1472         struct mlx5_ib_pf_eq *eq =
1473                 container_of(work, struct mlx5_ib_pf_eq, work);
1474
1475         mempool_refill(eq->pool);
1476
1477         spin_lock_irq(&eq->lock);
1478         mlx5_ib_eq_pf_process(eq);
1479         spin_unlock_irq(&eq->lock);
1480 }
1481
1482 enum {
1483         MLX5_IB_NUM_PF_EQE      = 0x1000,
1484         MLX5_IB_NUM_PF_DRAIN    = 64,
1485 };
1486
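/* Set up the dedicated page fault EQ: a mempool of pagefault descriptors,
 * a high-priority workqueue for resolving faults, and a generic EQ that
 * receives only MLX5_EVENT_TYPE_PAGE_FAULT events.
 */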
1487 static int
1488 mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1489 {
1490         struct mlx5_eq_param param = {};
1491         int err;
1492
1493         INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
1494         spin_lock_init(&eq->lock);
1495         eq->dev = dev;
1496
1497         eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
1498                                                sizeof(struct mlx5_pagefault));
1499         if (!eq->pool)
1500                 return -ENOMEM;
1501
1502         eq->wq = alloc_workqueue("mlx5_ib_page_fault",
1503                                  WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
1504                                  MLX5_NUM_CMD_EQE);
1505         if (!eq->wq) {
1506                 err = -ENOMEM;
1507                 goto err_mempool;
1508         }
1509
1510         eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
1511         param = (struct mlx5_eq_param) {
1512                 .irq_index = 0,
1513                 .nent = MLX5_IB_NUM_PF_EQE,
1514         };
1515         param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
1516         eq->core = mlx5_eq_create_generic(dev->mdev, &param);
1517         if (IS_ERR(eq->core)) {
1518                 err = PTR_ERR(eq->core);
1519                 goto err_wq;
1520         }
1521         err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
1522         if (err) {
1523                 mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
1524                 goto err_eq;
1525         }
1526
1527         return 0;
1528 err_eq:
1529         mlx5_eq_destroy_generic(dev->mdev, eq->core);
1530 err_wq:
1531         destroy_workqueue(eq->wq);
1532 err_mempool:
1533         mempool_destroy(eq->pool);
1534         return err;
1535 }
1536
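/* Tear down the page fault EQ in the reverse order of creation: disable and
 * destroy the EQ so no new faults arrive, cancel the deferred drain work,
 * then release the workqueue and the mempool.
 */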
1537 static int
1538 mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
1539 {
1540         int err;
1541
1542         mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
1543         err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
1544         cancel_work_sync(&eq->work);
1545         destroy_workqueue(eq->wq);
1546         mempool_destroy(eq->pool);
1547
1548         return err;
1549 }
1550
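/* Configure the MTT and KSM MR cache entries that back implicit ODP MRs.
 * Nothing is done when the device does not support implicit ODP.
 */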
1551 void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
1552 {
1553         if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
1554                 return;
1555
1556         switch (ent->order - 2) {
1557         case MLX5_IMR_MTT_CACHE_ENTRY:
1558                 ent->page = PAGE_SHIFT;
1559                 ent->xlt = MLX5_IMR_MTT_ENTRIES *
1560                            sizeof(struct mlx5_mtt) /
1561                            MLX5_IB_UMR_OCTOWORD;
1562                 ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
1563                 ent->limit = 0;
1564                 break;
1565
1566         case MLX5_IMR_KSM_CACHE_ENTRY:
1567                 ent->page = MLX5_KSM_PAGE_SHIFT;
1568                 ent->xlt = mlx5_imr_ksm_entries *
1569                            sizeof(struct mlx5_klm) /
1570                            MLX5_IB_UMR_OCTOWORD;
1571                 ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
1572                 ent->limit = 0;
1573                 break;
1574         }
1575 }
1576
1577 static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
1578         .advise_mr = mlx5_ib_advise_mr,
1579 };
1580
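/* Per-device ODP setup: register the advise_mr op when ODP is supported,
 * query the null mkey needed by implicit ODP, and create the page fault EQ
 * when the device supports paging.
 */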
1581 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
1582 {
1583         int ret = 0;
1584
1585         if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
1586                 ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
1587
1588         if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
1589                 ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
1590                 if (ret) {
1591                         mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
1592                         return ret;
1593                 }
1594         }
1595
1596         if (!MLX5_CAP_GEN(dev->mdev, pg))
1597                 return ret;
1598
1599         ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
1600
1601         return ret;
1602 }
1603
1604 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
1605 {
1606         if (!MLX5_CAP_GEN(dev->mdev, pg))
1607                 return;
1608
1609         mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
1610 }
1611
1612 int mlx5_ib_odp_init(void)
1613 {
1614         mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
1615                                        MLX5_IMR_MTT_BITS);
1616
1617         return 0;
1618 }
1619
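/* State for an asynchronous MR prefetch request.  sg_list is a trailing
 * array of num_sge entries, sized with struct_size() at allocation time.
 */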
1620 struct prefetch_mr_work {
1621         struct work_struct work;
1622         struct ib_pd *pd;
1623         u32 pf_flags;
1624         u32 num_sge;
1625         struct ib_sge sg_list[];
1626 };
1627
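/* Drop the prefetch references taken on sg_list[from..num_sge - 1] by
 * num_pending_prefetch_inc().
 */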
1628 static void num_pending_prefetch_dec(struct mlx5_ib_dev *dev,
1629                                      struct ib_sge *sg_list, u32 num_sge,
1630                                      u32 from)
1631 {
1632         u32 i;
1633         int srcu_key;
1634
1635         srcu_key = srcu_read_lock(&dev->mr_srcu);
1636
1637         for (i = from; i < num_sge; ++i) {
1638                 struct mlx5_core_mkey *mmkey;
1639                 struct mlx5_ib_mr *mr;
1640
1641                 mmkey = xa_load(&dev->mdev->priv.mkey_table,
1642                                 mlx5_base_mkey(sg_list[i].lkey));
1643                 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1644                 atomic_dec(&mr->num_pending_prefetch);
1645         }
1646
1647         srcu_read_unlock(&dev->mr_srcu, srcu_key);
1648 }
1649
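/* Verify that every SGE refers to a live MR that belongs to @pd and take a
 * prefetch reference on each.  On failure the references taken so far are
 * dropped and false is returned.  The caller must hold dev->mr_srcu.
 */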
1650 static bool num_pending_prefetch_inc(struct ib_pd *pd,
1651                                      struct ib_sge *sg_list, u32 num_sge)
1652 {
1653         struct mlx5_ib_dev *dev = to_mdev(pd->device);
1654         bool ret = true;
1655         u32 i;
1656
1657         for (i = 0; i < num_sge; ++i) {
1658                 struct mlx5_core_mkey *mmkey;
1659                 struct mlx5_ib_mr *mr;
1660
1661                 mmkey = xa_load(&dev->mdev->priv.mkey_table,
1662                                 mlx5_base_mkey(sg_list[i].lkey));
1663                 if (!mmkey || mmkey->key != sg_list[i].lkey) {
1664                         ret = false;
1665                         break;
1666                 }
1667
1668                 if (mmkey->type != MLX5_MKEY_MR) {
1669                         ret = false;
1670                         break;
1671                 }
1672
1673                 mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
1674
1675                 if (mr->ibmr.pd != pd) {
1676                         ret = false;
1677                         break;
1678                 }
1679
1680                 if (!mr->live) {
1681                         ret = false;
1682                         break;
1683                 }
1684
1685                 atomic_inc(&mr->num_pending_prefetch);
1686         }
1687
1688         if (!ret)
1689                 num_pending_prefetch_dec(dev, sg_list, i, 0);
1690
1691         return ret;
1692 }
1693
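/* Synchronously fault in the pages covered by each SGE.  Stops at the first
 * error and returns it; otherwise returns 0.
 */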
1694 static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, u32 pf_flags,
1695                                     struct ib_sge *sg_list, u32 num_sge)
1696 {
1697         u32 i;
1698         int ret = 0;
1699         struct mlx5_ib_dev *dev = to_mdev(pd->device);
1700
1701         for (i = 0; i < num_sge; ++i) {
1702                 struct ib_sge *sg = &sg_list[i];
1703                 int bytes_committed = 0;
1704
1705                 ret = pagefault_single_data_segment(dev, pd, sg->lkey, sg->addr,
1706                                                     sg->length,
1707                                                     &bytes_committed, NULL,
1708                                                     pf_flags);
1709                 if (ret < 0)
1710                         break;
1711         }
1712
1713         return ret < 0 ? ret : 0;
1714 }
1715
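/* Deferred prefetch handler.  The prefetch itself is skipped if the
 * ib_device is already being unregistered; the per-MR prefetch counts taken
 * when the work was queued are dropped either way.
 */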
1716 static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
1717 {
1718         struct prefetch_mr_work *w =
1719                 container_of(work, struct prefetch_mr_work, work);
1720
1721         if (ib_device_try_get(w->pd->device)) {
1722                 mlx5_ib_prefetch_sg_list(w->pd, w->pf_flags, w->sg_list,
1723                                          w->num_sge);
1724                 ib_device_put(w->pd->device);
1725         }
1726
1727         num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
1728                                  w->num_sge, 0);
1729         kfree(w);
1730 }
1731
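/* Entry point for the MR prefetch advise verb.  FLUSH requests are served
 * synchronously; otherwise the SGE list is copied into a work item, a
 * prefetch reference is taken on every MR under SRCU, and the work is queued
 * on system_unbound_wq.  Read-only advice (IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
 * additionally sets MLX5_PF_FLAGS_DOWNGRADE.
 *
 * A userspace caller typically reaches this path through rdma-core, roughly
 * as follows (illustrative sketch, not taken from this tree):
 *
 *	struct ibv_sge sge = {
 *		.addr   = (uint64_t)(uintptr_t)buf,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
 *		      IBV_ADVISE_MR_FLAG_FLUSH, &sge, 1);
 */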
1732 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
1733                                enum ib_uverbs_advise_mr_advice advice,
1734                                u32 flags, struct ib_sge *sg_list, u32 num_sge)
1735 {
1736         struct mlx5_ib_dev *dev = to_mdev(pd->device);
1737         u32 pf_flags = MLX5_PF_FLAGS_PREFETCH;
1738         struct prefetch_mr_work *work;
1739         bool valid_req;
1740         int srcu_key;
1741
1742         if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
1743                 pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
1744
1745         if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
1746                 return mlx5_ib_prefetch_sg_list(pd, pf_flags, sg_list,
1747                                                 num_sge);
1748
1749         work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL);
1750         if (!work)
1751                 return -ENOMEM;
1752
1753         memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge));
1754
1755         /* The PD seen when the work executes is guaranteed to be the PD that
1756          * was captured when the work was queued: a PD cannot be destroyed while
1757          * it still holds MRs, and destroying an MR flushes the workqueue.
1758          */
1759         work->pd = pd;
1760         work->pf_flags = pf_flags;
1761         work->num_sge = num_sge;
1762
1763         INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
1764
1765         srcu_key = srcu_read_lock(&dev->mr_srcu);
1766
1767         valid_req = num_pending_prefetch_inc(pd, sg_list, num_sge);
1768         if (valid_req)
1769                 queue_work(system_unbound_wq, &work->work);
1770         else
1771                 kfree(work);
1772
1773         srcu_read_unlock(&dev->mr_srcu, srcu_key);
1774
1775         return valid_req ? 0 : -EINVAL;
1776 }