/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

#include <linux/mlx5/eq.h>
/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	/* Initiator or send message responder pagefault details. */
		/* Received packet size, only valid for responders. */
		/*
		 * Number of resource holding WQE, depends on type.
		 */
		/*
		 * WQE index. Refers to either the send queue or
		 * receive queue, according to event_subtype.
		 */
	/* RDMA responder pagefault details */
		/*
		 * Received packet size, minimal size page fault
		 * resolution required for forward progress.
		 */

	struct mlx5_ib_pf_eq	*eq;
	struct work_struct	work;
#define MAX_PREFETCH_LEN (4*1024*1024U)

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000

#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))

#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
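
/*
 * As a rough illustration (assuming the common 4 KiB PAGE_SIZE, i.e.
 * PAGE_SHIFT == 12): MLX5_IMR_MTT_BITS is 18, so every implicit child MR
 * spans MLX5_IMR_MTT_SIZE == 2^30 bytes (1 GiB) described by
 * MLX5_IMR_MTT_ENTRIES == 2^18 MTTs, and the parent's KSM uses one entry
 * per such 1 GiB chunk (MLX5_KSM_PAGE_SHIFT == MLX5_IMR_MTT_SHIFT).
 */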
static u64 mlx5_imr_ksm_entries;

static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *imr, int flags)
	struct mlx5_klm *end = pklm + nentries;

	if (flags & MLX5_IB_UPD_XLT_ZAP) {
		for (; pklm != end; pklm++, idx++) {
			pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);

	/*
	 * The locking here is pretty subtle. Ideally the implicit_children
	 * xarray would be protected by the umem_mutex, however that is not
	 * possible. Instead this uses a weaker update-then-lock pattern:
	 *
	 *    mutex_lock(umem_mutex)
	 *      mlx5_ib_update_xlt()
	 *    mutex_unlock(umem_mutex)
	 *
	 * i.e. any change to the xarray must be followed by the locked
	 * update_xlt.
	 *
	 * The umem_mutex provides the acquire/release semantic needed to make
	 * the xa_store() visible to a racing thread.
	 */
	lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);

	for (; pklm != end; pklm++, idx++) {
		struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);

		pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(mtt->ibmr.lkey);
			pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
			pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);

static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;

	if (umem_dma & ODP_READ_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_READ;
	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
		mtt_entry |= MLX5_IB_MTT_WRITE;
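
/*
 * ib_umem_odp's per-page dma_list entries carry the DMA address with the
 * ODP_READ_ALLOWED_BIT/ODP_WRITE_ALLOWED_BIT flags folded into the low bits;
 * umem_dma_to_mtt() above masks the address with ODP_DMA_ADDR_MASK and turns
 * those flags into the device-visible MLX5_IB_MTT_READ/WRITE permissions.
 */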
static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *mr, int flags)
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	if (flags & MLX5_IB_UPD_XLT_ZAP)

	for (i = 0; i < nentries; i++) {
		pa = odp->dma_list[idx + i];
		pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));

void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags)
	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		populate_klm(xlt, idx, nentries, mr, flags);
		populate_mtt(xlt, idx, nentries, mr, flags);
/*
 * This must be called after the mr has been removed from implicit_children.
 * NOTE: The MR does not necessarily have to be
 * empty here, parallel page faults could have raced with the free process and
 * added pages to it.
 */
static void free_implicit_child_mr_work(struct work_struct *work)
	struct mlx5_ib_mr *mr =
		container_of(work, struct mlx5_ib_mr, odp_destroy.work);
	struct mlx5_ib_mr *imr = mr->parent;
	struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	mlx5r_deref_wait_odp_mkey(&mr->mmkey);

	mutex_lock(&odp_imr->umem_mutex);
	mlx5_ib_update_xlt(mr->parent, ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT,
			   MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);

	mlx5r_deref_odp_mkey(&imr->mmkey);

static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
	struct mlx5_ib_mr *imr = mr->parent;

	if (!refcount_inc_not_zero(&imr->mmkey.usecount))

	xa_erase(&imr->implicit_children, idx);

	/* Freeing a MR is a sleeping operation, so bounce to a work queue */
	INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
	queue_work(system_unbound_wq, &mr->odp_destroy.work);
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);
	struct mlx5_ib_mr *mr;
	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
				    sizeof(struct mlx5_mtt)) - 1;
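	/*
	 * With the usual definitions (MLX5_UMR_MTT_ALIGNMENT of 0x40 bytes,
	 * 8-byte struct mlx5_mtt) umr_block_mask works out to 7, i.e. the
	 * zapping below is issued in UMR-friendly chunks of 8 MTT entries;
	 * the exact figures depend on those definitions.
	 */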
	u64 idx = 0, blk_start_idx = 0;
	u64 invalidations = 0;

	if (!mmu_notifier_range_blockable(range))

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);
	/*
	 * If npages is zero then umem_odp->private may not be setup yet. This
	 * does not complete until after the first page is mapped for DMA.
	 */
	if (!umem_odp->npages)
	mr = umem_odp->private;

	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);

	/*
	 * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
	 * while we are doing the invalidation, no page fault will attempt to
	 * overwrite the same MTTs. Concurrent invalidations might race us,
	 * but they will write 0s as well, so no difference in the end result.
	 */
	for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		/*
		 * Strive to write the MTTs in chunks, but avoid overwriting
		 * non-existing MTTs. The heuristic here can be improved to
		 * estimate the cost of another UMR vs. the cost of a bigger
		 * UMR.
		 */
		if (umem_odp->dma_list[idx] &
		    (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {

			/* Count page invalidations */
			invalidations += idx - blk_start_idx + 1;

			u64 umr_offset = idx & umr_block_mask;

			if (in_block && umr_offset == 0) {
				mlx5_ib_update_xlt(mr, blk_start_idx,
						   idx - blk_start_idx, 0,
						   MLX5_IB_UPD_XLT_ZAP |
						   MLX5_IB_UPD_XLT_ATOMIC);

		mlx5_ib_update_xlt(mr, blk_start_idx,
				   idx - blk_start_idx + 1, 0,
				   MLX5_IB_UPD_XLT_ZAP |
				   MLX5_IB_UPD_XLT_ATOMIC);

	mlx5_update_odp_stats(mr, invalidations, invalidations);

	/*
	 * We are now sure that the device will not access the
	 * memory. We can safely unmap it, and mark it as dirty if
	 * needed.
	 */

	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

	if (unlikely(!umem_odp->npages && mr->parent))
		destroy_unused_implicit_child_mr(mr);

	mutex_unlock(&umem_odp->umem_mutex);
const struct mmu_interval_notifier_ops mlx5_mn_ops = {
	.invalidate = mlx5_ib_invalidate_range,

static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
	struct ib_odp_caps *caps = &dev->odp_caps;

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
	    !mlx5_ib_can_load_pas_with_umr(dev, 0))

	caps->general_caps = IB_ODP_SUPPORT;

	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		dev->odp_max_size = U64_MAX;
		dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;

	if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
		caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
	int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
		     pfault->wqe.wq_num : pfault->token;
	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};

	MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
	MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
	MLX5_SET(page_fault_resume_in, in, token, pfault->token);
	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
	MLX5_SET(page_fault_resume_in, in, error, !!error);

	err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
		mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
	struct ib_umem_odp *odp;
	struct mlx5_ib_mr *mr;
	struct mlx5_ib_mr *ret;

	odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
				      idx * MLX5_IMR_MTT_SIZE,
				      MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
		return ERR_CAST(odp);

	mr = mlx5_mr_cache_alloc(
		mr_to_mdev(imr), MLX5_IMR_MTT_CACHE_ENTRY, imr->access_flags);
		ib_umem_odp_release(odp);

	mr->ibmr.pd = imr->ibmr.pd;
	mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
	mr->umem = &odp->umem;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;

	/*
	 * First refcount is owned by the xarray and second refcount
	 * is returned to the caller.
	 */
	refcount_set(&mr->mmkey.usecount, 2);

	err = mlx5_ib_update_xlt(mr, 0,
				 MLX5_IMR_MTT_ENTRIES,
				 MLX5_IB_UPD_XLT_ZAP |
				 MLX5_IB_UPD_XLT_ENABLE);

	xa_lock(&imr->implicit_children);
	ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
	if (xa_is_err(ret)) {
		ret = ERR_PTR(xa_err(ret));
		/*
		 * Another thread beat us to creating the child mr, use
		 * theirs.
		 */
		refcount_inc(&ret->mmkey.usecount);
	xa_unlock(&imr->implicit_children);

	mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);

	xa_unlock(&imr->implicit_children);
	mlx5_ib_dereg_mr(&mr->ibmr, NULL);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
	struct ib_umem_odp *umem_odp;
	struct mlx5_ib_mr *imr;

	if (!mlx5_ib_can_load_pas_with_umr(dev,
					   MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
		return ERR_PTR(-EOPNOTSUPP);

	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
	if (IS_ERR(umem_odp))
		return ERR_CAST(umem_odp);

	imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
		ib_umem_odp_release(umem_odp);

	imr->ibmr.pd = &pd->ibpd;
	imr->umem = &umem_odp->umem;
	imr->ibmr.lkey = imr->mmkey.key;
	imr->ibmr.rkey = imr->mmkey.key;
	imr->ibmr.device = &dev->ib_dev;
	imr->umem = &umem_odp->umem;
	imr->is_odp_implicit = true;
	xa_init(&imr->implicit_children);

	err = mlx5_ib_update_xlt(imr, 0,
				 mlx5_imr_ksm_entries,
				 MLX5_IB_UPD_XLT_INDIRECT |
				 MLX5_IB_UPD_XLT_ZAP |
				 MLX5_IB_UPD_XLT_ENABLE);

	err = mlx5r_store_odp_mkey(dev, &imr->mmkey);

	mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);

	mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
	mlx5_ib_dereg_mr(&imr->ibmr, NULL);

void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
	struct mlx5_ib_mr *mtt;

	/*
	 * If this is an implicit MR it is already invalidated so we can just
	 * delete the children mkeys.
	 */
	xa_for_each(&mr->implicit_children, idx, mtt) {
		xa_erase(&mr->implicit_children, idx);
		mlx5_ib_dereg_mr(&mtt->ibmr, NULL);

#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
#define MLX5_PF_FLAGS_ENABLE BIT(3)
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
	int page_shift, ret, np;
	bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
	bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
	u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;

	if (flags & MLX5_PF_FLAGS_ENABLE)
		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;

	page_shift = odp->page_shift;
	start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
	access_mask = ODP_READ_ALLOWED_BIT;

	if (odp->umem.writable && !downgrade)
		access_mask |= ODP_WRITE_ALLOWED_BIT;

	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);

	/*
	 * No need to check whether the MTTs really belong to this MR, since
	 * ib_umem_odp_map_dma_and_lock already checks this.
	 */
	ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
	mutex_unlock(&odp->umem_mutex);

		mlx5_ib_err(mr_to_mdev(mr),
			    "Failed to update mkey page tables\n");

		u32 new_mappings = (np << page_shift) -
			(user_va - round_down(user_va, 1 << page_shift));

		*bytes_mapped += min_t(u32, new_mappings, bcnt);
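
	/*
	 * np counts pages of size (1 << page_shift); the shift below converts
	 * it to CPU PAGE_SIZE units for the caller (e.g. one 2 MiB device
	 * page would be reported as 512 4 KiB pages).
	 */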
	return np << (page_shift - PAGE_SHIFT);

static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
				 struct ib_umem_odp *odp_imr, u64 user_va,
				 size_t bcnt, u32 *bytes_mapped, u32 flags)
	unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
	unsigned long upd_start_idx = end_idx + 1;
	unsigned long upd_len = 0;
	unsigned long npages = 0;

	if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
		     mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))

	/* Fault each child mr that intersects with our interval. */
		unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
		struct ib_umem_odp *umem_odp;
		struct mlx5_ib_mr *mtt;

		xa_lock(&imr->implicit_children);
		mtt = xa_load(&imr->implicit_children, idx);
		if (unlikely(!mtt)) {
			xa_unlock(&imr->implicit_children);
			mtt = implicit_get_child_mr(imr, idx);

			upd_start_idx = min(upd_start_idx, idx);
			upd_len = idx - upd_start_idx + 1;
			refcount_inc(&mtt->mmkey.usecount);
			xa_unlock(&imr->implicit_children);

		umem_odp = to_ib_umem_odp(mtt->umem);
		len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -

		ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
					bytes_mapped, flags);
		mlx5r_deref_odp_mkey(&mtt->mmkey);

	/*
	 * Any time the implicit_children are changed we must perform an
	 * update of the xlt before exiting to ensure the HW and the
	 * implicit_children remain synchronized.
	 */
	if (likely(!upd_len))

	/*
	 * Notice this is not strictly ordered correctly: the KSM is updated
	 * after the implicit_children is updated, so a parallel page fault
	 * could see a MR that is not yet visible in the KSM. This is similar
	 * to a parallel page fault seeing a MR that is being concurrently
	 * removed from the KSM. Both of these improbable situations are
	 * resolved safely by resuming the HW and then taking another page
	 * fault. The next pagefault handler will see the new information.
	 */
	mutex_lock(&odp_imr->umem_mutex);
	err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
				 MLX5_IB_UPD_XLT_INDIRECT |
				 MLX5_IB_UPD_XLT_ATOMIC);
	mutex_unlock(&odp_imr->umem_mutex);
		mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
			       u32 *bytes_mapped, u32 flags)
	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
	unsigned int page_size;

	if (flags & MLX5_PF_FLAGS_ENABLE)
		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;

	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
		dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
					     umem_dmabuf->umem.iova);
	if (unlikely(page_size < PAGE_SIZE)) {
		ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	err = mlx5_ib_update_mr_pas(mr, xlt_flags);
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

		*bytes_mapped += bcnt;

	return ib_umem_num_pages(mr->umem);
/*
 * Returns:
 *  -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
 *           not accessible, or the MR is no longer valid.
 *  -EAGAIN/-ENOMEM: The operation should be retried
 *
 *  -EINVAL/others: General internal malfunction
 *  >0: Number of pages mapped
 */
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
			u32 *bytes_mapped, u32 flags)
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);

	if (unlikely(io_virt < mr->ibmr.iova))

	if (mr->umem->is_dmabuf)
		return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);

	if (!odp->is_implicit_odp) {
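		/*
		 * io_virt is in the MR's IOVA space; rebase it onto the
		 * umem's CPU virtual address (odp->umem.address) before
		 * faulting, with check_add_overflow() guarding the sum.
		 */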
		if (check_add_overflow(io_virt - mr->ibmr.iova,
				       (u64)odp->umem.address, &user_va))
		if (unlikely(user_va >= ib_umem_end(odp) ||
			     ib_umem_end(odp) - user_va < bcnt))
		return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
	return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,

int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
	ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
				mr->umem->length, NULL,
				MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
	return ret >= 0 ? 0 : ret;

int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
	ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
				  MLX5_PF_FLAGS_ENABLE);

	return ret >= 0 ? 0 : ret;

	struct pf_frame *next;

static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
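	/*
	 * For memory windows only the 24-bit base mkey index is compared,
	 * presumably because rebinding an MW changes the low "variant" bits
	 * of the key the HCA reports; plain mkeys must match exactly.
	 */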
	if (mmkey->type == MLX5_MKEY_MW)
		return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
	return mmkey->key == key;

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns number of OS pages retrieved on success. The caller may continue to
 * the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it.
 * -EFAULT when there's an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
					 struct ib_pd *pd, u32 key,
					 u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
	int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
	struct pf_frame *head = NULL, *frame;
	struct mlx5_ib_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	struct mlx5_klm *pklm;

	io_virt += *bytes_committed;
	bcnt -= *bytes_committed;

	xa_lock(&dev->odp_mkeys);
	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
		xa_unlock(&dev->odp_mkeys);
			    "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
			*bytes_mapped += bcnt;
		/*
		 * The user could specify a SGL with multiple lkeys and only
		 * some of them are ODP. Treat the non-ODP ones as fully
		 * faulted.
		 */
	refcount_inc(&mmkey->usecount);
	xa_unlock(&dev->odp_mkeys);

	if (!mkey_is_eq(mmkey, key)) {
		mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);

	switch (mmkey->type) {
		mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

		ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);

		mlx5_update_odp_stats(mr, faults, ret);

	case MLX5_MKEY_INDIRECT_DEVX:
		if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
			mlx5_ib_dbg(dev, "indirection level exceeded\n");

		outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
			sizeof(*pklm) * (mmkey->ndescs - 2);

		if (outlen > cur_outlen) {
			out = kzalloc(outlen, GFP_KERNEL);

		pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
						       bsf0_klm0_pas_mtt0_1);

		ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);

		offset = io_virt - MLX5_GET64(query_mkey_out, out,
					      memory_key_mkey_entry.start_addr);

		for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
			if (offset >= be32_to_cpu(pklm->bcount)) {
				offset -= be32_to_cpu(pklm->bcount);

			frame = kzalloc(sizeof(*frame), GFP_KERNEL);

			frame->key = be32_to_cpu(pklm->key);
			frame->io_virt = be64_to_cpu(pklm->va) + offset;
			frame->bcnt = min_t(size_t, bcnt,
					    be32_to_cpu(pklm->bcount) - offset);
			frame->depth = depth + 1;

		mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);

		io_virt = frame->io_virt;
		depth = frame->depth;

	mlx5r_deref_odp_mkey(mmkey);

	mlx5r_deref_odp_mkey(mmkey);

	*bytes_committed = 0;
	return ret ? ret : npages;
/*
 * Parse a series of data segments for page fault handling.
 *
 * @dev:  Pointer to mlx5 IB device
 * @pfault: contains page fault information.
 * @wqe: points at the first data segment in the WQE.
 * @wqe_end: points after the end of the WQE.
 * @bytes_mapped: receives the number of bytes that the function was able to
 *                map. This allows the caller to decide intelligently whether
 *                enough memory was mapped to resolve the page fault
 *                successfully (e.g. enough for the next MTU, or the entire
 *                WQE).
 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
 *                   the committed bytes).
 * @receive_queue: true if this is a receive WQE (RQ or SRQ), whose scatter
 *                 list may be terminated by a zero-length sentinel entry.
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, bool receive_queue)
	int ret = 0, npages = 0;

	*total_wqe_bytes = 0;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;

		io_virt = be64_to_cpu(dseg->addr);
		key = be32_to_cpu(dseg->lkey);
		byte_count = be32_to_cpu(dseg->byte_count);
		inline_segment = !!(byte_count & MLX5_INLINE_SEG);
		bcnt = byte_count & ~MLX5_INLINE_SEG;

		if (inline_segment) {
			bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
			wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
			wqe += sizeof(*dseg);

		/* receive WQE end of sg list. */
		if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&

		if (!inline_segment && total_wqe_bytes) {
			*total_wqe_bytes += bcnt - min_t(size_t, bcnt,
							 pfault->bytes_committed);

		/* A zero length data segment designates a length of 2GB. */

		if (inline_segment || bcnt <= pfault->bytes_committed) {
			pfault->bytes_committed -=
				       pfault->bytes_committed);

		ret = pagefault_single_data_segment(dev, NULL, key,
						    &pfault->bytes_committed,

	return ret < 0 ? ret : npages;
/*
 * Parse initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
	struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
	u16 wqe_index = pfault->wqe.wqe_index;
	struct mlx5_base_av *av;
	unsigned int ds, opcode;
	u32 qpn = qp->trans_qp.base.mqp.qpn;

	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
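	/*
	 * Each DS unit in qpn_ds is MLX5_WQE_DS_UNITS bytes (16 with the
	 * usual definition), so ds * MLX5_WQE_DS_UNITS below gives the WQE
	 * length in bytes used to derive wqe_end.
	 */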
	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
		mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",

		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",

	*wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
	*wqe += sizeof(*ctrl);

	opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
		 MLX5_WQE_CTRL_OPCODE_MASK;

	if (qp->type == IB_QPT_XRC_INI)
		*wqe += sizeof(struct mlx5_wqe_xrc_seg);

	if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
		if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
			*wqe += sizeof(struct mlx5_av);
			*wqe += sizeof(struct mlx5_base_av);

	case MLX5_OPCODE_RDMA_WRITE:
	case MLX5_OPCODE_RDMA_WRITE_IMM:
	case MLX5_OPCODE_RDMA_READ:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
	case MLX5_OPCODE_ATOMIC_CS:
	case MLX5_OPCODE_ATOMIC_FA:
		*wqe += sizeof(struct mlx5_wqe_raddr_seg);
		*wqe += sizeof(struct mlx5_wqe_atomic_seg);
/*
 * Parse responder WQE and set wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
						   struct mlx5_ib_srq *srq,
						   void **wqe, void **wqe_end,
	int wqe_size = 1 << srq->msrq.wqe_shift;

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");

	*wqe_end = *wqe + wqe_size;
	*wqe += sizeof(struct mlx5_wqe_srq_next_seg);

static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
						  struct mlx5_ib_qp *qp,
						  void *wqe, void **wqe_end,
	struct mlx5_ib_wq *wq = &qp->rq;
	int wqe_size = 1 << wq->wqe_shift;

	if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
		mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");

	if (wqe_size > wqe_length) {
		mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");

	*wqe_end = wqe + wqe_size;
static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
							u32 wq_num, int pf_type)
	struct mlx5_core_rsc_common *common = NULL;
	struct mlx5_core_srq *srq;

	case MLX5_WQE_PF_TYPE_RMP:
		srq = mlx5_cmd_get_srq(dev, wq_num);
			common = &srq->common;
	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
	case MLX5_WQE_PF_TYPE_RESP:
	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
		common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);

static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
	struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;

	return to_mibqp(mqp);

static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
	struct mlx5_core_srq *msrq =
		container_of(res, struct mlx5_core_srq, common);

	return to_mibsrq(msrq);

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
	bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
	u16 wqe_index = pfault->wqe.wqe_index;
	void *wqe, *wqe_start = NULL, *wqe_end = NULL;
	u32 bytes_mapped, total_wqe_bytes;
	struct mlx5_core_rsc_common *res;
	int resume_with_error = 1;
	struct mlx5_ib_qp *qp;
	size_t bytes_copied;

	res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
		mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);

	if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
	    res->res != MLX5_RES_XSRQ) {
		mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
		goto resolve_page_fault;

	wqe_start = (void *)__get_free_page(GFP_KERNEL);
		mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
		goto resolve_page_fault;

	qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
		ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,

		ret = mlx5_ib_mr_initiator_pfault_handler(
			dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
	} else if (qp && !sq) {
		ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,

		ret = mlx5_ib_mr_responder_pfault_handler_rq(
			dev, qp, wqe, &wqe_end, bytes_copied);
		struct mlx5_ib_srq *srq = res_to_srq(res);

		ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,

		ret = mlx5_ib_mr_responder_pfault_handler_srq(
			dev, srq, &wqe, &wqe_end, bytes_copied);

	if (ret < 0 || wqe >= wqe_end)
		goto resolve_page_fault;

	ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
				      &total_wqe_bytes, !sq);

	if (ret < 0 || total_wqe_bytes > bytes_mapped)
		goto resolve_page_fault;

	resume_with_error = 0;

		    "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
		    ret, wqe_index, pfault->token);

	mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
		    pfault->wqe.wq_num, resume_with_error,
	mlx5_core_res_put(res);
	free_page((unsigned long)wqe_start);

static int pages_in_range(u64 address, u32 length)
	return (ALIGN(address + length, PAGE_SIZE) -
		(address & PAGE_MASK)) >> PAGE_SHIFT;
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
	u32 prefetch_len = pfault->bytes_committed;
	int prefetch_activated = 0;
	u32 rkey = pfault->rdma.r_key;

	/* The RDMA responder handler handles the page fault in two parts.
	 * First it brings the necessary pages for the current packet
	 * (and uses the pfault context), and then (after resuming the QP)
	 * prefetches more pages. The second operation cannot use the pfault
	 * context and therefore uses the dummy_pfault context allocated on
	 * the stack.
	 */
	pfault->rdma.rdma_va += pfault->bytes_committed;
	pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
					pfault->rdma.rdma_op_len);
	pfault->bytes_committed = 0;

	address = pfault->rdma.rdma_va;
	length = pfault->rdma.rdma_op_len;

	/* For some operations, the hardware cannot tell the exact message
	 * length, and in those cases it reports zero. Use prefetch
	 * logic.
	 */
		prefetch_activated = 1;
		length = pfault->rdma.packet_size;
		prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);

	ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
					    &pfault->bytes_committed, NULL);
	if (ret == -EAGAIN) {
		/* We're racing with an invalidation, don't prefetch */
		prefetch_activated = 0;
	} else if (ret < 0 || pages_in_range(address, length) > ret) {
		mlx5_ib_page_fault_resume(dev, pfault, 1);
			mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
				    ret, pfault->token, pfault->type);

	mlx5_ib_page_fault_resume(dev, pfault, 0);
	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
		    pfault->token, pfault->type,
		    prefetch_activated);

	/* At this point, there might be a new pagefault already arriving in
	 * the eq, switch to the dummy pagefault for the rest of the
	 * processing. We're still OK with the objects being alive as the
	 * work-queue is being fenced. */
	if (prefetch_activated) {
		u32 bytes_committed = 0;

		ret = pagefault_single_data_segment(dev, NULL, rkey, address,
						    &bytes_committed, NULL);
		if (ret < 0 && ret != -EAGAIN) {
			mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
				    ret, pfault->token, address, prefetch_len);

static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
	u8 event_subtype = pfault->event_subtype;

	switch (event_subtype) {
	case MLX5_PFAULT_SUBTYPE_WQE:
		mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
	case MLX5_PFAULT_SUBTYPE_RDMA:
		mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
		mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
		mlx5_ib_page_fault_resume(dev, pfault, 1);
static void mlx5_ib_eqe_pf_action(struct work_struct *work)
	struct mlx5_pagefault *pfault = container_of(work,
						     struct mlx5_pagefault,
	struct mlx5_ib_pf_eq *eq = pfault->eq;

	mlx5_ib_pfault(eq->dev, pfault);
	mempool_free(pfault, eq->pool);

static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
	struct mlx5_eqe_page_fault *pf_eqe;
	struct mlx5_pagefault *pfault;
	struct mlx5_eqe *eqe;

	while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
		pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
			schedule_work(&eq->work);

		pf_eqe = &eqe->data.page_fault;
		pfault->event_subtype = eqe->sub_type;
		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);

		mlx5_ib_dbg(eq->dev,
			    "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
			    eqe->sub_type, pfault->bytes_committed);

		switch (eqe->sub_type) {
		case MLX5_PFAULT_SUBTYPE_RDMA:
			/* RDMA based event */
				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
				be32_to_cpu(pf_eqe->rdma.pftype_token) &
			pfault->rdma.r_key =
				be32_to_cpu(pf_eqe->rdma.r_key);
			pfault->rdma.packet_size =
				be16_to_cpu(pf_eqe->rdma.packet_length);
			pfault->rdma.rdma_op_len =
				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
			pfault->rdma.rdma_va =
				be64_to_cpu(pf_eqe->rdma.rdma_va);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
				    pfault->type, pfault->token,
				    pfault->rdma.r_key);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
				    pfault->rdma.rdma_op_len,
				    pfault->rdma.rdma_va);

		case MLX5_PFAULT_SUBTYPE_WQE:
			/* WQE based event */
				(be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
				be32_to_cpu(pf_eqe->wqe.token);
			pfault->wqe.wq_num =
				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
			pfault->wqe.wqe_index =
				be16_to_cpu(pf_eqe->wqe.wqe_index);
			pfault->wqe.packet_size =
				be16_to_cpu(pf_eqe->wqe.packet_length);
			mlx5_ib_dbg(eq->dev,
				    "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
				    pfault->type, pfault->token,
				    pfault->wqe.wqe_index);

			mlx5_ib_warn(eq->dev,
				     "Unsupported page fault event sub-type: 0x%02hhx\n",
			/* Unsupported page faults should still be
			 * resolved by the page fault handler
			 */

		INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
		queue_work(eq->wq, &pfault->work);

		cc = mlx5_eq_update_cc(eq->core, ++cc);

	mlx5_eq_update_ci(eq->core, cc, 1);
static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
	struct mlx5_ib_pf_eq *eq =
		container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
	unsigned long flags;
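	/*
	 * If the lock is contended, another context is already draining the
	 * EQ; defer to the work item scheduled below rather than spinning in
	 * interrupt context.
	 */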
	if (spin_trylock_irqsave(&eq->lock, flags)) {
		mlx5_ib_eq_pf_process(eq);
		spin_unlock_irqrestore(&eq->lock, flags);
		schedule_work(&eq->work);

/* mempool_refill() was proposed but unfortunately wasn't accepted
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 */
static void mempool_refill(mempool_t *pool)
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);

static void mlx5_ib_eq_pf_action(struct work_struct *work)
	struct mlx5_ib_pf_eq *eq =
		container_of(work, struct mlx5_ib_pf_eq, work);

	mempool_refill(eq->pool);

	spin_lock_irq(&eq->lock);
	mlx5_ib_eq_pf_process(eq);
	spin_unlock_irq(&eq->lock);

	MLX5_IB_NUM_PF_EQE = 0x1000,
	MLX5_IB_NUM_PF_DRAIN = 64,

int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
	struct mlx5_eq_param param = {};

	mutex_lock(&dev->odp_eq_mutex);

	INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
	spin_lock_init(&eq->lock);

	eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
					       sizeof(struct mlx5_pagefault));

	eq->wq = alloc_workqueue("mlx5_ib_page_fault",
				 WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,

	eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
	param = (struct mlx5_eq_param) {
		.irq_index = MLX5_IRQ_EQ_CTRL,
		.nent = MLX5_IB_NUM_PF_EQE,
	param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
	if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {

	eq->core = mlx5_eq_create_generic(dev->mdev, &param);
	free_cpumask_var(param.affinity);
	if (IS_ERR(eq->core)) {
		err = PTR_ERR(eq->core);
	err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
		mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);

	mutex_unlock(&dev->odp_eq_mutex);

	mlx5_eq_destroy_generic(dev->mdev, eq->core);
	destroy_workqueue(eq->wq);
	mempool_destroy(eq->pool);
	mutex_unlock(&dev->odp_eq_mutex);

mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
	mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
	err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
	cancel_work_sync(&eq->work);
	destroy_workqueue(eq->wq);
	mempool_destroy(eq->pool);

void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
	if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))

	switch (ent->order - 2) {
	case MLX5_IMR_MTT_CACHE_ENTRY:
		ent->page = PAGE_SHIFT;
		ent->xlt = MLX5_IMR_MTT_ENTRIES *
			   sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;

	case MLX5_IMR_KSM_CACHE_ENTRY:
		ent->page = MLX5_KSM_PAGE_SHIFT;
		ent->xlt = mlx5_imr_ksm_entries *
			   sizeof(struct mlx5_klm) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;

static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
	.advise_mr = mlx5_ib_advise_mr,

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
	internal_fill_odp_caps(dev);

	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))

	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
			mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);

	mutex_init(&dev->odp_eq_mutex);

void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))

	mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
int mlx5_ib_odp_init(void)
	mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
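	/*
	 * Presumably this sizes the KSM so that one entry per
	 * MLX5_IMR_MTT_SIZE chunk covers the entire user address space
	 * (TASK_SIZE).
	 */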
struct prefetch_mr_work {
	struct work_struct work;
		struct mlx5_ib_mr *mr;

static void destroy_prefetch_work(struct prefetch_mr_work *work)
	for (i = 0; i < work->num_sge; ++i)
		mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);

static struct mlx5_ib_mr *
get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_ib_mkey *mmkey;

	xa_lock(&dev->odp_mkeys);
	mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
	if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)

	mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);

	if (mr->ibmr.pd != pd) {

	/* prefetch with write-access must be supported by the MR */
	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    !mr->umem->writable) {

	refcount_inc(&mmkey->usecount);

	xa_unlock(&dev->odp_mkeys);
static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
	struct prefetch_mr_work *work =
		container_of(w, struct prefetch_mr_work, work);
	u32 bytes_mapped = 0;

	/* We rely on IB/core to execute the work only when num_sge != 0. */
	WARN_ON(!work->num_sge);
	for (i = 0; i < work->num_sge; ++i) {
		ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
				   work->frags[i].length, &bytes_mapped,

		mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);

	destroy_prefetch_work(work);
static bool init_prefetch_work(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 pf_flags, struct prefetch_mr_work *work,
			       struct ib_sge *sg_list, u32 num_sge)
	INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
	work->pf_flags = pf_flags;

	for (i = 0; i < num_sge; ++i) {
		work->frags[i].io_virt = sg_list[i].addr;
		work->frags[i].length = sg_list[i].length;
			get_prefetchable_mr(pd, advice, sg_list[i].lkey);
		if (!work->frags[i].mr) {

	work->num_sge = num_sge;

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
				    enum ib_uverbs_advise_mr_advice advice,
				    u32 pf_flags, struct ib_sge *sg_list,
	u32 bytes_mapped = 0;

	for (i = 0; i < num_sge; ++i) {
		struct mlx5_ib_mr *mr;

		mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);

		ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
				   &bytes_mapped, pf_flags);
			mlx5r_deref_odp_mkey(&mr->mmkey);

		mlx5_update_odp_stats(mr, prefetch, ret);
		mlx5r_deref_odp_mkey(&mr->mmkey);
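
/*
 * Prefetch requests with IB_UVERBS_ADVISE_MR_FLAG_FLUSH are served
 * synchronously via mlx5_ib_prefetch_sg_list(); otherwise the SGEs are
 * resolved to MRs up front (init_prefetch_work() takes an mkey reference
 * per fragment) and the actual faulting is deferred to system_unbound_wq.
 */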
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
	struct prefetch_mr_work *work;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
		pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;

	if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;

	if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
		return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,

	work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);

	if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
		destroy_prefetch_work(work);

	queue_work(system_unbound_wq, &work->work);