/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

/*
 * The ib_umem list keeps track of memory regions for which the HW
 * device requests to receive notifications when the related memory
 * mapping is changed.
 *
 * ib_umem_lock protects the list.
 */

static u64 node_start(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);

	return ib_umem_start(&umem_odp->umem);
}

/* Note that the representation of the intervals in the interval tree
 * considers the ending point as contained in the interval, while the
 * function ib_umem_end returns the first address which is not contained
 * in the umem.
 */
static u64 node_last(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);

	return ib_umem_end(&umem_odp->umem) - 1;
}

INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
		     node_start, node_last, static, rbt_ib_umem)
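
/*
 * INTERVAL_TREE_DEFINE() above instantiates the static helpers
 * rbt_ib_umem_insert(), rbt_ib_umem_remove(), rbt_ib_umem_iter_first() and
 * rbt_ib_umem_iter_next() used throughout this file, keyed on the closed
 * interval [node_start(n), node_last(n)] of each umem.
 */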

static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
	mutex_lock(&umem_odp->umem_mutex);
	if (umem_odp->notifiers_count++ == 0)
		/*
		 * Initialize the completion object for waiting on
		 * notifiers. Since notifier_count is zero, no one should be
		 * waiting right now.
		 */
		reinit_completion(&umem_odp->notifier_completion);
	mutex_unlock(&umem_odp->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
	mutex_lock(&umem_odp->umem_mutex);
	/*
	 * This sequence increase will notify the QP page fault that the page
	 * that is going to be mapped in the spte could have been freed.
	 */
	++umem_odp->notifiers_seq;
	if (--umem_odp->notifiers_count == 0)
		complete_all(&umem_odp->notifier_completion);
	mutex_unlock(&umem_odp->umem_mutex);
}
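
/*
 * Together, the two accounting helpers above implement a seqlock-like
 * protocol with the page-fault path: a non-zero notifiers_count means an
 * invalidation is in flight and new mappings must not be installed, while a
 * changed notifiers_seq tells a fault handler that raced with an
 * invalidation to retry (see ib_umem_mmu_notifier_retry()).
 */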

static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
					       u64 start, u64 end, void *cookie)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(umem_odp);
	umem_odp->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&umem_odp->notifier_completion);
	umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
					ib_umem_end(umem));
	return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (!per_mm->context->invalidate_range)
		return;

	down_read(&per_mm->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      true,
				      NULL);
	up_read(&per_mm->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}

static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
					     u64 start, u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->umem.context->invalidate_range(item, start, end);
	return 0;
}

static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end,
						   bool blockable)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (!per_mm->context->invalidate_range)
		return 0;

	if (blockable)
		down_read(&per_mm->umem_rwsem);
	else if (!down_read_trylock(&per_mm->umem_rwsem))
		return -EAGAIN;

	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
					     invalidate_range_start_trampoline,
					     blockable, NULL);
}

static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (!per_mm->context->invalidate_range)
		return;

	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, true, NULL);
	up_read(&per_mm->umem_rwsem);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
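
/*
 * A single ib_ucontext_per_mm is shared by all ODP MRs of a ucontext that
 * live on the same mm_struct; the callbacks above fan each mmu_notifier
 * event out to every umem whose interval overlaps the invalidated range.
 */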

static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_umem *umem = &umem_odp->umem;

	down_write(&per_mm->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem_odp->interval_tree,
				   &per_mm->umem_tree);
	up_write(&per_mm->umem_rwsem);
}

static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_umem *umem = &umem_odp->umem;

	down_write(&per_mm->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem_odp->interval_tree,
				   &per_mm->umem_tree);
	complete_all(&umem_odp->notifier_completion);

	up_write(&per_mm->umem_rwsem);
}

static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
					       struct mm_struct *mm)
{
	struct ib_ucontext_per_mm *per_mm;
	int ret;

	per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
	if (!per_mm)
		return ERR_PTR(-ENOMEM);

	per_mm->context = ctx;
	per_mm->mm = mm;
	per_mm->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&per_mm->umem_rwsem);

	rcu_read_lock();
	per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();

	WARN_ON(mm != current->mm);

	per_mm->mn.ops = &ib_umem_notifiers;
	ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
	if (ret) {
		dev_err(&ctx->device->dev,
			"Failed to register mmu_notifier %d\n", ret);
		goto out_pid;
	}

	list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
	return per_mm;

out_pid:
	put_pid(per_mm->tgid);
	kfree(per_mm);
	return ERR_PTR(ret);
}

static int get_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext *ctx = umem_odp->umem.context;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * Generally speaking we expect only one or two per_mm in this list,
	 * so no reason to optimize this search today.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
		if (per_mm->mm == umem_odp->umem.owning_mm)
			goto found;
	}

	per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
	if (IS_ERR(per_mm)) {
		mutex_unlock(&ctx->per_mm_list_lock);
		return PTR_ERR(per_mm);
	}

found:
	umem_odp->per_mm = per_mm;
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);

	return 0;
}

void put_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_ucontext *ctx = umem_odp->umem.context;
	bool need_free;

	mutex_lock(&ctx->per_mm_list_lock);
	umem_odp->per_mm = NULL;
	per_mm->odp_mrs_count--;
	need_free = per_mm->odp_mrs_count == 0;
	if (need_free)
		list_del(&per_mm->ucontext_list);
	mutex_unlock(&ctx->per_mm_list_lock);

	if (!need_free)
		return;

	mmu_notifier_unregister(&per_mm->mn, per_mm->mm);
	put_pid(per_mm->tgid);
	kfree(per_mm);
}
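
/*
 * get_per_mm()/put_per_mm() effectively refcount the shared per_mm through
 * odp_mrs_count; the final put unregisters the mmu notifier and frees it.
 */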

struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				      unsigned long addr, size_t size)
{
	struct ib_ucontext *ctx = per_mm->context;
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int pages = size >> PAGE_SHIFT;
	int ret;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->context    = ctx;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;
	umem->is_odp     = 1;
	odp_data->per_mm = per_mm;

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	odp_data->page_list =
		vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list =
		vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * Caller must ensure that the umem_odp that the per_mm came from
	 * cannot be freed during the call to ib_alloc_odp_umem.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);
	add_umem_to_per_mm(odp_data);

	return odp_data;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
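
/*
 * A hedged usage sketch: ib_alloc_odp_umem() is meant for implicit ODP,
 * where a driver exposing a whole-address-space MR carves out child umems
 * on demand from its fault handler. The names below are illustrative only:
 *
 *	child = ib_alloc_odp_umem(parent_odp->per_mm, fault_addr & page_mask,
 *				  child_size);
 *	if (IS_ERR(child))
 *		return ERR_CAST(child);
 */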

int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	struct ib_umem *umem = &umem_odp->umem;
	/*
	 * NOTE: This must be called in a process context where
	 * umem->owning_mm == current->mm
	 */
	struct mm_struct *mm = umem->owning_mm;
	int ret_val;

	if (access & IB_ACCESS_HUGETLB) {
		struct vm_area_struct *vma;
		struct hstate *h;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ib_umem_start(umem));
		if (!vma || !is_vm_hugetlb_page(vma)) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}
		h = hstate_vma(vma);
		umem->page_shift = huge_page_shift(h);
		up_read(&mm->mmap_sem);
		umem->hugetlb = 1;
	} else {
		umem->hugetlb = 0;
	}

	mutex_init(&umem_odp->umem_mutex);

	init_completion(&umem_odp->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem_odp->page_list =
			vzalloc(array_size(sizeof(*umem_odp->page_list),
					   ib_umem_num_pages(umem)));
		if (!umem_odp->page_list)
			return -ENOMEM;

		umem_odp->dma_list =
			vzalloc(array_size(sizeof(*umem_odp->dma_list),
					   ib_umem_num_pages(umem)));
		if (!umem_odp->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	ret_val = get_per_mm(umem_odp);
	if (ret_val)
		goto out_dma_list;
	add_umem_to_per_mm(umem_odp);

	return 0;

out_dma_list:
	vfree(umem_odp->dma_list);
out_page_list:
	vfree(umem_odp->page_list);
	return ret_val;
}

void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
				    ib_umem_end(umem));

	remove_umem_from_per_mm(umem_odp);
	put_per_mm(umem_odp);
	vfree(umem_odp->dma_list);
	vfree(umem_odp->page_list);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem_odp->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem_odp->dma_list[page_index] = dma_addr | access_mask;
		umem_odp->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem_odp->page_list[page_index] == page) {
		umem_odp->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem_odp->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		/* Convert the page index to a byte offset with a left shift;
		 * a right shift here would compute a bogus address. */
		invalidate_page_trampoline(
			umem_odp,
			ib_umem_start(umem) + (page_index << umem->page_shift),
			ib_umem_start(umem) + ((page_index + 1) <<
					       umem->page_shift),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped on success, negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and the mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem_odp->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	struct page **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	/*
	 * owning_process is allowed to be NULL, this means somehow the mm is
	 * existing beyond the lifetime of the originating process. Presumably
	 * mmget_not_zero will fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
	if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				(bcnt + BIT(page_shift) - 1) >> page_shift,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem_odp->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem_odp, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0)
				break;

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem_odp->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
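
/*
 * A hedged sketch of the expected driver page-fault flow (illustrative, not
 * code from this file; retry_fault() is hypothetical):
 *
 *	current_seq = READ_ONCE(umem_odp->notifiers_seq);
 *	npages = ib_umem_odp_map_dma_pages(umem_odp, io_virt, bcnt,
 *					   access_mask, current_seq);
 *	if (npages == -EAGAIN)
 *		return retry_fault();
 *
 * The driver then publishes umem_odp->dma_list[] into its device page
 * tables, re-checking ib_umem_mmu_notifier_retry(umem_odp, current_seq)
 * under umem_odp->umem_mutex before making the entries visible to HW.
 */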

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	struct ib_umem *umem = &umem_odp->umem;
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completing. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem_odp->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem_odp->page_list[idx]) {
			struct page *page = umem_odp->page_list[idx];
			dma_addr_t dma = umem_odp->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem_odp->page_list[idx] = NULL;
			umem_odp->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
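
/*
 * Besides ib_umem_odp_release(), the expected caller of this function is a
 * driver's invalidate_range() callback, which must also tear down the
 * matching device page-table entries so the HW cannot use a DMA address
 * after it has been unmapped here.
 */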

/* @last is not a part of the interval. See comment for function
 * node_last.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 last,
				  umem_call_back cb,
				  bool blockable,
				  void *cookie)
{
	int ret_val = 0;
	struct umem_odp_node *node, *next;
	struct ib_umem_odp *umem;

	if (unlikely(start == last))
		return ret_val;

	for (node = rbt_ib_umem_iter_first(root, start, last - 1);
			node; node = next) {
		/* TODO move the blockable decision up to the callback */
		if (!blockable)
			return -EAGAIN;
		next = rbt_ib_umem_iter_next(node, start, last - 1);
		umem = container_of(node, struct ib_umem_odp, interval_tree);
		ret_val = cb(umem, start, last, cookie) || ret_val;
	}

	return ret_val;
}
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
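
/*
 * When blockable is false, the walk above currently bails out with -EAGAIN
 * as soon as it finds any overlapping umem, without invoking the callback;
 * see the TODO in the loop body.
 */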

struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length)
{
	struct umem_odp_node *node;

	node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
	if (node)
		return container_of(node, struct ib_umem_odp, interval_tree);
	return NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);