/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree_generic.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
/*
 * The ib_umem interval tree keeps track of the memory regions for which
 * the HW device requested to receive notifications when the related
 * memory mapping is changed.
 *
 * per_mm->umem_rwsem protects the tree.
 */
static u64 node_start(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);

	return ib_umem_start(&umem_odp->umem);
}
/* Note that the representation of the intervals in the interval tree
 * considers the ending point as contained in the interval, while the
 * function ib_umem_end returns the first address which is not contained
 * in the umem.
 */
static u64 node_last(struct umem_odp_node *n)
{
	struct ib_umem_odp *umem_odp =
			container_of(n, struct ib_umem_odp, interval_tree);

	return ib_umem_end(&umem_odp->umem) - 1;
}
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
		     node_start, node_last, static, rbt_ib_umem)
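
/*
 * The macro above instantiates the static interval-tree helpers used
 * throughout this file: rbt_ib_umem_insert(), rbt_ib_umem_remove(),
 * rbt_ib_umem_iter_first() and rbt_ib_umem_iter_next(), all keyed on the
 * closed interval [node_start(n), node_last(n)].
 */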
static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
	mutex_lock(&umem_odp->umem_mutex);
	if (umem_odp->notifiers_count++ == 0)
		/*
		 * Initialize the completion object for waiting on
		 * notifiers. Since notifier_count is zero, no one should be
		 * waiting right now.
		 */
		reinit_completion(&umem_odp->notifier_completion);
	mutex_unlock(&umem_odp->umem_mutex);
}
static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
	mutex_lock(&umem_odp->umem_mutex);
	/*
	 * This sequence increase will notify the QP page fault handler that
	 * the page that is going to be mapped in the spte could have been
	 * freed.
	 */
	++umem_odp->notifiers_seq;
	if (--umem_odp->notifiers_count == 0)
		complete_all(&umem_odp->notifier_completion);
	mutex_unlock(&umem_odp->umem_mutex);
}
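
/*
 * The count/sequence pair above implements the usual mmu_notifier
 * collision-retry scheme: a page-fault path samples notifiers_seq before
 * faulting pages in and uses ib_umem_mmu_notifier_retry() to detect that
 * an invalidation ran (or is still running) in the meantime. A sketch of
 * that caller-side protocol follows ib_umem_odp_map_dma_pages() below.
 */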
static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
					       u64 start, u64 end, void *cookie)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(umem_odp);
	umem_odp->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&umem_odp->notifier_completion);
	umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
					ib_umem_end(umem));
	return 0;
}
static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	down_read(&per_mm->umem_rwsem);
	if (per_mm->active)
		rbt_ib_umem_for_each_in_range(
			&per_mm->umem_tree, 0, ULLONG_MAX,
			ib_umem_notifier_release_trampoline, true, NULL);
	up_read(&per_mm->umem_rwsem);
}
static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
					     u64 start, u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->umem.context->invalidate_range(item, start, end);
	return 0;
}
static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end,
						   bool blockable)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (blockable)
		down_read(&per_mm->umem_rwsem);
	else if (!down_read_trylock(&per_mm->umem_rwsem))
		return -EAGAIN;

	if (!per_mm->active) {
		up_read(&per_mm->umem_rwsem);
		/*
		 * At this point active is permanently set and visible to this
		 * CPU without a lock, that fact is relied on to skip the
		 * unlock in range_end.
		 */
		return 0;
	}

	return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
					     invalidate_range_start_trampoline,
					     blockable, NULL);
}
static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
					   u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}
static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext_per_mm *per_mm =
		container_of(mn, struct ib_ucontext_per_mm, mn);

	if (unlikely(!per_mm->active))
		return;

	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
				      invalidate_range_end_trampoline, true,
				      NULL);
	up_read(&per_mm->umem_rwsem);
}
static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
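
/*
 * Note the asymmetric locking in the two range callbacks above:
 * invalidate_range_start() returns with umem_rwsem held for read (unless
 * per_mm is no longer active) and invalidate_range_end() releases it.
 * This keeps the interval tree stable for the whole start/end window.
 */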
static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_umem *umem = &umem_odp->umem;

	down_write(&per_mm->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem_odp->interval_tree,
				   &per_mm->umem_tree);
	up_write(&per_mm->umem_rwsem);
}
static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_umem *umem = &umem_odp->umem;

	down_write(&per_mm->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem_odp->interval_tree,
				   &per_mm->umem_tree);
	complete_all(&umem_odp->notifier_completion);

	up_write(&per_mm->umem_rwsem);
}
static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
					       struct mm_struct *mm)
{
	struct ib_ucontext_per_mm *per_mm;
	int ret;

	per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
	if (!per_mm)
		return ERR_PTR(-ENOMEM);

	per_mm->context = ctx;
	per_mm->mm = mm;
	per_mm->umem_tree = RB_ROOT_CACHED;
	init_rwsem(&per_mm->umem_rwsem);
	per_mm->active = ctx->invalidate_range;

	rcu_read_lock();
	per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();

	WARN_ON(mm != current->mm);

	per_mm->mn.ops = &ib_umem_notifiers;
	ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
	if (ret) {
		dev_err(&ctx->device->dev,
			"Failed to register mmu_notifier %d\n", ret);
		goto out_pid;
	}

	list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
	return per_mm;

out_pid:
	put_pid(per_mm->tgid);
	kfree(per_mm);
	return ERR_PTR(ret);
}
static int get_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext *ctx = umem_odp->umem.context;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * Generally speaking we expect only one or two per_mm in this list,
	 * so no reason to optimize this search today.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
		if (per_mm->mm == umem_odp->umem.owning_mm)
			goto found;
	}

	per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
	if (IS_ERR(per_mm)) {
		mutex_unlock(&ctx->per_mm_list_lock);
		return PTR_ERR(per_mm);
	}

found:
	umem_odp->per_mm = per_mm;
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);

	return 0;
}
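
/*
 * per_mm lifetime is reference counted through odp_mrs_count: every
 * umem_odp (including the child umems created by ib_alloc_odp_umem()
 * below) holds one count, and the structure is torn down by put_per_mm()
 * when the count drops to zero.
 */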
static void free_per_mm(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
}
void put_per_mm(struct ib_umem_odp *umem_odp)
{
	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
	struct ib_ucontext *ctx = umem_odp->umem.context;
	bool need_free;

	mutex_lock(&ctx->per_mm_list_lock);
	umem_odp->per_mm = NULL;
	per_mm->odp_mrs_count--;
	need_free = per_mm->odp_mrs_count == 0;
	if (need_free)
		list_del(&per_mm->ucontext_list);
	mutex_unlock(&ctx->per_mm_list_lock);

	if (!need_free)
		return;

	/*
	 * NOTE! mmu_notifier_unregister() can happen between a start/end
	 * callback, resulting in a start without a matching end, and thus an
	 * unbalanced lock. This doesn't really matter to us since we are
	 * about to kfree the memory that holds the lock, however LOCKDEP
	 * doesn't like this.
	 */
	down_write(&per_mm->umem_rwsem);
	per_mm->active = false;
	up_write(&per_mm->umem_rwsem);

	WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
	mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
	put_pid(per_mm->tgid);
	mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}
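
/*
 * The final kfree is deferred through mmu_notifier_call_srcu() because
 * the _no_release variant of unregister does not wait for concurrent
 * notifier callbacks to finish; the per_mm must stay valid until an SRCU
 * grace period has elapsed.
 */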
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				      unsigned long addr, size_t size)
{
	struct ib_ucontext *ctx = per_mm->context;
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int pages = size >> PAGE_SHIFT;
	int ret;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->context    = ctx;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;
	umem->is_odp     = 1;
	odp_data->per_mm = per_mm;

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	odp_data->page_list =
		vzalloc(array_size(pages, sizeof(*odp_data->page_list)));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list =
		vzalloc(array_size(pages, sizeof(*odp_data->dma_list)));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	/*
	 * Caller must ensure that the umem_odp that the per_mm came from
	 * cannot be freed during the call to ib_alloc_odp_umem.
	 */
	mutex_lock(&ctx->per_mm_list_lock);
	per_mm->odp_mrs_count++;
	mutex_unlock(&ctx->per_mm_list_lock);
	add_umem_to_per_mm(odp_data);

	return odp_data;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
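
/*
 * Usage sketch (hypothetical driver code, for illustration only): a
 * driver implementing implicit ODP can carve page-aligned child umems
 * out of an existing per_mm when a fault arrives, e.g.:
 *
 *	struct ib_umem_odp *child;
 *
 *	child = ib_alloc_odp_umem(parent->per_mm, fault_addr & PAGE_MASK,
 *				  child_size);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *
 * where parent, fault_addr and child_size are the caller's own state;
 * mlx5 follows a pattern like this for its implicit ODP MRs.
 */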
int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	struct ib_umem *umem = &umem_odp->umem;
	/*
	 * NOTE: This must be called in a process context where
	 * umem->owning_mm == current->mm
	 */
	struct mm_struct *mm = umem->owning_mm;
	int ret_val;

	if (access & IB_ACCESS_HUGETLB) {
		struct vm_area_struct *vma;
		struct hstate *h;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ib_umem_start(umem));
		if (!vma || !is_vm_hugetlb_page(vma)) {
			up_read(&mm->mmap_sem);
			return -EINVAL;
		}
		h = hstate_vma(vma);
		umem->page_shift = huge_page_shift(h);
		up_read(&mm->mmap_sem);
	}

	mutex_init(&umem_odp->umem_mutex);

	init_completion(&umem_odp->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem_odp->page_list =
			vzalloc(array_size(sizeof(*umem_odp->page_list),
					   ib_umem_num_pages(umem)));
		if (!umem_odp->page_list)
			return -ENOMEM;

		umem_odp->dma_list =
			vzalloc(array_size(sizeof(*umem_odp->dma_list),
					   ib_umem_num_pages(umem)));
		if (!umem_odp->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	ret_val = get_per_mm(umem_odp);
	if (ret_val)
		goto out_dma_list;
	add_umem_to_per_mm(umem_odp);

	return 0;

out_dma_list:
	vfree(umem_odp->dma_list);
out_page_list:
	vfree(umem_odp->page_list);
	return ret_val;
}
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	struct ib_umem *umem = &umem_odp->umem;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
				    ib_umem_end(umem));

	remove_umem_from_per_mm(umem_odp);
	put_per_mm(umem_odp);
	vfree(umem_odp->dma_list);
	vfree(umem_odp->page_list);
}
/*
 * Map for DMA and insert a single page into the on-demand paging page
 * tables.
 *
 * @umem_odp: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It
 * returns -EAGAIN if a concurrent invalidation prevents us from updating
 * the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem_odp->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem_odp->dma_list[page_index] = dma_addr | access_mask;
		umem_odp->page_list[page_index] = page;
		umem->npages++;
		stored_page = 1;
	} else if (umem_odp->page_list[page_index] == page) {
		umem_odp->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem_odp->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		ib_umem_notifier_start_account(umem_odp);
		umem->context->invalidate_range(
			umem_odp,
			ib_umem_start(umem) + (page_index << umem->page_shift),
			ib_umem_start(umem) +
				((page_index + 1) << umem->page_shift));
		ib_umem_notifier_end_account(umem_odp);
		ret = -EAGAIN;
	}

	return ret;
}
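
/*
 * Layout note: dma_list entries are page aligned, so the low bits are
 * free to carry the per-page permission flags (ODP_READ_ALLOWED_BIT and
 * ODP_WRITE_ALLOWED_BIT, defined in rdma/ib_umem_odp.h); that is why
 * access_mask is simply OR-ed into the entry above and masked off with
 * ODP_DMA_ADDR_MASK on the unmap path below.
 */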
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages is updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped in success, negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that userspace process is being terminated
 * and mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. the sequence number is read from
 *               umem_odp->notifiers_seq before calling this function
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq)
{
	struct ib_umem *umem = &umem_odp->umem;
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	struct page **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	/*
	 * owning_process is allowed to be NULL, this means the mm somehow
	 * outlives the originating process.. Presumably mmget_not_zero will
	 * fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
	if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				(bcnt + BIT(page_shift) - 1) >> page_shift,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this would make the code much
		 * more complex (and doesn't gain us much performance in most
		 * use cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem_odp->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem_odp, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0)
				break;

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem_odp->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
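
/*
 * Caller-side sketch (hypothetical driver code; program_device_ptes() is
 * an illustrative placeholder, not a real helper): the notifier sequence
 * must be sampled before faulting, and umem_mutex must be held while the
 * device page tables are updated so a racing invalidation is detected:
 *
 *	unsigned long seq = READ_ONCE(umem_odp->notifiers_seq);
 *	int npages;
 *
 *	npages = ib_umem_odp_map_dma_pages(umem_odp, io_virt, bcnt,
 *					   access_mask, seq);
 *	if (npages == -EAGAIN)
 *		goto retry;	// collided with an invalidation
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem_odp, seq))
 *		program_device_ptes(umem_odp, io_virt, npages);
 *	mutex_unlock(&umem_odp->umem_mutex);
 */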
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	struct ib_umem *umem = &umem_odp->umem;
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt = max_t(u64, virt, ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completion. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem_odp->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem_odp->page_list[idx]) {
			struct page *page = umem_odp->page_list[idx];
			dma_addr_t dma = umem_odp->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem_odp->page_list[idx] = NULL;
			umem_odp->dma_list[idx] = 0;
			umem->npages--;
		}
	}
	mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
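
/*
 * Sketch of the expected invalidate_range hook (hypothetical driver code,
 * for illustration; zap_device_ptes() is a made-up name): a driver's
 * callback typically zaps its device PTEs for the range and then funnels
 * into the helper above, e.g.:
 *
 *	static void my_invalidate_range(struct ib_umem_odp *umem_odp,
 *					unsigned long start,
 *					unsigned long end)
 *	{
 *		zap_device_ptes(umem_odp, start, end);
 *		ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
 *	}
 */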
/* @last is not a part of the interval. See comment for function
 * node_last.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 last,
				  umem_call_back cb,
				  bool blockable,
				  void *cookie)
{
	int ret_val = 0;
	struct umem_odp_node *node, *next;
	struct ib_umem_odp *umem;

	if (unlikely(start == last))
		return ret_val;

	for (node = rbt_ib_umem_iter_first(root, start, last - 1);
			node; node = next) {
		/* TODO move the blockable decision up to the callback */
		if (!blockable)
			return -EAGAIN;
		next = rbt_ib_umem_iter_next(node, start, last - 1);
		umem = container_of(node, struct ib_umem_odp, interval_tree);
		ret_val = cb(umem, start, last, cookie) || ret_val;
	}

	return ret_val;
}
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length)
{
	struct umem_odp_node *node;

	node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
	if (node)
		return container_of(node, struct ib_umem_odp, interval_tree);
	return NULL;
}
EXPORT_SYMBOL(rbt_ib_umem_lookup);