// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/types.h>
25 #include <linux/sched/task.h>
26 #include "amdgpu_sync.h"
27 #include "amdgpu_object.h"
28 #include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
35 #include "kfd_smi_events.h"
40 #define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
42 #define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
/* Long enough to ensure no retry fault comes after svm range is restored and
 * page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
/* Giant svm range is split into smaller ranges based on this limit. It is
 * chosen as the minimum over all dGPUs/APUs of 1/32 of VRAM size, clamped
 * between 2MB and 1GB and rounded down to a power of two.
 */
static uint64_t max_svm_range_pages;
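/* Worked example (illustrative, for a hypothetical 16 GB dGPU): 1/32 of VRAM
 * is 512 MB, i.e. real_vram_size >> 17 = 131072 pages, which already lies
 * inside the [2 MB, 1 GB] clamp window applied by svm_range_set_max_pages()
 * below.
 */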
struct criu_svm_metadata {
	struct list_head list;
	struct kfd_criu_svm_range_priv_data data;
};
static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};
/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}
98 svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
100 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
101 prange, prange->start, prange->last);
103 mmu_interval_notifier_insert_locked(&prange->notifier, mm,
104 prange->start << PAGE_SHIFT,
105 prange->npages << PAGE_SHIFT,
/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
117 static void svm_range_add_to_svms(struct svm_range *prange)
119 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
120 prange, prange->start, prange->last);
122 list_move_tail(&prange->list, &prange->svms->list);
123 prange->it_node.start = prange->start;
124 prange->it_node.last = prange->last;
125 interval_tree_insert(&prange->it_node, &prange->svms->objects);
128 static void svm_range_remove_notifier(struct svm_range *prange)
130 pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
131 prange->svms, prange,
132 prange->notifier.interval_tree.start >> PAGE_SHIFT,
133 prange->notifier.interval_tree.last >> PAGE_SHIFT);
135 if (prange->notifier.interval_tree.start != 0 &&
136 prange->notifier.interval_tree.last != 0)
137 mmu_interval_notifier_remove(&prange->notifier);
static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}
148 svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
149 unsigned long offset, unsigned long npages,
150 unsigned long *hmm_pfns, uint32_t gpuidx)
152 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
153 dma_addr_t *addr = prange->dma_addr[gpuidx];
154 struct device *dev = adev->dev;
159 addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
162 prange->dma_addr[gpuidx] = addr;
166 for (i = 0; i < npages; i++) {
167 if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
168 dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
170 page = hmm_pfn_to_page(hmm_pfns[i]);
171 if (is_zone_device_page(page)) {
172 struct amdgpu_device *bo_adev =
173 amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
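			/* Device-private (VRAM) pages have no CPU DMA mapping;
			 * record the GPU physical address relative to the
			 * pgmap range instead and tag it with
			 * SVM_RANGE_VRAM_DOMAIN so the map/unmap paths can
			 * tell the two cases apart.
			 */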
175 addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
176 bo_adev->vm_manager.vram_base_offset -
177 bo_adev->kfd.dev->pgmap.range.start;
178 addr[i] |= SVM_RANGE_VRAM_DOMAIN;
179 pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
182 addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
183 r = dma_mapping_error(dev, addr[i]);
185 dev_err(dev, "failed %d dma_map_page\n", r);
188 pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
189 addr[i] >> PAGE_SHIFT, page_to_pfn(page));
195 svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
196 unsigned long offset, unsigned long npages,
197 unsigned long *hmm_pfns)
199 struct kfd_process *p;
203 p = container_of(prange->svms, struct kfd_process, svms);
205 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
206 struct kfd_process_device *pdd;
208 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
209 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
211 pr_debug("failed to find device idx %d\n", gpuidx);
215 r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
224 void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
225 unsigned long offset, unsigned long npages)
227 enum dma_data_direction dir = DMA_BIDIRECTIONAL;
233 for (i = offset; i < offset + npages; i++) {
234 if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
236 pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
237 dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
242 void svm_range_free_dma_mappings(struct svm_range *prange)
244 struct kfd_process_device *pdd;
245 dma_addr_t *dma_addr;
247 struct kfd_process *p;
250 p = container_of(prange->svms, struct kfd_process, svms);
252 for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
253 dma_addr = prange->dma_addr[gpuidx];
257 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
259 pr_debug("failed to find device idx %d\n", gpuidx);
262 dev = &pdd->dev->pdev->dev;
263 svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
265 prange->dma_addr[gpuidx] = NULL;
269 static void svm_range_free(struct svm_range *prange, bool update_mem_usage)
271 uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
272 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
274 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
275 prange->start, prange->last);
277 svm_range_vram_node_free(prange);
278 svm_range_free_dma_mappings(prange);
280 if (update_mem_usage && !p->xnack_enabled) {
281 pr_debug("unreserve mem limit: %lld\n", size);
282 amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
283 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR);
285 mutex_destroy(&prange->lock);
286 mutex_destroy(&prange->migrate_mutex);
291 svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
292 uint8_t *granularity, uint32_t *flags)
294 *location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
295 *prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
298 KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
302 svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
303 uint64_t last, bool update_mem_usage)
305 uint64_t size = last - start + 1;
306 struct svm_range *prange;
307 struct kfd_process *p;
309 prange = kzalloc(sizeof(*prange), GFP_KERNEL);
313 p = container_of(svms, struct kfd_process, svms);
314 if (!p->xnack_enabled && update_mem_usage &&
315 amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
316 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) {
317 pr_info("SVM mapping failed, exceeds resident system memory limit\n");
321 prange->npages = size;
323 prange->start = start;
325 INIT_LIST_HEAD(&prange->list);
326 INIT_LIST_HEAD(&prange->update_list);
327 INIT_LIST_HEAD(&prange->svm_bo_list);
328 INIT_LIST_HEAD(&prange->deferred_list);
329 INIT_LIST_HEAD(&prange->child_list);
330 atomic_set(&prange->invalid, 0);
331 prange->validate_timestamp = 0;
332 mutex_init(&prange->migrate_mutex);
333 mutex_init(&prange->lock);
335 if (p->xnack_enabled)
336 bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
339 svm_range_set_default_attributes(&prange->preferred_loc,
340 &prange->prefetch_loc,
341 &prange->granularity, &prange->flags);
343 pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);
static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}
356 static void svm_range_bo_release(struct kref *kref)
358 struct svm_range_bo *svm_bo;
360 svm_bo = container_of(kref, struct svm_range_bo, kref);
361 pr_debug("svm_bo 0x%p\n", svm_bo);
363 spin_lock(&svm_bo->list_lock);
364 while (!list_empty(&svm_bo->range_list)) {
365 struct svm_range *prange =
366 list_first_entry(&svm_bo->range_list,
367 struct svm_range, svm_bo_list);
368 /* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
371 list_del_init(&prange->svm_bo_list);
372 spin_unlock(&svm_bo->list_lock);
374 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
375 prange->start, prange->last);
376 mutex_lock(&prange->lock);
377 prange->svm_bo = NULL;
378 mutex_unlock(&prange->lock);
380 spin_lock(&svm_bo->list_lock);
382 spin_unlock(&svm_bo->list_lock);
383 if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
384 /* We're not in the eviction worker.
385 * Signal the fence and synchronize with any
		 * pending eviction work.
		 */
388 dma_fence_signal(&svm_bo->eviction_fence->base);
389 cancel_work_sync(&svm_bo->eviction_work);
391 dma_fence_put(&svm_bo->eviction_fence->base);
392 amdgpu_bo_unref(&svm_bo->bo);
396 static void svm_range_bo_wq_release(struct work_struct *work)
398 struct svm_range_bo *svm_bo;
400 svm_bo = container_of(work, struct svm_range_bo, release_work);
401 svm_range_bo_release(&svm_bo->kref);
404 static void svm_range_bo_release_async(struct kref *kref)
406 struct svm_range_bo *svm_bo;
408 svm_bo = container_of(kref, struct svm_range_bo, kref);
409 pr_debug("svm_bo 0x%p\n", svm_bo);
410 INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
411 schedule_work(&svm_bo->release_work);
414 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
416 kref_put(&svm_bo->kref, svm_range_bo_release_async);
419 static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
422 kref_put(&svm_bo->kref, svm_range_bo_release);
426 svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
428 struct amdgpu_device *bo_adev;
430 mutex_lock(&prange->lock);
431 if (!prange->svm_bo) {
432 mutex_unlock(&prange->lock);
435 if (prange->ttm_res) {
436 /* We still have a reference, all is well */
437 mutex_unlock(&prange->lock);
440 if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source bo_adev
		 * svm_bo range list, and return false to allocate svm_bo from
		 * destination adev.
		 */
446 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
447 if (bo_adev != adev) {
448 mutex_unlock(&prange->lock);
450 spin_lock(&prange->svm_bo->list_lock);
451 list_del_init(&prange->svm_bo_list);
452 spin_unlock(&prange->svm_bo->list_lock);
454 svm_range_bo_unref(prange->svm_bo);
457 if (READ_ONCE(prange->svm_bo->evicting)) {
459 struct svm_range_bo *svm_bo;
460 /* The BO is getting evicted,
			 * we need to get a new one
			 */
463 mutex_unlock(&prange->lock);
464 svm_bo = prange->svm_bo;
465 f = dma_fence_get(&svm_bo->eviction_fence->base);
466 svm_range_bo_unref(prange->svm_bo);
467 /* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
470 dma_fence_wait(f, false);
473 /* The BO was still around and we got
			 * a new reference to it
			 */
476 mutex_unlock(&prange->lock);
477 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
478 prange->svms, prange->start, prange->last);
480 prange->ttm_res = prange->svm_bo->bo->tbo.resource;
485 mutex_unlock(&prange->lock);
488 /* We need a new svm_bo. Spin-loop to wait for concurrent
489 * svm_range_bo_release to finish removing this range from
490 * its range list. After this, it is safe to reuse the
	 * svm_bo pointer and svm_bo_list head.
	 */
493 while (!list_empty_careful(&prange->svm_bo_list))
499 static struct svm_range_bo *svm_range_bo_new(void)
501 struct svm_range_bo *svm_bo;
503 svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
507 kref_init(&svm_bo->kref);
508 INIT_LIST_HEAD(&svm_bo->range_list);
509 spin_lock_init(&svm_bo->list_lock);
515 svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
518 struct amdgpu_bo_param bp;
519 struct svm_range_bo *svm_bo;
520 struct amdgpu_bo_user *ubo;
521 struct amdgpu_bo *bo;
522 struct kfd_process *p;
523 struct mm_struct *mm;
526 p = container_of(prange->svms, struct kfd_process, svms);
527 pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
528 prange->start, prange->last);
530 if (svm_range_validate_svm_bo(adev, prange))
533 svm_bo = svm_range_bo_new();
535 pr_debug("failed to alloc svm bo\n");
538 mm = get_task_mm(p->lead_thread);
540 pr_debug("failed to get mm\n");
544 svm_bo->svms = prange->svms;
545 svm_bo->eviction_fence =
546 amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
550 INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
551 svm_bo->evicting = 0;
552 memset(&bp, 0, sizeof(bp));
553 bp.size = prange->npages * PAGE_SIZE;
554 bp.byte_align = PAGE_SIZE;
555 bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
556 bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
557 bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
558 bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
559 bp.type = ttm_bo_type_device;
562 r = amdgpu_bo_create_user(adev, &bp, &ubo);
564 pr_debug("failed %d to create bo\n", r);
565 goto create_bo_failed;
568 r = amdgpu_bo_reserve(bo, true);
570 pr_debug("failed %d to reserve bo\n", r);
571 goto reserve_bo_failed;
574 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
576 pr_debug("failed %d to reserve bo\n", r);
577 amdgpu_bo_unreserve(bo);
578 goto reserve_bo_failed;
580 amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);
582 amdgpu_bo_unreserve(bo);
585 prange->svm_bo = svm_bo;
586 prange->ttm_res = bo->tbo.resource;
589 spin_lock(&svm_bo->list_lock);
590 list_add(&prange->svm_bo_list, &svm_bo->range_list);
591 spin_unlock(&svm_bo->list_lock);
596 amdgpu_bo_unref(&bo);
598 dma_fence_put(&svm_bo->eviction_fence->base);
600 prange->ttm_res = NULL;
605 void svm_range_vram_node_free(struct svm_range *prange)
607 svm_range_bo_unref(prange->svm_bo);
608 prange->ttm_res = NULL;
611 struct amdgpu_device *
612 svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
614 struct kfd_process_device *pdd;
615 struct kfd_process *p;
618 p = container_of(prange->svms, struct kfd_process, svms);
620 gpu_idx = kfd_process_gpuidx_from_gpuid(p, gpu_id);
622 pr_debug("failed to get device by id 0x%x\n", gpu_id);
625 pdd = kfd_process_device_from_gpuidx(p, gpu_idx);
627 pr_debug("failed to get device by idx 0x%x\n", gpu_idx);
631 return pdd->dev->adev;
634 struct kfd_process_device *
635 svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
637 struct kfd_process *p;
638 int32_t gpu_idx, gpuid;
641 p = container_of(prange->svms, struct kfd_process, svms);
643 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx);
645 pr_debug("failed to get device id by adev %p\n", adev);
649 return kfd_process_device_from_gpuidx(p, gpu_idx);
static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
662 svm_range_check_attr(struct kfd_process *p,
663 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
667 for (i = 0; i < nattr; i++) {
668 uint32_t val = attrs[i].value;
669 int gpuidx = MAX_GPU_INSTANCE;
671 switch (attrs[i].type) {
672 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
673 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
674 val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
675 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
677 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
678 if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
679 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
681 case KFD_IOCTL_SVM_ATTR_ACCESS:
682 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
683 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
684 gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
686 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
688 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
690 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
693 pr_debug("unknown attr type 0x%x\n", attrs[i].type);
698 pr_debug("no GPU 0x%x found\n", val);
700 } else if (gpuidx < MAX_GPU_INSTANCE &&
701 !test_bit(gpuidx, p->svms.bitmap_supported)) {
702 pr_debug("GPU 0x%x not supported\n", val);
711 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
712 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
713 bool *update_mapping)
718 for (i = 0; i < nattr; i++) {
719 switch (attrs[i].type) {
720 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
721 prange->preferred_loc = attrs[i].value;
723 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
724 prange->prefetch_loc = attrs[i].value;
726 case KFD_IOCTL_SVM_ATTR_ACCESS:
727 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
728 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
729 *update_mapping = true;
730 gpuidx = kfd_process_gpuidx_from_gpuid(p,
732 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
733 bitmap_clear(prange->bitmap_access, gpuidx, 1);
734 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
735 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
736 bitmap_set(prange->bitmap_access, gpuidx, 1);
737 bitmap_clear(prange->bitmap_aip, gpuidx, 1);
739 bitmap_clear(prange->bitmap_access, gpuidx, 1);
740 bitmap_set(prange->bitmap_aip, gpuidx, 1);
743 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
744 *update_mapping = true;
745 prange->flags |= attrs[i].value;
747 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
748 *update_mapping = true;
749 prange->flags &= ~attrs[i].value;
751 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
752 prange->granularity = attrs[i].value;
755 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
761 svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
762 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
767 for (i = 0; i < nattr; i++) {
768 switch (attrs[i].type) {
769 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
770 if (prange->preferred_loc != attrs[i].value)
773 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
774 /* Prefetch should always trigger a migration even
775 * if the value of the attribute didn't change.
778 case KFD_IOCTL_SVM_ATTR_ACCESS:
779 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
780 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
781 gpuidx = kfd_process_gpuidx_from_gpuid(p,
783 if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
784 if (test_bit(gpuidx, prange->bitmap_access) ||
785 test_bit(gpuidx, prange->bitmap_aip))
787 } else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
788 if (!test_bit(gpuidx, prange->bitmap_access))
791 if (!test_bit(gpuidx, prange->bitmap_aip))
795 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
796 if ((prange->flags & attrs[i].value) != attrs[i].value)
799 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
800 if ((prange->flags & attrs[i].value) != 0)
803 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
804 if (prange->granularity != attrs[i].value)
808 WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * debug output svm range start, end, prefetch location from svms
 * interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
824 static void svm_range_debug_dump(struct svm_range_list *svms)
826 struct interval_tree_node *node;
827 struct svm_range *prange;
829 pr_debug("dump svms 0x%p list\n", svms);
830 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
832 list_for_each_entry(prange, &svms->list, list) {
833 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
834 prange, prange->start, prange->npages,
835 prange->start + prange->npages - 1,
839 pr_debug("dump svms 0x%p interval tree\n", svms);
840 pr_debug("range\tstart\tpage\tend\t\tlocation\n");
841 node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
843 prange = container_of(node, struct svm_range, it_node);
844 pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
845 prange, prange->start, prange->npages,
846 prange->start + prange->npages - 1,
848 node = interval_tree_iter_next(node, 0, ~0ULL);
853 svm_range_split_array(void *ppnew, void *ppold, size_t size,
854 uint64_t old_start, uint64_t old_n,
855 uint64_t new_start, uint64_t new_n)
857 unsigned char *new, *old, *pold;
862 pold = *(unsigned char **)ppold;
866 new = kvmalloc_array(new_n, size, GFP_KERNEL);
870 d = (new_start - old_start) * size;
871 memcpy(new, pold + d, new_n * size);
873 old = kvmalloc_array(old_n, size, GFP_KERNEL);
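	/* If the new range keeps the original start, the data retained by the
	 * old array begins right after the new_n entries copied out above;
	 * otherwise the retained data still starts at offset 0.
	 */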
879 d = (new_start == old_start) ? new_n * size : 0;
880 memcpy(old, pold + d, old_n * size);
883 *(void **)ppold = old;
884 *(void **)ppnew = new;
890 svm_range_split_pages(struct svm_range *new, struct svm_range *old,
891 uint64_t start, uint64_t last)
893 uint64_t npages = last - start + 1;
896 for (i = 0; i < MAX_GPU_INSTANCE; i++) {
897 r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
898 sizeof(*old->dma_addr[i]), old->start,
899 npages, new->start, new->npages);
908 svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
909 uint64_t start, uint64_t last)
911 uint64_t npages = last - start + 1;
913 pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
914 new->svms, new, new->start, start, last);
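	/* Both halves keep sharing the same VRAM BO; only the offset into the
	 * BO differs. If the new range is the head, the old range's offset
	 * advances past the pages it gave up; if the new range is the tail,
	 * its offset starts npages beyond the old offset.
	 */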
916 if (new->start == old->start) {
917 new->offset = old->offset;
918 old->offset += new->npages;
920 new->offset = old->offset + npages;
923 new->svm_bo = svm_range_bo_ref(old->svm_bo);
924 new->ttm_res = old->ttm_res;
926 spin_lock(&new->svm_bo->list_lock);
927 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
928 spin_unlock(&new->svm_bo->list_lock);
/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: new range
 * @old: the old range
 * @start: the old range adjust to start address in pages
 * @last: the old range adjust to last address in pages
 *
 * Copy system memory dma_addr or vram ttm_res in old range to new
 * range from new_start up to size new->npages, the remaining old range is from
 * start to last
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
949 svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
950 uint64_t start, uint64_t last)
954 pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
955 new->svms, new->start, old->start, old->last, start, last);
957 if (new->start < old->start ||
958 new->last > old->last) {
959 WARN_ONCE(1, "invalid new range start or last\n");
963 r = svm_range_split_pages(new, old, start, last);
967 if (old->actual_loc && old->ttm_res) {
968 r = svm_range_split_nodes(new, old, start, last);
973 old->npages = last - start + 1;
976 new->flags = old->flags;
977 new->preferred_loc = old->preferred_loc;
978 new->prefetch_loc = old->prefetch_loc;
979 new->actual_loc = old->actual_loc;
980 new->granularity = old->granularity;
981 new->mapped_to_gpu = old->mapped_to_gpu;
982 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
983 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases for split:
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
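/*
 * Example (illustrative): with prange covering pages [0x1000 0x1fff],
 * svm_range_split(prange, 0x1000, 0x17ff, &new) hits case 1, so prange
 * shrinks to [0x1000 0x17ff] and the new range covers [0x1800 0x1fff].
 */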
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
1010 struct svm_range **new)
1012 uint64_t old_start = prange->start;
1013 uint64_t old_last = prange->last;
1014 struct svm_range_list *svms;
1017 pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
1018 old_start, old_last, start, last);
1020 if (old_start != start && old_last != last)
1022 if (start < old_start || last > old_last)
1025 svms = prange->svms;
1026 if (old_start == start)
1027 *new = svm_range_new(svms, last + 1, old_last, false);
1029 *new = svm_range_new(svms, old_start, start - 1, false);
1033 r = svm_range_split_adjust(*new, prange, start, last);
1035 pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
1036 r, old_start, old_last, start, last);
1037 svm_range_free(*new, false);
1045 svm_range_split_tail(struct svm_range *prange,
1046 uint64_t new_last, struct list_head *insert_list)
1048 struct svm_range *tail;
1049 int r = svm_range_split(prange, prange->start, new_last, &tail);
1052 list_add(&tail->list, insert_list);
1057 svm_range_split_head(struct svm_range *prange,
1058 uint64_t new_start, struct list_head *insert_list)
1060 struct svm_range *head;
1061 int r = svm_range_split(prange, new_start, prange->last, &head);
1064 list_add(&head->list, insert_list);
1069 svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
1070 struct svm_range *pchild, enum svm_work_list_ops op)
1072 pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
1073 pchild, pchild->start, pchild->last, prange, op);
1075 pchild->work_item.mm = mm;
1076 pchild->work_item.op = op;
1077 list_add_tail(&pchild->child_list, &prange->child_list);
/**
 * svm_range_split_by_granularity - collect ranges within granularity boundary
 *
 * @p: the process with svms list
 * @mm: mm structure
 * @addr: the vm fault address in pages, to split the prange
 * @parent: parent range if prange is from child list
 * @prange: prange to split
 *
 * Trims @prange to be a single aligned block of prange->granularity if
 * possible. The head and tail are added to the child_list in @parent.
 *
 * Context: caller must hold mmap_read_lock and prange->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
1098 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
1099 unsigned long addr, struct svm_range *parent,
1100 struct svm_range *prange)
1102 struct svm_range *head, *tail;
1103 unsigned long start, last, size;
	/* Align the split range's start and size to the granularity size, then
	 * a single PTE can be used for the whole range. This reduces the
	 * number of PTEs updated and the L1 TLB space used for translation.
	 */
1110 size = 1UL << prange->granularity;
1111 start = ALIGN_DOWN(addr, size);
1112 last = ALIGN(addr + 1, size) - 1;
1114 pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
1115 prange->svms, prange->start, prange->last, start, last, size);
1117 if (start > prange->start) {
1118 r = svm_range_split(prange, start, prange->last, &head);
1121 svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
1124 if (last < prange->last) {
1125 r = svm_range_split(prange, prange->start, last, &tail);
1128 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
1131 /* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
1132 if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
1133 prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
1134 pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
1135 prange, prange->start, prange->last,
1136 SVM_OP_ADD_RANGE_AND_MAP);
1142 svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
1145 struct amdgpu_device *bo_adev;
1146 uint32_t flags = prange->flags;
1147 uint32_t mapping_flags = 0;
1149 bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
1150 bool coherent = flags & KFD_IOCTL_SVM_FLAG_COHERENT;
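	/* Rule of thumb for the MTYPE selection below: VRAM local to this GPU
	 * is mapped cacheable (CC when coherent access is requested, RW
	 * otherwise), while remote VRAM and system memory are mapped UC when
	 * coherent and NC otherwise. The exact encoding is per-ASIC, hence the
	 * switch on the GC IP version.
	 */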
1152 if (domain == SVM_RANGE_VRAM_DOMAIN)
1153 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1155 switch (KFD_GC_VERSION(adev->kfd.dev)) {
1156 case IP_VERSION(9, 4, 1):
1157 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1158 if (bo_adev == adev) {
1159 mapping_flags |= coherent ?
1160 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1162 mapping_flags |= coherent ?
1163 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1164 if (amdgpu_xgmi_same_hive(adev, bo_adev))
1168 mapping_flags |= coherent ?
1169 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1172 case IP_VERSION(9, 4, 2):
1173 if (domain == SVM_RANGE_VRAM_DOMAIN) {
1174 if (bo_adev == adev) {
1175 mapping_flags |= coherent ?
1176 AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
1177 if (adev->gmc.xgmi.connected_to_cpu)
1180 mapping_flags |= coherent ?
1181 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1182 if (amdgpu_xgmi_same_hive(adev, bo_adev))
1186 mapping_flags |= coherent ?
1187 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1191 mapping_flags |= coherent ?
1192 AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
1195 mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
1197 if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
1198 mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
1199 if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
1200 mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
1202 pte_flags = AMDGPU_PTE_VALID;
1203 pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
1204 pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
1206 pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags);
1211 svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
1212 uint64_t start, uint64_t last,
1213 struct dma_fence **fence)
1215 uint64_t init_pte_value = 0;
1217 pr_debug("[0x%llx 0x%llx]\n", start, last);
1219 return amdgpu_vm_update_range(adev, vm, false, true, true, NULL, start,
1220 last, init_pte_value, 0, 0, NULL, NULL,
1225 svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
1226 unsigned long last, uint32_t trigger)
1228 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1229 struct kfd_process_device *pdd;
1230 struct dma_fence *fence = NULL;
1231 struct kfd_process *p;
1235 if (!prange->mapped_to_gpu) {
1236 pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
1237 prange, prange->start, prange->last);
1241 if (prange->start == start && prange->last == last) {
1242 pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
1243 prange->mapped_to_gpu = false;
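		/* Only a full-range unmap clears mapped_to_gpu; partial unmaps
		 * keep the flag set because other pages of the range may still
		 * be mapped.
		 */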
1246 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
1248 p = container_of(prange->svms, struct kfd_process, svms);
1250 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1251 pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
1252 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1254 pr_debug("failed to find device idx %d\n", gpuidx);
1258 kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
1259 start, last, trigger);
1261 r = svm_range_unmap_from_gpu(pdd->dev->adev,
1262 drm_priv_to_vm(pdd->drm_priv),
1263 start, last, &fence);
1268 r = dma_fence_wait(fence, false);
1269 dma_fence_put(fence);
1274 kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
1281 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
1282 unsigned long offset, unsigned long npages, bool readonly,
1283 dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
1284 struct dma_fence **fence, bool flush_tlb)
1286 struct amdgpu_device *adev = pdd->dev->adev;
1287 struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
1289 unsigned long last_start;
1294 last_start = prange->start + offset;
1296 pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
1297 last_start, last_start + npages - 1, readonly);
1299 for (i = offset; i < offset + npages; i++) {
1300 last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
1301 dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
1303 /* Collect all pages in the same address range and memory domain
1304 * that can be mapped with a single call to update mapping.
1306 if (i < offset + npages - 1 &&
1307 last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
1310 pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
1311 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
1313 pte_flags = svm_range_get_pte_flags(adev, prange, last_domain);
1315 pte_flags &= ~AMDGPU_PTE_WRITEABLE;
1317 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n",
1318 prange->svms, last_start, prange->start + i,
1319 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
1322 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
1323 last_start, prange->start + i,
1325 (last_start - prange->start) << PAGE_SHIFT,
1326 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0,
1327 NULL, dma_addr, &vm->last_update);
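		/* Put back the VRAM domain bit that was stripped before
		 * mapping, so later passes still know which pages are in VRAM.
		 */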
1329 for (j = last_start - prange->start; j <= i; j++)
1330 dma_addr[j] |= last_domain;
1333 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
1336 last_start = prange->start + i + 1;
1339 r = amdgpu_vm_update_pdes(adev, vm, false);
1341 pr_debug("failed %d to update directories 0x%lx\n", r,
1347 *fence = dma_fence_get(vm->last_update);
1354 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
1355 unsigned long npages, bool readonly,
1356 unsigned long *bitmap, bool wait, bool flush_tlb)
1358 struct kfd_process_device *pdd;
1359 struct amdgpu_device *bo_adev;
1360 struct kfd_process *p;
1361 struct dma_fence *fence = NULL;
1365 if (prange->svm_bo && prange->ttm_res)
1366 bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
1370 p = container_of(prange->svms, struct kfd_process, svms);
1371 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
1372 pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
1373 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1375 pr_debug("failed to find device idx %d\n", gpuidx);
1379 pdd = kfd_bind_process_to_device(pdd->dev, p);
1383 if (bo_adev && pdd->dev->adev != bo_adev &&
1384 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
1385 pr_debug("cannot map to device idx %d\n", gpuidx);
1389 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
1390 prange->dma_addr[gpuidx],
1391 bo_adev, wait ? &fence : NULL,
1397 r = dma_fence_wait(fence, false);
1398 dma_fence_put(fence);
1401 pr_debug("failed %d to dma fence wait\n", r);
1406 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
1412 struct svm_validate_context {
1413 struct kfd_process *process;
1414 struct svm_range *prange;
1416 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
1417 struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
1418 struct list_head validate_list;
1419 struct ww_acquire_ctx ticket;
1422 static int svm_range_reserve_bos(struct svm_validate_context *ctx)
1424 struct kfd_process_device *pdd;
1425 struct amdgpu_vm *vm;
1429 INIT_LIST_HEAD(&ctx->validate_list);
1430 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1431 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1433 pr_debug("failed to find device idx %d\n", gpuidx);
1436 vm = drm_priv_to_vm(pdd->drm_priv);
1438 ctx->tv[gpuidx].bo = &vm->root.bo->tbo;
1439 ctx->tv[gpuidx].num_shared = 4;
1440 list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
1443 r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
1446 pr_debug("failed %d to reserve bo\n", r);
1450 for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
1451 pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
1453 pr_debug("failed to find device idx %d\n", gpuidx);
1458 r = amdgpu_vm_validate_pt_bos(pdd->dev->adev,
1459 drm_priv_to_vm(pdd->drm_priv),
1460 svm_range_bo_validate, NULL);
1462 pr_debug("failed %d validate pt bos\n", r);
1470 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1474 static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
1476 ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list);
1479 static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
1481 struct kfd_process_device *pdd;
1483 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
1485 return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence) before validating and mapping the range.
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
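/*
 * Note: steps 2 and 3 run without the notifier lock, so their results are only
 * committed in step 4 if mmu_interval_read_retry() reports no concurrent CPU
 * invalidation; otherwise the affected address range is validated again.
 */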
1512 static int svm_range_validate_and_map(struct mm_struct *mm,
1513 struct svm_range *prange, int32_t gpuidx,
1514 bool intr, bool wait, bool flush_tlb)
1516 struct svm_validate_context ctx;
1517 unsigned long start, end, addr;
1518 struct kfd_process *p;
1523 ctx.process = container_of(prange->svms, struct kfd_process, svms);
1524 ctx.prange = prange;
1527 if (gpuidx < MAX_GPU_INSTANCE) {
1528 bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE);
1529 bitmap_set(ctx.bitmap, gpuidx, 1);
1530 } else if (ctx.process->xnack_enabled) {
1531 bitmap_copy(ctx.bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
1533 /* If prefetch range to GPU, or GPU retry fault migrate range to
		 * GPU, which has ACCESS attribute to the range, create mapping
		 * on that GPU.
		 */
1537 if (prange->actual_loc) {
1538 gpuidx = kfd_process_gpuidx_from_gpuid(ctx.process,
1539 prange->actual_loc);
1541 WARN_ONCE(1, "failed get device by id 0x%x\n",
1542 prange->actual_loc);
1545 if (test_bit(gpuidx, prange->bitmap_access))
1546 bitmap_set(ctx.bitmap, gpuidx, 1);
1549 bitmap_or(ctx.bitmap, prange->bitmap_access,
1550 prange->bitmap_aip, MAX_GPU_INSTANCE);
1553 if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
1554 if (!prange->mapped_to_gpu)
1557 bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
1560 if (prange->actual_loc && !prange->ttm_res) {
1561 /* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
1564 WARN_ONCE(1, "VRAM BO missing during validation\n");
1568 svm_range_reserve_bos(&ctx);
1570 p = container_of(prange->svms, struct kfd_process, svms);
1571 owner = kfd_svm_page_owner(p, find_first_bit(ctx.bitmap,
1573 for_each_set_bit(idx, ctx.bitmap, MAX_GPU_INSTANCE) {
1574 if (kfd_svm_page_owner(p, idx) != owner) {
1580 start = prange->start << PAGE_SHIFT;
1581 end = (prange->last + 1) << PAGE_SHIFT;
1582 for (addr = start; addr < end && !r; ) {
1583 struct hmm_range *hmm_range;
1584 struct vm_area_struct *vma;
1586 unsigned long offset;
1587 unsigned long npages;
1590 vma = find_vma(mm, addr);
1591 if (!vma || addr < vma->vm_start) {
1595 readonly = !(vma->vm_flags & VM_WRITE);
1597 next = min(vma->vm_end, end);
1598 npages = (next - addr) >> PAGE_SHIFT;
1599 WRITE_ONCE(p->svms.faulting_task, current);
1600 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
1601 addr, npages, &hmm_range,
1602 readonly, true, owner);
1603 WRITE_ONCE(p->svms.faulting_task, NULL);
1605 pr_debug("failed %d to get svm range pages\n", r);
1609 offset = (addr - start) >> PAGE_SHIFT;
1610 r = svm_range_dma_map(prange, ctx.bitmap, offset, npages,
1611 hmm_range->hmm_pfns);
1613 pr_debug("failed %d to dma map range\n", r);
1617 svm_range_lock(prange);
1618 if (amdgpu_hmm_range_get_pages_done(hmm_range)) {
1619 pr_debug("hmm update the range, need validate again\n");
1623 if (!list_empty(&prange->child_list)) {
1624 pr_debug("range split by unmap in parallel, validate again\n");
1629 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
1630 ctx.bitmap, wait, flush_tlb);
1633 svm_range_unlock(prange);
1639 prange->validated_once = true;
1640 prange->mapped_to_gpu = true;
1644 svm_range_unreserve_bos(&ctx);
1647 prange->validate_timestamp = ktime_get_boottime();
/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 */
1662 svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
1663 struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}
1676 static void svm_range_restore_work(struct work_struct *work)
1678 struct delayed_work *dwork = to_delayed_work(work);
1679 struct amdkfd_process_info *process_info;
1680 struct svm_range_list *svms;
1681 struct svm_range *prange;
1682 struct kfd_process *p;
1683 struct mm_struct *mm;
1688 svms = container_of(dwork, struct svm_range_list, restore_work);
1689 evicted_ranges = atomic_read(&svms->evicted_ranges);
1690 if (!evicted_ranges)
1693 pr_debug("restore svm ranges\n");
1695 p = container_of(svms, struct kfd_process, svms);
1696 process_info = p->kgd_process_info;
1698 /* Keep mm reference when svm_range_validate_and_map ranges */
1699 mm = get_task_mm(p->lead_thread);
1701 pr_debug("svms 0x%p process mm gone\n", svms);
1705 mutex_lock(&process_info->lock);
1706 svm_range_list_lock_and_flush_work(svms, mm);
1707 mutex_lock(&svms->lock);
1709 evicted_ranges = atomic_read(&svms->evicted_ranges);
1711 list_for_each_entry(prange, &svms->list, list) {
1712 invalid = atomic_read(&prange->invalid);
1716 pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
1717 prange->svms, prange, prange->start, prange->last,
		/* If the range is migrating, wait for the migration to finish.
		 */
1723 mutex_lock(&prange->migrate_mutex);
1725 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
1726 false, true, false);
1728 pr_debug("failed %d to map 0x%lx to gpus\n", r,
1731 mutex_unlock(&prange->migrate_mutex);
1733 goto out_reschedule;
1735 if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
1736 goto out_reschedule;
1739 if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
1741 goto out_reschedule;
1745 r = kgd2kfd_resume_mm(mm);
1747 /* No recovery from this failure. Probably the CP is
1748 * hanging. No point trying again.
1750 pr_debug("failed %d to resume KFD\n", r);
1753 pr_debug("restore svm ranges successfully\n");
1756 mutex_unlock(&svms->lock);
1757 mmap_write_unlock(mm);
1758 mutex_unlock(&process_info->lock);
1760 /* If validation failed, reschedule another attempt */
1761 if (evicted_ranges) {
1762 pr_debug("reschedule to restore svm range\n");
1763 schedule_delayed_work(&svms->restore_work,
1764 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1766 kfd_smi_event_queue_restore_rescheduled(mm);
/**
 * svm_range_evict - evict svm range
 * @prange: svm range structure
 * @mm: current process mm_struct
 * @start: start address of the range to evict, in pages
 * @last: last address of the range to evict, in pages
 * @event: mmu notifier event triggering the eviction
 *
 * Stop all queues of the process to ensure GPU doesn't access the memory, then
 * return to let CPU evict the buffer and proceed with the CPU pagetable update.
 *
 * No lock is needed to sync the CPU pagetable invalidation with GPU execution.
 * If the invalidation happens while restore work is running, restore work will
 * restart to pick up the latest CPU page mappings for the GPU before starting
 * the queues again.
 */
static int
1787 svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
1788 unsigned long start, unsigned long last,
1789 enum mmu_notifier_event event)
1791 struct svm_range_list *svms = prange->svms;
1792 struct svm_range *pchild;
1793 struct kfd_process *p;
1796 p = container_of(svms, struct kfd_process, svms);
1798 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
1799 svms, prange->start, prange->last, start, last);
1801 if (!p->xnack_enabled ||
1802 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) {
1804 bool mapped = prange->mapped_to_gpu;
1806 list_for_each_entry(pchild, &prange->child_list, child_list) {
1807 if (!pchild->mapped_to_gpu)
1810 mutex_lock_nested(&pchild->lock, 1);
1811 if (pchild->start <= last && pchild->last >= start) {
1812 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
1813 pchild->start, pchild->last);
1814 atomic_inc(&pchild->invalid);
1816 mutex_unlock(&pchild->lock);
1822 if (prange->start <= last && prange->last >= start)
1823 atomic_inc(&prange->invalid);
1825 evicted_ranges = atomic_inc_return(&svms->evicted_ranges);
1826 if (evicted_ranges != 1)
1829 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n",
1830 prange->svms, prange->start, prange->last);
1832 /* First eviction, stop the queues */
1833 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
1835 pr_debug("failed to quiesce KFD\n");
1837 pr_debug("schedule to restore svm %p ranges\n", svms);
1838 schedule_delayed_work(&svms->restore_work,
1839 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));
1844 if (event == MMU_NOTIFY_MIGRATE)
1845 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE;
1847 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY;
1849 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n",
1850 prange->svms, start, last);
1851 list_for_each_entry(pchild, &prange->child_list, child_list) {
1852 mutex_lock_nested(&pchild->lock, 1);
1853 s = max(start, pchild->start);
1854 l = min(last, pchild->last);
1856 svm_range_unmap_from_gpus(pchild, s, l, trigger);
1857 mutex_unlock(&pchild->lock);
1859 s = max(start, prange->start);
1860 l = min(last, prange->last);
1862 svm_range_unmap_from_gpus(prange, s, l, trigger);
1868 static struct svm_range *svm_range_clone(struct svm_range *old)
1870 struct svm_range *new;
1872 new = svm_range_new(old->svms, old->start, old->last, false);
1877 new->ttm_res = old->ttm_res;
1878 new->offset = old->offset;
1879 new->svm_bo = svm_range_bo_ref(old->svm_bo);
1880 spin_lock(&new->svm_bo->list_lock);
1881 list_add(&new->svm_bo_list, &new->svm_bo->range_list);
1882 spin_unlock(&new->svm_bo->list_lock);
1884 new->flags = old->flags;
1885 new->preferred_loc = old->preferred_loc;
1886 new->prefetch_loc = old->prefetch_loc;
1887 new->actual_loc = old->actual_loc;
1888 new->granularity = old->granularity;
1889 new->mapped_to_gpu = old->mapped_to_gpu;
1890 bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
1891 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);
1896 void svm_range_set_max_pages(struct amdgpu_device *adev)
1899 uint64_t pages, _pages;
1901 /* 1/32 VRAM size in pages */
1902 pages = adev->gmc.real_vram_size >> 17;
1903 pages = clamp(pages, 1ULL << 9, 1ULL << 18);
1904 pages = rounddown_pow_of_two(pages);
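	/* Publish the new limit without a lock: retry the cmpxchg until
	 * max_svm_range_pages holds the smallest non-zero value reported by
	 * any GPU.
	 */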
	do {
		max_pages = READ_ONCE(max_svm_range_pages);
		_pages = min_not_zero(max_pages, pages);
	} while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages);
}

static int
1912 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
1913 uint64_t max_pages, struct list_head *insert_list,
1914 struct list_head *update_list)
1916 struct svm_range *prange;
1919 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n",
1920 max_pages, start, last);
1922 while (last >= start) {
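		/* End each new range at the next max_pages-aligned boundary
		 * (or at last), so no single range grows beyond max_pages
		 * pages.
		 */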
1923 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1);
1925 prange = svm_range_new(svms, start, l, true);
1928 list_add(&prange->list, insert_list);
1929 list_add(&prange->update_list, update_list);
1937 * svm_range_add - add svm range and handle overlap
1938 * @p: the range add to this process svms
1939 * @start: page size aligned
1940 * @size: page size aligned
1941 * @nattr: number of attributes
1942 * @attrs: array of attributes
1943 * @update_list: output, the ranges need validate and update GPU mapping
1944 * @insert_list: output, the ranges need insert to svms
1945 * @remove_list: output, the ranges are replaced and need remove from svms
1947 * Check if the virtual address range has overlap with any existing ranges,
1948 * split partly overlapping ranges and add new ranges in the gaps. All changes
1949 * should be applied to the range_list and interval tree transactionally. If
1950 * any range split or allocation fails, the entire update fails. Therefore any
 * existing overlapping svm_ranges are cloned and the original svm_ranges left
 * unchanged.
 *
1954 * If the transaction succeeds, the caller can update and insert clones and
1955 * new ranges, then free the originals.
1957 * Otherwise the caller can free the clones and new ranges, while the old
1958 * svm_ranges remain unchanged.
 * Context: Process context, caller must hold svms->lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
1966 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
1967 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
1968 struct list_head *update_list, struct list_head *insert_list,
1969 struct list_head *remove_list)
1971 unsigned long last = start + size - 1UL;
1972 struct svm_range_list *svms = &p->svms;
1973 struct interval_tree_node *node;
1974 struct svm_range *prange;
1975 struct svm_range *tmp;
1976 struct list_head new_list;
1979 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
1981 INIT_LIST_HEAD(update_list);
1982 INIT_LIST_HEAD(insert_list);
1983 INIT_LIST_HEAD(remove_list);
1984 INIT_LIST_HEAD(&new_list);
1986 node = interval_tree_iter_first(&svms->objects, start, last);
1988 struct interval_tree_node *next;
1989 unsigned long next_start;
1991 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start,
1994 prange = container_of(node, struct svm_range, it_node);
1995 next = interval_tree_iter_next(node, start, last);
1996 next_start = min(node->last, last) + 1;
1998 if (svm_range_is_same_attrs(p, prange, nattr, attrs)) {
2000 } else if (node->start < start || node->last > last) {
2001 /* node intersects the update range and its attributes
2002 * will change. Clone and split it, apply updates only
			 * to the overlapping part.
			 */
2005 struct svm_range *old = prange;
2007 prange = svm_range_clone(old);
2013 list_add(&old->update_list, remove_list);
2014 list_add(&prange->list, insert_list);
2015 list_add(&prange->update_list, update_list);
2017 if (node->start < start) {
2018 pr_debug("change old range start\n");
2019 r = svm_range_split_head(prange, start,
2024 if (node->last > last) {
2025 pr_debug("change old range last\n");
2026 r = svm_range_split_tail(prange, last,
			/* The node is contained within start..last,
			 * apply the update to the whole range.
			 */
2035 list_add(&prange->update_list, update_list);
2038 /* insert a new node if needed */
2039 if (node->start > start) {
2040 r = svm_range_split_new(svms, start, node->start - 1,
2041 READ_ONCE(max_svm_range_pages),
2042 &new_list, update_list);
2051 /* add a final range at the end if needed */
2053 r = svm_range_split_new(svms, start, last,
2054 READ_ONCE(max_svm_range_pages),
2055 &new_list, update_list);
2059 list_for_each_entry_safe(prange, tmp, insert_list, list)
2060 svm_range_free(prange, false);
2061 list_for_each_entry_safe(prange, tmp, &new_list, list)
2062 svm_range_free(prange, true);
2064 list_splice(&new_list, insert_list);
2071 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
2072 struct svm_range *prange)
2074 unsigned long start;
2077 start = prange->notifier.interval_tree.start >> PAGE_SHIFT;
2078 last = prange->notifier.interval_tree.last >> PAGE_SHIFT;
2080 if (prange->start == start && prange->last == last)
2083 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n",
2084 prange->svms, prange, start, last, prange->start,
2087 if (start != 0 && last != 0) {
2088 interval_tree_remove(&prange->it_node, &prange->svms->objects);
2089 svm_range_remove_notifier(prange);
2091 prange->it_node.start = prange->start;
2092 prange->it_node.last = prange->last;
2094 interval_tree_insert(&prange->it_node, &prange->svms->objects);
2095 svm_range_add_notifier_locked(mm, prange);
2099 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange,
2100 struct mm_struct *mm)
2102 switch (prange->work_item.op) {
2104 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2105 svms, prange, prange->start, prange->last);
2107 case SVM_OP_UNMAP_RANGE:
2108 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2109 svms, prange, prange->start, prange->last);
2110 svm_range_unlink(prange);
2111 svm_range_remove_notifier(prange);
2112 svm_range_free(prange, true);
2114 case SVM_OP_UPDATE_RANGE_NOTIFIER:
2115 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2116 svms, prange, prange->start, prange->last);
2117 svm_range_update_notifier_and_interval_tree(mm, prange);
2119 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP:
2120 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n",
2121 svms, prange, prange->start, prange->last);
2122 svm_range_update_notifier_and_interval_tree(mm, prange);
2123 /* TODO: implement deferred validation and mapping */
2125 case SVM_OP_ADD_RANGE:
2126 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange,
2127 prange->start, prange->last);
2128 svm_range_add_to_svms(prange);
2129 svm_range_add_notifier_locked(mm, prange);
2131 case SVM_OP_ADD_RANGE_AND_MAP:
2132 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms,
2133 prange, prange->start, prange->last);
2134 svm_range_add_to_svms(prange);
2135 svm_range_add_notifier_locked(mm, prange);
2136 /* TODO: implement deferred validation and mapping */
2139 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange,
2140 prange->work_item.op);
2144 static void svm_range_drain_retry_fault(struct svm_range_list *svms)
2146 struct kfd_process_device *pdd;
2147 struct kfd_process *p;
2151 p = container_of(svms, struct kfd_process, svms);
2154 drain = atomic_read(&svms->drain_pagefaults);
2158 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
2163 pr_debug("drain retry fault gpu %d svms %p\n", i, svms);
2165 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev,
2166 &pdd->dev->adev->irq.ih1);
2167 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
2169 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
2173 static void svm_range_deferred_list_work(struct work_struct *work)
2175 struct svm_range_list *svms;
2176 struct svm_range *prange;
2177 struct mm_struct *mm;
2179 svms = container_of(work, struct svm_range_list, deferred_list_work);
2180 pr_debug("enter svms 0x%p\n", svms);
2182 spin_lock(&svms->deferred_list_lock);
2183 while (!list_empty(&svms->deferred_range_list)) {
2184 prange = list_first_entry(&svms->deferred_range_list,
2185 struct svm_range, deferred_list);
2186 spin_unlock(&svms->deferred_list_lock);
2188 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
2189 prange->start, prange->last, prange->work_item.op);
2191 mm = prange->work_item.mm;
2193 mmap_write_lock(mm);
2195 /* Checking for the need to drain retry faults must be inside
		 * mmap write lock to serialize with munmap notifiers.
		 */
2198 if (unlikely(atomic_read(&svms->drain_pagefaults))) {
2199 mmap_write_unlock(mm);
2200 svm_range_drain_retry_fault(svms);
		/* Remove from deferred_list must be inside mmap write lock, for
		 * two reasons:
		 * 1. unmap_from_cpu may change work_item.op and add the range
		 *    to deferred_list again, causing a use-after-free bug.
		 * 2. svm_range_list_lock_and_flush_work may hold mmap write
		 *    lock and continue because deferred_list is empty, but
		 *    deferred_list work is actually waiting for mmap lock.
		 */
2212 spin_lock(&svms->deferred_list_lock);
2213 list_del_init(&prange->deferred_list);
2214 spin_unlock(&svms->deferred_list_lock);
2216 mutex_lock(&svms->lock);
2217 mutex_lock(&prange->migrate_mutex);
2218 while (!list_empty(&prange->child_list)) {
2219 struct svm_range *pchild;
2221 pchild = list_first_entry(&prange->child_list,
2222 struct svm_range, child_list);
2223 pr_debug("child prange 0x%p op %d\n", pchild,
2224 pchild->work_item.op);
2225 list_del_init(&pchild->child_list);
2226 svm_range_handle_list_op(svms, pchild, mm);
2228 mutex_unlock(&prange->migrate_mutex);
2230 svm_range_handle_list_op(svms, prange, mm);
2231 mutex_unlock(&svms->lock);
2232 mmap_write_unlock(mm);
		/* Pairs with mmget in svm_range_add_list_work */
		mmput(mm);

		spin_lock(&svms->deferred_list_lock);
	}
	spin_unlock(&svms->deferred_list_lock);
2240 pr_debug("exit svms 0x%p\n", svms);
2244 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
2245 struct mm_struct *mm, enum svm_work_list_ops op)
2247 spin_lock(&svms->deferred_list_lock);
2248 /* if prange is on the deferred list */
2249 if (!list_empty(&prange->deferred_list)) {
2250 pr_debug("update exist prange 0x%p work op %d\n", prange, op);
2251 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
		if (op != SVM_OP_NULL &&
		    prange->work_item.op != SVM_OP_UNMAP_RANGE)
			prange->work_item.op = op;
	} else {
		prange->work_item.op = op;

		/* Pairs with mmput in deferred_list_work */
		mmget(mm);
		prange->work_item.mm = mm;
2261 list_add_tail(&prange->deferred_list,
2262 &prange->svms->deferred_range_list);
2263 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n",
2264 prange, prange->start, prange->last, op);
2266 spin_unlock(&svms->deferred_list_lock);
2269 void schedule_deferred_list_work(struct svm_range_list *svms)
2271 spin_lock(&svms->deferred_list_lock);
2272 if (!list_empty(&svms->deferred_range_list))
2273 schedule_work(&svms->deferred_list_work);
2274 spin_unlock(&svms->deferred_list_lock);
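
/*
 * Helper for the CPU unmap path: splits prange (or one of its children) so
 * that only the part overlapping [start, last] is torn down. The overlapping
 * piece is queued as an SVM_OP_UNMAP_RANGE child; a surviving piece that had
 * to be carved out as a new range is queued as SVM_OP_ADD_RANGE so it gets
 * re-registered.
 */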
2278 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent,
2279 struct svm_range *prange, unsigned long start,
2282 struct svm_range *head;
2283 struct svm_range *tail;
	if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange,
			 prange->start, prange->last);
		return;
	}

	if (start > prange->last || last < prange->start)
		return;
2293 head = tail = prange;
2294 if (start > prange->start)
2295 svm_range_split(prange, prange->start, start - 1, &tail);
2296 if (last < tail->last)
2297 svm_range_split(tail, last + 1, tail->last, &head);
2299 if (head != prange && tail != prange) {
2300 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2301 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
2302 } else if (tail != prange) {
2303 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE);
2304 } else if (head != prange) {
2305 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE);
2306 } else if (parent != prange) {
2307 prange->work_item.op = SVM_OP_UNMAP_RANGE;
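
/*
 * svm_range_unmap_from_cpu - handle a CPU-side unmap of part of a range
 *
 * Called from the MMU interval notifier. Unmaps the affected pages from all
 * GPUs right away, then defers list and interval-tree maintenance (and the
 * final free of the range) to the deferred list worker.
 */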
2312 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
2313 unsigned long start, unsigned long last)
2315 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU;
2316 struct svm_range_list *svms;
2317 struct svm_range *pchild;
2318 struct kfd_process *p;
2322 p = kfd_lookup_process_by_mm(mm);
2327 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
2328 prange, prange->start, prange->last, start, last);
2330 /* Make sure pending page faults are drained in the deferred worker
2331 * before the range is freed to avoid straggler interrupts on
2332 * unmapped memory causing "phantom faults".
2334 atomic_inc(&svms->drain_pagefaults);
2336 unmap_parent = start <= prange->start && last >= prange->last;
2338 list_for_each_entry(pchild, &prange->child_list, child_list) {
2339 mutex_lock_nested(&pchild->lock, 1);
2340 s = max(start, pchild->start);
2341 l = min(last, pchild->last);
2343 svm_range_unmap_from_gpus(pchild, s, l, trigger);
2344 svm_range_unmap_split(mm, prange, pchild, start, last);
2345 mutex_unlock(&pchild->lock);
2347 s = max(start, prange->start);
	l = min(last, prange->last);
	if (l >= s)
		svm_range_unmap_from_gpus(prange, s, l, trigger);
	svm_range_unmap_split(mm, prange, prange, start, last);

	if (unmap_parent)
		svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
	else
		svm_range_add_list_work(svms, prange, mm,
					SVM_OP_UPDATE_RANGE_NOTIFIER);
2358 schedule_deferred_list_work(svms);
2360 kfd_unref_process(p);
2364 * svm_range_cpu_invalidate_pagetables - interval notifier callback
2365 * @mni: mmu_interval_notifier struct
2366 * @range: mmu_notifier_range struct
2367 * @cur_seq: value to pass to mmu_interval_set_seq()
2369 * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it
2370 * is from migration, or CPU page invalidation callback.
2372 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed
2373 * work thread, and split prange if only part of prange is unmapped.
2375 * For invalidation event, if GPU retry fault is not enabled, evict the queues,
2376 * then schedule svm_range_restore_work to update GPU mapping and resume queues.
2377 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will
2378 * update GPU mapping to recover.
2380 * Context: mmap lock, notifier_invalidate_start lock are held
2381 * for invalidate event, prange lock is held if this is from migration
2384 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
2385 const struct mmu_notifier_range *range,
2386 unsigned long cur_seq)
2388 struct svm_range *prange;
2389 unsigned long start;
	if (range->event == MMU_NOTIFY_RELEASE)
		return true;
	if (!mmget_not_zero(mni->mm))
		return true;
2397 start = mni->interval_tree.start;
2398 last = mni->interval_tree.last;
2399 start = max(start, range->start) >> PAGE_SHIFT;
2400 last = min(last, range->end - 1) >> PAGE_SHIFT;
2401 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n",
2402 start, last, range->start >> PAGE_SHIFT,
2403 (range->end - 1) >> PAGE_SHIFT,
2404 mni->interval_tree.start >> PAGE_SHIFT,
2405 mni->interval_tree.last >> PAGE_SHIFT, range->event);
2407 prange = container_of(mni, struct svm_range, notifier);
2409 svm_range_lock(prange);
2410 mmu_interval_set_seq(mni, cur_seq);
2412 switch (range->event) {
	case MMU_NOTIFY_UNMAP:
		svm_range_unmap_from_cpu(mni->mm, prange, start, last);
		break;
	default:
		svm_range_evict(prange, mni->mm, start, last, range->event);
		break;
	}

	svm_range_unlock(prange);
	mmput(mni->mm);

	return true;
}
2428 * svm_range_from_addr - find svm range from fault address
2429 * @svms: svm range list header
2430 * @addr: address to search range interval tree, in pages
2431 * @parent: parent range if range is on child list
2433 * Context: The caller must hold svms->lock
2435 * Return: the svm_range found or NULL
2438 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr,
2439 struct svm_range **parent)
2441 struct interval_tree_node *node;
2442 struct svm_range *prange;
2443 struct svm_range *pchild;
2445 node = interval_tree_iter_first(&svms->objects, addr, addr);
2449 prange = container_of(node, struct svm_range, it_node);
2450 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n",
2451 addr, prange->start, prange->last, node->start, node->last);
2453 if (addr >= prange->start && addr <= prange->last) {
2458 list_for_each_entry(pchild, &prange->child_list, child_list)
2459 if (addr >= pchild->start && addr <= pchild->last) {
2460 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
2461 addr, pchild->start, pchild->last);
2470 /* svm_range_best_restore_location - decide the best fault restore location
2471 * @prange: svm range structure
2472 * @adev: the GPU on which vm fault happened
2474 * This is only called when xnack is on, to decide the best location to restore
2475 * the range mapping after GPU vm fault. Caller uses the best location to do
2476 * migration if actual loc is not best location, then update GPU page table
2477 * mapping to the best location.
2479 * If the preferred loc is accessible by faulting GPU, use preferred loc.
2480 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu
2481 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then
2482 * if range actual loc is cpu, best_loc is cpu
 *    if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is
 *    range actual loc.
 * Otherwise, the GPU has no access and best_loc is -1.
 *
 * Return:
 * -1 means vm fault GPU has no access
 * 0 for CPU or GPU id
 */
2492 svm_range_best_restore_location(struct svm_range *prange,
2493 struct amdgpu_device *adev,
2496 struct amdgpu_device *bo_adev, *preferred_adev;
2497 struct kfd_process *p;
2501 p = container_of(prange->svms, struct kfd_process, svms);
2503 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx);
2505 pr_debug("failed to get gpuid from kgd\n");
2509 if (prange->preferred_loc == gpuid ||
2510 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) {
2511 return prange->preferred_loc;
2512 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
2513 preferred_adev = svm_range_get_adev_by_id(prange,
2514 prange->preferred_loc);
2515 if (amdgpu_xgmi_same_hive(adev, preferred_adev))
2516 return prange->preferred_loc;
2520 if (test_bit(*gpuidx, prange->bitmap_access))
2523 if (test_bit(*gpuidx, prange->bitmap_aip)) {
2524 if (!prange->actual_loc)
2527 bo_adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
2528 if (amdgpu_xgmi_same_hive(adev, bo_adev))
2529 return prange->actual_loc;
2538 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
2539 unsigned long *start, unsigned long *last,
2540 bool *is_heap_stack)
2542 struct vm_area_struct *vma;
2543 struct interval_tree_node *node;
2544 unsigned long start_limit, end_limit;
2546 vma = find_vma(p->mm, addr << PAGE_SHIFT);
	if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
		pr_debug("VMA does not exist in address [0x%llx]\n", addr);
		return -EFAULT;
	}
2552 *is_heap_stack = (vma->vm_start <= vma->vm_mm->brk &&
2553 vma->vm_end >= vma->vm_mm->start_brk) ||
2554 (vma->vm_start <= vma->vm_mm->start_stack &&
2555 vma->vm_end >= vma->vm_mm->start_stack);
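
	/* 2UL << 8 is 512 pages, i.e. 2MB with 4KB pages: the range created
	 * for this fault is first clamped to a 2MB-aligned window inside the
	 * VMA, then further bounded by neighbouring svm ranges below.
	 */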
2557 start_limit = max(vma->vm_start >> PAGE_SHIFT,
2558 (unsigned long)ALIGN_DOWN(addr, 2UL << 8));
2559 end_limit = min(vma->vm_end >> PAGE_SHIFT,
2560 (unsigned long)ALIGN(addr + 1, 2UL << 8));
2561 /* First range that starts after the fault address */
2562 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX);
2564 end_limit = min(end_limit, node->start);
2565 /* Last range that ends before the fault address */
2566 node = container_of(rb_prev(&node->rb),
2567 struct interval_tree_node, rb);
2569 /* Last range must end before addr because
2570 * there was no range after addr
2572 node = container_of(rb_last(&p->svms.objects.rb_root),
2573 struct interval_tree_node, rb);
2576 if (node->last >= addr) {
2577 WARN(1, "Overlap with prev node and page fault addr\n");
2580 start_limit = max(start_limit, node->last + 1);
2583 *start = start_limit;
2584 *last = end_limit - 1;
2586 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n",
2587 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> PAGE_SHIFT,
2588 *start, *last, *is_heap_stack);
2594 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
2595 uint64_t *bo_s, uint64_t *bo_l)
2597 struct amdgpu_bo_va_mapping *mapping;
2598 struct interval_tree_node *node;
2599 struct amdgpu_bo *bo = NULL;
2600 unsigned long userptr;
2604 for (i = 0; i < p->n_pdds; i++) {
2605 struct amdgpu_vm *vm;
2607 if (!p->pdds[i]->drm_priv)
2610 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
2611 r = amdgpu_bo_reserve(vm->root.bo, false);
2615 /* Check userptr by searching entire vm->va interval tree */
2616 node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
2618 mapping = container_of((struct rb_node *)node,
2619 struct amdgpu_bo_va_mapping, rb);
2620 bo = mapping->bo_va->base.bo;
2622 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
2623 start << PAGE_SHIFT,
2626 node = interval_tree_iter_next(node, 0, ~0ULL);
2630 pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
2633 *bo_s = userptr >> PAGE_SHIFT;
2634 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
2636 amdgpu_bo_unreserve(vm->root.bo);
2639 amdgpu_bo_unreserve(vm->root.bo);
2645 svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
2646 struct kfd_process *p,
2647 struct mm_struct *mm,
2650 struct svm_range *prange = NULL;
2651 unsigned long start, last;
2652 uint32_t gpuid, gpuidx;
	if (svm_range_get_range_boundaries(p, addr, &start, &last,
					   &is_heap_stack))
		return NULL;
2662 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
2663 if (r != -EADDRINUSE)
2664 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
2666 if (r == -EADDRINUSE) {
		if (addr >= bo_s && addr <= bo_l)
			return NULL;

		/* The aligned 2MB range overlaps an existing mapping; fall
		 * back to a one-page svm range at the fault address.
		 */
		start = addr;
		last = addr;
	}
2675 prange = svm_range_new(&p->svms, start, last, true);
2677 pr_debug("Failed to create prange in address [0x%llx]\n", addr);
2680 if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) {
2681 pr_debug("failed to get gpuid from kgd\n");
2682 svm_range_free(prange, true);
2687 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM;
2689 svm_range_add_to_svms(prange);
2690 svm_range_add_notifier_locked(mm, prange);
2695 /* svm_range_skip_recover - decide if prange can be recovered
2696 * @prange: svm range structure
 * The GPU vm retry fault handler skips recovering the range in these cases:
 * 1. prange is on the deferred list to be removed after unmap; it is a stale
 *    fault, and the deferred list work will drain the stale fault before
 *    freeing the prange.
 * 2. prange is on the deferred list waiting to add its interval notifier
 *    after a split, or
 * 3. prange is a child range split from a parent prange; recover later, after
 *    the interval notifier is added.
2705 * Return: true to skip recover, false to recover
2707 static bool svm_range_skip_recover(struct svm_range *prange)
2709 struct svm_range_list *svms = prange->svms;
2711 spin_lock(&svms->deferred_list_lock);
2712 if (list_empty(&prange->deferred_list) &&
2713 list_empty(&prange->child_list)) {
2714 spin_unlock(&svms->deferred_list_lock);
2717 spin_unlock(&svms->deferred_list_lock);
2719 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) {
2720 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n",
2721 svms, prange, prange->start, prange->last);
2724 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP ||
2725 prange->work_item.op == SVM_OP_ADD_RANGE) {
2726 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n",
2727 svms, prange, prange->start, prange->last);
2734 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
2737 struct kfd_process_device *pdd;
2739 /* fault is on different page of same range
2740 * or fault is skipped to recover later
2741 * or fault is on invalid virtual address
2743 if (gpuidx == MAX_GPU_INSTANCE) {
2747 r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx);
2752 /* fault is recovered
2753 * or fault cannot recover because GPU no access on the range
2755 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
	if (pdd)
		WRITE_ONCE(pdd->faults, pdd->faults + 1);
}
2761 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
2763 unsigned long requested = VM_READ;
	if (write_fault)
		requested |= VM_WRITE;
2768 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
2770 return (vma->vm_flags & requested) == requested;
2774 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
2775 uint64_t addr, bool write_fault)
2777 struct mm_struct *mm = NULL;
2778 struct svm_range_list *svms;
2779 struct svm_range *prange;
2780 struct kfd_process *p;
2781 ktime_t timestamp = ktime_get_boottime();
2783 int32_t gpuidx = MAX_GPU_INSTANCE;
2784 bool write_locked = false;
2785 struct vm_area_struct *vma;
2786 bool migration = false;
2789 if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
2790 pr_debug("device does not support SVM\n");
2794 p = kfd_lookup_process_by_pasid(pasid);
		pr_debug("kfd process not found, pasid 0x%x\n", pasid);
2801 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
2803 if (atomic_read(&svms->drain_pagefaults)) {
2804 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
2809 if (!p->xnack_enabled) {
2810 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
	/* p->lead_thread is available because kfd_process_wq_release flushes
	 * the work before releasing the task ref.
	 */
2818 mm = get_task_mm(p->lead_thread);
2820 pr_debug("svms 0x%p failed to get mm\n", svms);
2827 mutex_lock(&svms->lock);
2828 prange = svm_range_from_addr(svms, addr, NULL);
2830 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n",
2832 if (!write_locked) {
2833 /* Need the write lock to create new range with MMU notifier.
2834 * Also flush pending deferred work to make sure the interval
2835 * tree is up to date before we add a new range
2837 mutex_unlock(&svms->lock);
2838 mmap_read_unlock(mm);
2839 mmap_write_lock(mm);
2840 write_locked = true;
2841 goto retry_write_locked;
2843 prange = svm_range_create_unregistered_range(adev, p, mm, addr);
2845 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n",
2847 mmap_write_downgrade(mm);
2849 goto out_unlock_svms;
		}
	}

	if (write_locked)
		mmap_write_downgrade(mm);
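
	/* Any newly created range is registered at this point; the mmap write
	 * lock, taken only to insert the MMU notifier, has been downgraded
	 * back to a read lock for validation and mapping.
	 */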
2855 mutex_lock(&prange->migrate_mutex);
2857 if (svm_range_skip_recover(prange)) {
2858 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2860 goto out_unlock_range;
2863 /* skip duplicate vm fault on different pages of same range */
2864 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp,
2865 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) {
2866 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
2867 svms, prange->start, prange->last);
2869 goto out_unlock_range;
	/* __do_munmap removed the VMA; return success, as we are handling a
	 * stale retry fault.
	 */
2875 vma = find_vma(mm, addr << PAGE_SHIFT);
2876 if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
2877 pr_debug("address 0x%llx VMA is removed\n", addr);
2879 goto out_unlock_range;
2882 if (!svm_fault_allowed(vma, write_fault)) {
2883 pr_debug("fault addr 0x%llx no %s permission\n", addr,
2884 write_fault ? "write" : "read");
2886 goto out_unlock_range;
2889 best_loc = svm_range_best_restore_location(prange, adev, &gpuidx);
2890 if (best_loc == -1) {
2891 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n",
2892 svms, prange->start, prange->last);
2894 goto out_unlock_range;
2897 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n",
2898 svms, prange->start, prange->last, best_loc,
2899 prange->actual_loc);
2901 kfd_smi_event_page_fault_start(adev->kfd.dev, p->lead_thread->pid, addr,
2902 write_fault, timestamp);
	if (prange->actual_loc != best_loc) {
		migration = true;
		if (best_loc) {
2907 r = svm_migrate_to_vram(prange, best_loc, mm,
2908 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
2910 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n",
				/* Fall back to system memory if migration to
				 * VRAM failed
				 */
2915 if (prange->actual_loc)
2916 r = svm_migrate_vram_to_ram(prange, mm,
2917 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
2922 r = svm_migrate_vram_to_ram(prange, mm,
2923 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
2926 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n",
2927 r, svms, prange->start, prange->last);
2928 goto out_unlock_range;
2932 r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
2934 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
2935 r, svms, prange->start, prange->last);
	kfd_smi_event_page_fault_end(adev->kfd.dev, p->lead_thread->pid, addr,
				     migration);
2941 mutex_unlock(&prange->migrate_mutex);
2943 mutex_unlock(&svms->lock);
2944 mmap_read_unlock(mm);
2946 svm_range_count_fault(adev, p, gpuidx);
2950 kfd_unref_process(p);
2953 pr_debug("recover vm fault later\n");
2954 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
2960 void svm_range_list_fini(struct kfd_process *p)
2962 struct svm_range *prange;
2963 struct svm_range *next;
2965 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms);
2967 cancel_delayed_work_sync(&p->svms.restore_work);
2969 /* Ensure list work is finished before process is destroyed */
2970 flush_work(&p->svms.deferred_list_work);
	/*
	 * Ensure no retry fault comes in afterwards, as the page fault handler
	 * will no longer find the kfd process and cannot take the mm lock to
	 * recover the fault.
	 */
2976 atomic_inc(&p->svms.drain_pagefaults);
2977 svm_range_drain_retry_fault(&p->svms);
2979 list_for_each_entry_safe(prange, next, &p->svms.list, list) {
2980 svm_range_unlink(prange);
2981 svm_range_remove_notifier(prange);
2982 svm_range_free(prange, true);
2985 mutex_destroy(&p->svms.lock);
2987 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms);
2990 int svm_range_list_init(struct kfd_process *p)
2992 struct svm_range_list *svms = &p->svms;
2995 svms->objects = RB_ROOT_CACHED;
2996 mutex_init(&svms->lock);
2997 INIT_LIST_HEAD(&svms->list);
2998 atomic_set(&svms->evicted_ranges, 0);
2999 atomic_set(&svms->drain_pagefaults, 0);
3000 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
3001 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
3002 INIT_LIST_HEAD(&svms->deferred_range_list);
3003 INIT_LIST_HEAD(&svms->criu_svm_metadata_list);
3004 spin_lock_init(&svms->deferred_list_lock);
3006 for (i = 0; i < p->n_pdds; i++)
3007 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev))
			bitmap_set(svms->bitmap_supported, i, 1);

	return 0;
}
3014 * svm_range_check_vm - check if virtual address range mapped already
3015 * @p: current kfd_process
3016 * @start: range start address, in pages
3017 * @last: range last address, in pages
3018 * @bo_s: mapping start address in pages if address range already mapped
3019 * @bo_l: mapping last address in pages if address range already mapped
3021 * The purpose is to avoid virtual address ranges already allocated by
3022 * kfd_ioctl_alloc_memory_of_gpu ioctl.
3023 * It looks for each pdd in the kfd_process.
3025 * Context: Process context
3027 * Return 0 - OK, if the range is not mapped.
3028 * Otherwise error code:
3029 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
3030 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
3031 * a signal. Release all buffer reservations and return to user-space.
3034 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
3035 uint64_t *bo_s, uint64_t *bo_l)
3037 struct amdgpu_bo_va_mapping *mapping;
3038 struct interval_tree_node *node;
3042 for (i = 0; i < p->n_pdds; i++) {
3043 struct amdgpu_vm *vm;
3045 if (!p->pdds[i]->drm_priv)
3048 vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
3049 r = amdgpu_bo_reserve(vm->root.bo, false);
3053 node = interval_tree_iter_first(&vm->va, start, last);
3055 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
3057 mapping = container_of((struct rb_node *)node,
3058 struct amdgpu_bo_va_mapping, rb);
3060 *bo_s = mapping->start;
3061 *bo_l = mapping->last;
3063 amdgpu_bo_unreserve(vm->root.bo);
3066 amdgpu_bo_unreserve(vm->root.bo);
3073 * svm_range_is_valid - check if virtual address range is valid
3074 * @p: current kfd_process
3075 * @start: range start address, in pages
3076 * @size: range size, in pages
3078 * Valid virtual address range means it belongs to one or more VMAs
3080 * Context: Process context
3083 * 0 - OK, otherwise error code
3086 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
3088 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
3089 struct vm_area_struct *vma;
3091 unsigned long start_unchg = start;
3093 start <<= PAGE_SHIFT;
3094 end = start + (size << PAGE_SHIFT);
	do {
		vma = find_vma(p->mm, start);
		if (!vma || start < vma->vm_start ||
		    (vma->vm_flags & device_vma))
			return -EFAULT;
		start = min(end, vma->vm_end);
3101 } while (start < end);
	return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
				  NULL);
}
3108 * svm_range_best_prefetch_location - decide the best prefetch location
3109 * @prange: svm range structure
 * For xnack off:
 * If range maps to a single GPU, the best prefetch location is prefetch_loc,
 * which can be CPU or GPU.
 *
 * If range is ACCESS or ACCESS_IN_PLACE by mGPUs, the best prefetch location
 * is the prefetch_loc GPU only if the mGPU connection is on the same XGMI
 * hive; otherwise the best prefetch location is always CPU, because a GPU
 * cannot have a coherent mapping of another GPU's VRAM even with a large-BAR
 * PCIe connection.
 *
 * For xnack on:
 * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is
 * prefetch_loc; other GPU access will generate a vm fault and trigger
 * migration.
 *
 * If range is ACCESS_IN_PLACE by mGPUs, the best prefetch location is the
 * prefetch_loc GPU only if the mGPU connection is on the same XGMI hive;
 * otherwise the best prefetch location is always CPU.
3128 * Context: Process context
3131 * 0 for CPU or GPU id
3134 svm_range_best_prefetch_location(struct svm_range *prange)
3136 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3137 uint32_t best_loc = prange->prefetch_loc;
3138 struct kfd_process_device *pdd;
3139 struct amdgpu_device *bo_adev;
3140 struct kfd_process *p;
3143 p = container_of(prange->svms, struct kfd_process, svms);
3145 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3148 bo_adev = svm_range_get_adev_by_id(prange, best_loc);
3150 WARN_ONCE(1, "failed to get device by id 0x%x\n", best_loc);
3155 if (p->xnack_enabled)
3156 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3158 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3161 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3162 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3164 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3168 if (pdd->dev->adev == bo_adev)
3171 if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) {
3178 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3179 p->xnack_enabled, &p->svms, prange->start, prange->last,
3185 /* FIXME: This is a workaround for page locking bug when some pages are
3186 * invalid during migration to VRAM
3188 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
3191 struct hmm_range *hmm_range;
3194 if (prange->validated_once)
3197 r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
3198 prange->start << PAGE_SHIFT,
3199 prange->npages, &hmm_range,
3200 false, true, owner);
3202 amdgpu_hmm_range_get_pages_done(hmm_range);
3203 prange->validated_once = true;
3207 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3208 * @mm: current process mm_struct
3209 * @prange: svm range structure
3210 * @migrated: output, true if migration is triggered
 * If range prefetch_loc is GPU and actual loc is cpu 0, migrate the range to
 * the GPU.
 *
 * If range prefetch_loc is cpu 0 and actual loc is GPU, migrate the range
 * from the GPU to the CPU.
 *
 * If GPU vm fault retry is not enabled, migration interacts with the MMU
 * notifier callback:
3219 * 1. migrate_vma_setup invalidate pages, MMU notifier callback svm_range_evict
3220 * stops all queues, schedule restore work
3221 * 2. svm_range_restore_work wait for migration is done by
3222 * a. svm_range_validate_vram takes prange->migrate_mutex
3223 * b. svm_range_validate_ram HMM get pages wait for CPU fault handle returns
3224 * 3. restore work update mappings of GPU, resume all queues.
3226 * Context: Process context
3229 * 0 - OK, otherwise - error code of migration
3232 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3239 best_loc = svm_range_best_prefetch_location(prange);
	if (best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
	    best_loc == prange->actual_loc)
		return 0;

	if (!best_loc) {
		r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
		*migrated = !r;
		return r;
	}

	r = svm_migrate_to_vram(prange, best_loc, mm, KFD_MIGRATE_TRIGGER_PREFETCH);
	*migrated = !r;

	return r;
}
3257 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
	if (dma_fence_is_signaled(&fence->base))
		return 0;
3265 if (fence->svm_bo) {
3266 WRITE_ONCE(fence->svm_bo->evicting, 1);
3267 schedule_work(&fence->svm_bo->eviction_work);
3273 static void svm_range_evict_svm_bo_worker(struct work_struct *work)
3275 struct svm_range_bo *svm_bo;
3276 struct kfd_process *p;
3277 struct mm_struct *mm;
3280 svm_bo = container_of(work, struct svm_range_bo, eviction_work);
3281 if (!svm_bo_ref_unless_zero(svm_bo))
3282 return; /* svm_bo was freed while eviction was pending */
3284 /* svm_range_bo_release destroys this worker thread. So during
3285 * the lifetime of this thread, kfd_process and mm will be valid.
3287 p = container_of(svm_bo->svms, struct kfd_process, svms);
3293 spin_lock(&svm_bo->list_lock);
3294 while (!list_empty(&svm_bo->range_list) && !r) {
3295 struct svm_range *prange =
3296 list_first_entry(&svm_bo->range_list,
3297 struct svm_range, svm_bo_list);
3300 list_del_init(&prange->svm_bo_list);
3301 spin_unlock(&svm_bo->list_lock);
3303 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
3304 prange->start, prange->last);
3306 mutex_lock(&prange->migrate_mutex);
3308 r = svm_migrate_vram_to_ram(prange,
3309 svm_bo->eviction_fence->mm,
3310 KFD_MIGRATE_TRIGGER_TTM_EVICTION);
3311 } while (!r && prange->actual_loc && --retries);
3313 if (!r && prange->actual_loc)
3314 pr_info_once("Migration failed during eviction");
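
		/* Once the range no longer has pages in VRAM, detach it from
		 * the evicted svm_bo so the BO can be released.
		 */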
3316 if (!prange->actual_loc) {
3317 mutex_lock(&prange->lock);
3318 prange->svm_bo = NULL;
3319 mutex_unlock(&prange->lock);
3321 mutex_unlock(&prange->migrate_mutex);
3323 spin_lock(&svm_bo->list_lock);
3325 spin_unlock(&svm_bo->list_lock);
3326 mmap_read_unlock(mm);
3328 dma_fence_signal(&svm_bo->eviction_fence->base);
3330 /* This is the last reference to svm_bo, after svm_range_vram_node_free
3331 * has been called in svm_migrate_vram_to_ram
3333 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n");
3334 svm_range_bo_unref(svm_bo);
3338 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
3339 uint64_t start, uint64_t size, uint32_t nattr,
3340 struct kfd_ioctl_svm_attribute *attrs)
3342 struct amdkfd_process_info *process_info = p->kgd_process_info;
3343 struct list_head update_list;
3344 struct list_head insert_list;
3345 struct list_head remove_list;
3346 struct svm_range_list *svms;
3347 struct svm_range *prange;
3348 struct svm_range *next;
3349 bool update_mapping = false;
3353 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
3354 p->pasid, &p->svms, start, start + size - 1, size);
3356 r = svm_range_check_attr(p, nattr, attrs);
3362 mutex_lock(&process_info->lock);
3364 svm_range_list_lock_and_flush_work(svms, mm);
3366 r = svm_range_is_valid(p, start, size);
3368 pr_debug("invalid range r=%d\n", r);
3369 mmap_write_unlock(mm);
3373 mutex_lock(&svms->lock);
3375 /* Add new range and split existing ranges as needed */
3376 r = svm_range_add(p, start, size, nattr, attrs, &update_list,
3377 &insert_list, &remove_list);
3379 mutex_unlock(&svms->lock);
3380 mmap_write_unlock(mm);
3383 /* Apply changes as a transaction */
3384 list_for_each_entry_safe(prange, next, &insert_list, list) {
3385 svm_range_add_to_svms(prange);
3386 svm_range_add_notifier_locked(mm, prange);
3388 list_for_each_entry(prange, &update_list, update_list) {
3389 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
3390 /* TODO: unmap ranges from GPU that lost access */
3392 list_for_each_entry_safe(prange, next, &remove_list, update_list) {
3393 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
3394 prange->svms, prange, prange->start,
3396 svm_range_unlink(prange);
3397 svm_range_remove_notifier(prange);
3398 svm_range_free(prange, false);
3401 mmap_write_downgrade(mm);
3402 /* Trigger migrations and revalidate and map to GPUs as needed. If
3403 * this fails we may be left with partially completed actions. There
3404 * is no clean way of rolling back to the previous state in such a
3405 * case because the rollback wouldn't be guaranteed to work either.
3407 list_for_each_entry(prange, &update_list, update_list) {
3410 mutex_lock(&prange->migrate_mutex);
3412 r = svm_range_trigger_migration(mm, prange, &migrated);
3414 goto out_unlock_range;
3416 if (migrated && (!p->xnack_enabled ||
3417 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) &&
3418 prange->mapped_to_gpu) {
3419 pr_debug("restore_work will update mappings of GPUs\n");
			mutex_unlock(&prange->migrate_mutex);
			continue;
		}
3424 if (!migrated && !update_mapping) {
			mutex_unlock(&prange->migrate_mutex);
			continue;
		}
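
		/* GPU TLBs only need an explicit flush when attribute changes
		 * update an existing mapping in place, i.e. no migration
		 * happened and the range is already mapped to a GPU.
		 */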
3429 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
3431 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
3432 true, true, flush_tlb);
3434 pr_debug("failed %d to map svm range\n", r);
3437 mutex_unlock(&prange->migrate_mutex);
3442 svm_range_debug_dump(svms);
3444 mutex_unlock(&svms->lock);
3445 mmap_read_unlock(mm);
3447 mutex_unlock(&process_info->lock);
3449 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
3450 &p->svms, start, start + size - 1, r);
3456 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
3457 uint64_t start, uint64_t size, uint32_t nattr,
3458 struct kfd_ioctl_svm_attribute *attrs)
3460 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE);
3461 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE);
3462 bool get_preferred_loc = false;
3463 bool get_prefetch_loc = false;
3464 bool get_granularity = false;
3465 bool get_accessible = false;
3466 bool get_flags = false;
3467 uint64_t last = start + size - 1UL;
3468 uint8_t granularity = 0xff;
3469 struct interval_tree_node *node;
3470 struct svm_range_list *svms;
3471 struct svm_range *prange;
3472 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3473 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3474 uint32_t flags_and = 0xffffffff;
3475 uint32_t flags_or = 0;
3480 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
3481 start + size - 1, nattr);
3483 /* Flush pending deferred work to avoid racing with deferred actions from
3484 * previous memory map changes (e.g. munmap). Concurrent memory map changes
3485 * can still race with get_attr because we don't hold the mmap lock. But that
3486 * would be a race condition in the application anyway, and undefined
3487 * behaviour is acceptable in that case.
3489 flush_work(&p->svms.deferred_list_work);
3492 r = svm_range_is_valid(p, start, size);
3493 mmap_read_unlock(mm);
3495 pr_debug("invalid range r=%d\n", r);
3499 for (i = 0; i < nattr; i++) {
3500 switch (attrs[i].type) {
3501 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3502 get_preferred_loc = true;
3504 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3505 get_prefetch_loc = true;
3507 case KFD_IOCTL_SVM_ATTR_ACCESS:
3508 get_accessible = true;
3510 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3511 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3514 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3515 get_granularity = true;
3517 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
3518 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
3521 pr_debug("get invalid attr type 0x%x\n", attrs[i].type);
3528 mutex_lock(&svms->lock);
3530 node = interval_tree_iter_first(&svms->objects, start, last);
3532 pr_debug("range attrs not found return default values\n");
3533 svm_range_set_default_attributes(&location, &prefetch_loc,
3534 &granularity, &flags_and);
3535 flags_or = flags_and;
3536 if (p->xnack_enabled)
3537 bitmap_copy(bitmap_access, svms->bitmap_supported,
3540 bitmap_zero(bitmap_access, MAX_GPU_INSTANCE);
3541 bitmap_zero(bitmap_aip, MAX_GPU_INSTANCE);
3544 bitmap_copy(bitmap_access, svms->bitmap_supported, MAX_GPU_INSTANCE);
3545 bitmap_copy(bitmap_aip, svms->bitmap_supported, MAX_GPU_INSTANCE);
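
	/* Walk every range overlapping [start, last] and aggregate
	 * conservatively: locations are reported only if all ranges agree,
	 * access bitmaps are AND-ed, and flags are reported both as the
	 * intersection (SET_FLAGS) and as the complement of the union
	 * (CLR_FLAGS).
	 */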
3548 struct interval_tree_node *next;
3550 prange = container_of(node, struct svm_range, it_node);
3551 next = interval_tree_iter_next(node, start, last);
3553 if (get_preferred_loc) {
3554 if (prange->preferred_loc ==
3555 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3556 (location != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3557 location != prange->preferred_loc)) {
3558 location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3559 get_preferred_loc = false;
3561 location = prange->preferred_loc;
3564 if (get_prefetch_loc) {
3565 if (prange->prefetch_loc ==
3566 KFD_IOCTL_SVM_LOCATION_UNDEFINED ||
3567 (prefetch_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED &&
3568 prefetch_loc != prange->prefetch_loc)) {
3569 prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
3570 get_prefetch_loc = false;
3572 prefetch_loc = prange->prefetch_loc;
3575 if (get_accessible) {
3576 bitmap_and(bitmap_access, bitmap_access,
3577 prange->bitmap_access, MAX_GPU_INSTANCE);
3578 bitmap_and(bitmap_aip, bitmap_aip,
3579 prange->bitmap_aip, MAX_GPU_INSTANCE);
3582 flags_and &= prange->flags;
3583 flags_or |= prange->flags;
3586 if (get_granularity && prange->granularity < granularity)
3587 granularity = prange->granularity;
3592 mutex_unlock(&svms->lock);
3594 for (i = 0; i < nattr; i++) {
3595 switch (attrs[i].type) {
3596 case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
3597 attrs[i].value = location;
3599 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3600 attrs[i].value = prefetch_loc;
3602 case KFD_IOCTL_SVM_ATTR_ACCESS:
3603 gpuidx = kfd_process_gpuidx_from_gpuid(p,
3606 pr_debug("invalid gpuid %x\n", attrs[i].value);
3609 if (test_bit(gpuidx, bitmap_access))
3610 attrs[i].type = KFD_IOCTL_SVM_ATTR_ACCESS;
3611 else if (test_bit(gpuidx, bitmap_aip))
3613 KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE;
3615 attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
3617 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3618 attrs[i].value = flags_and;
3620 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
3621 attrs[i].value = ~flags_or;
3623 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
3624 attrs[i].value = (uint32_t)granularity;
3632 int kfd_criu_resume_svm(struct kfd_process *p)
3634 struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
3635 int nattr_common = 4, nattr_accessibility = 1;
3636 struct criu_svm_metadata *criu_svm_md = NULL;
3637 struct svm_range_list *svms = &p->svms;
3638 struct criu_svm_metadata *next = NULL;
3639 uint32_t set_flags = 0xffffffff;
3640 int i, j, num_attrs, ret = 0;
3641 uint64_t set_attr_size;
3642 struct mm_struct *mm;
3644 if (list_empty(&svms->criu_svm_metadata_list)) {
3645 pr_debug("No SVM data from CRIU restore stage 2\n");
3649 mm = get_task_mm(p->lead_thread);
3651 pr_err("failed to get mm for the target process\n");
3655 num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);
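
	/* Each checkpointed range carries the four common attributes plus one
	 * accessibility attribute per GPU of the process.
	 */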
3658 list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
3659 pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
3660 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);
3662 for (j = 0; j < num_attrs; j++) {
3663 pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
3664 i, j, criu_svm_md->data.attrs[j].type,
3665 i, j, criu_svm_md->data.attrs[j].value);
3666 switch (criu_svm_md->data.attrs[j].type) {
3667 /* During Checkpoint operation, the query for
3668 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute might
3669 * return KFD_IOCTL_SVM_LOCATION_UNDEFINED if they were
3670 * not used by the range which was checkpointed. Care
3671 * must be taken to not restore with an invalid value
3672 * otherwise the gpuidx value will be invalid and
3673 * set_attr would eventually fail so just replace those
3674 * with another dummy attribute such as
3675 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
3677 case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
3678 if (criu_svm_md->data.attrs[j].value ==
3679 KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
3680 criu_svm_md->data.attrs[j].type =
3681 KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3682 criu_svm_md->data.attrs[j].value = 0;
3685 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
3686 set_flags = criu_svm_md->data.attrs[j].value;
		/* CLR_FLAGS is not available via get_attr during checkpoint,
		 * but it needs to be inserted before restoring the ranges, so
		 * allocate extra space for it before calling set_attr.
		 */
3697 set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3699 set_attr_new = krealloc(set_attr, set_attr_size,
3701 if (!set_attr_new) {
3705 set_attr = set_attr_new;
3707 memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
3708 sizeof(struct kfd_ioctl_svm_attribute));
3709 set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
3710 set_attr[num_attrs].value = ~set_flags;
3712 ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
3713 criu_svm_md->data.size, num_attrs + 1,
3716 pr_err("CRIU: failed to set range attributes\n");
3724 list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
3725 pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
3726 criu_svm_md->data.start_addr);
3735 int kfd_criu_restore_svm(struct kfd_process *p,
3736 uint8_t __user *user_priv_ptr,
3737 uint64_t *priv_data_offset,
3738 uint64_t max_priv_data_size)
3740 uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
3741 int nattr_common = 4, nattr_accessibility = 1;
3742 struct criu_svm_metadata *criu_svm_md = NULL;
3743 struct svm_range_list *svms = &p->svms;
3744 uint32_t num_devices;
3747 num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of GPUs is
	 * assumed to be the same on the restore node; this must have been
	 * checked while evaluating the topology earlier.
	 */
3753 svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
3754 (nattr_common + nattr_accessibility * num_devices);
3755 svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;
	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
			     svm_attrs_size;
3760 criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
3762 pr_err("failed to allocate memory to store svm metadata\n");
3765 if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
3770 ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
3771 svm_priv_data_size);
3776 *priv_data_offset += svm_priv_data_size;
3778 list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);
3788 int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
3789 uint64_t *svm_priv_data_size)
3791 uint64_t total_size, accessibility_size, common_attr_size;
3792 int nattr_common = 4, nattr_accessibility = 1;
3793 int num_devices = p->n_pdds;
3794 struct svm_range_list *svms;
3795 struct svm_range *prange;
3798 *svm_priv_data_size = 0;
3804 mutex_lock(&svms->lock);
3805 list_for_each_entry(prange, &svms->list, list) {
3806 pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
3807 prange, prange->start, prange->npages,
3808 prange->start + prange->npages - 1);
3811 mutex_unlock(&svms->lock);
3813 *num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for all the gpus
3815 * individually, remaining ones are spanned across the entire process
3816 * regardless of the various gpu nodes. Of the remaining attributes,
3817 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
3819 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
3820 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
3821 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
3822 * KFD_IOCTL_SVM_ATTR_GRANULARITY
	 * ** ACCESSIBILITY ATTRIBUTES **
3825 * (Considered as one, type is altered during query, value is gpuid)
3826 * KFD_IOCTL_SVM_ATTR_ACCESS
3827 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
3828 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
3830 if (*num_svm_ranges > 0) {
3831 common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3833 accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
3834 nattr_accessibility * num_devices;
3836 total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
3837 common_attr_size + accessibility_size;
3839 *svm_priv_data_size = *num_svm_ranges * total_size;
3842 pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
3843 *svm_priv_data_size);
3847 int kfd_criu_checkpoint_svm(struct kfd_process *p,
3848 uint8_t __user *user_priv_data,
3849 uint64_t *priv_data_offset)
3851 struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
3852 struct kfd_ioctl_svm_attribute *query_attr = NULL;
3853 uint64_t svm_priv_data_size, query_attr_size = 0;
3854 int index, nattr_common = 4, ret = 0;
3855 struct svm_range_list *svms;
3856 int num_devices = p->n_pdds;
3857 struct svm_range *prange;
3858 struct mm_struct *mm;
3864 mm = get_task_mm(p->lead_thread);
3866 pr_err("failed to get mm for the target process\n");
3870 query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
3871 (nattr_common + num_devices);
3873 query_attr = kzalloc(query_attr_size, GFP_KERNEL);
3879 query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
3880 query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
3881 query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
3882 query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;
3884 for (index = 0; index < num_devices; index++) {
3885 struct kfd_process_device *pdd = p->pdds[index];
3887 query_attr[index + nattr_common].type =
3888 KFD_IOCTL_SVM_ATTR_ACCESS;
3889 query_attr[index + nattr_common].value = pdd->user_gpu_id;
3892 svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;
3894 svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
3901 list_for_each_entry(prange, &svms->list, list) {
3903 svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
3904 svm_priv->start_addr = prange->start;
3905 svm_priv->size = prange->npages;
3906 memcpy(&svm_priv->attrs, query_attr, query_attr_size);
3907 pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
3908 prange, prange->start, prange->npages,
3909 prange->start + prange->npages - 1,
3910 prange->npages * PAGE_SIZE);
3912 ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
3914 (nattr_common + num_devices),
3917 pr_err("CRIU: failed to obtain range attributes\n");
3921 if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
3922 svm_priv_data_size)) {
3923 pr_err("Failed to copy svm priv to user\n");
3928 *priv_data_offset += svm_priv_data_size;
3943 svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
3944 uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
3946 struct mm_struct *mm = current->mm;
3949 start >>= PAGE_SHIFT;
3950 size >>= PAGE_SHIFT;
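
	/* The ioctl passes a byte-based address and size; the SVM code works
	 * in pages throughout, so convert before dispatching.
	 */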
	switch (op) {
	case KFD_IOCTL_SVM_OP_SET_ATTR:
3954 r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
3956 case KFD_IOCTL_SVM_OP_GET_ATTR:
3957 r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);