// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

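/*
 * Rough sketch of what svm_migrate_gart_map() builds, inferred from the code
 * below: a single IB whose first num_dw dwords hold the SDMA copy packet and
 * whose following num_bytes bytes hold the GART PTEs written by
 * amdgpu_gart_map(). The copy packet copies those PTEs from the IB into the
 * GART table BO, so once the job completes, GART window 0 (gart_start) maps
 * the DMA addresses passed in @addr.
 */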
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA pointer to be copied
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * ram addresses use contiguous GART table entries that map to the ram pages,
 * vram addresses use a direct mapping of the vram pages, which therefore must
 * be npages contiguous pages.
 * The GART update and the sdma copy share the same buffer copy function ring.
 * The copy is split into multiple transfers of at most GTT_MAX_PAGES each; all
 * sdma operations are serialized, so waiting on the last sdma finish fence,
 * which is returned via @mfence, is enough to know the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */

static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

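/*
 * Example for svm_migrate_copy_memory_gart(): copying 3 * GTT_MAX_PAGES + 10
 * pages issues four GART map + sdma copy pairs, three of GTT_MAX_PAGES pages
 * and one of 10 pages, all under gtt_window_lock, with *mfence ending up as
 * the fence of the last copy.
 */
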
/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

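/*
 * The helpers below translate between VRAM offsets and the device struct
 * pages created by devm_memremap_pages() in svm_migrate_init():
 * svm_migrate_addr_to_pfn() adds pgmap.range.start before shifting to get the
 * pfn of the VRAM backing page, and svm_migrate_addr() is the inverse, so the
 * addresses handed to the sdma copy code are offsets from the start of the
 * registered VRAM range.
 */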
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

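/*
 * Scratch buffer layout shared by svm_migrate_copy_to_vram() and
 * svm_migrate_copy_to_ram() (allocated by the svm_migrate_vma_* callers right
 * behind migrate.src/dst): npages dma_addr_t entries for the system memory
 * DMA addresses, followed by npages uint64_t entries for the VRAM offsets.
 * Both copy routines batch contiguous runs into as few
 * svm_migrate_copy_memory_gart() calls as they can.
 */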
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

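/*
 * svm_migrate_vma_to_vram - migrate pages of one vma range to VRAM
 *
 * Returns the number of pages collected for migration (migrate.cpages, which
 * may be less than the number of pages in [start, end) for a partial
 * migration), 0 if no page could be collected, or a negative error code if
 * setup or the copy failed.
 */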
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, adev->kfd.dev->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, adev->kfd.dev->id, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller hold mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

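/*
 * Copy collected device pages back to system memory. Runs of physically
 * contiguous VRAM source pages (src[i] == src[i - 1] + PAGE_SIZE) are batched
 * into a single svm_migrate_copy_memory_gart() transfer; a hole in the source,
 * or a page that already lives in system memory, flushes the pending run.
 */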
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address
 * @end: range end virtual address
 * @trigger: reason of migration
 * @fault_page: the page that triggered the CPU page fault, or NULL
 *
 * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - success with all pages migrated
 * negative values - indicate error
 * positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      adev->kfd.dev->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    adev->kfd.dev->id, 0, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 * @fault_page: the page that triggered the CPU page fault, or NULL
 *
 * Context: Process context, caller hold mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    uint32_t trigger, struct page *fault_page)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
					   fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller hold mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		kfd_unref_process(p);
		return 0;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
				    vmf->page);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

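/*
 * Example: with 4KiB pages and a 64 byte struct page (typical x86-64 values),
 * registering 16GB of VRAM reserves 16GB / 4KiB * 64 = 256MB of system memory
 * for the VRAM page structs.
 */
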
int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return -ENOMEM;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						res->end - res->start + 1);
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	svm_range_set_max_pages(adev);

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}