/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we use TTM to allocate vram and pin it in
 * vram while it is in use. We likely want to overhaul memory management for
 * nouveau to be more page-like (not necessarily with the system page size,
 * but a bigger page size) at the lowest level, with a shim layer on top that
 * provides the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
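
/*
 * Device memory is handled in DMEM_CHUNK_SIZE pieces: each chunk is a pinned
 * VRAM buffer object registered as MEMORY_DEVICE_PRIVATE memory, and the
 * struct pages covering it are handed out one at a time from a simple free
 * list (see nouveau_dmem_chunk_alloc() and nouveau_dmem_page_alloc_locked()
 * below).
 */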

enum nouveau_aper {
        NOUVEAU_APER_VIRT,
        NOUVEAU_APER_VRAM,
        NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
                                      enum nouveau_aper, u64 dst_addr,
                                      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
                                    enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
        struct list_head list;
        struct nouveau_bo *bo;
        struct nouveau_drm *drm;
        unsigned long callocated;
        struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
        nouveau_migrate_copy_t copy_func;
        nouveau_clear_page_t clear_func;
        struct nouveau_channel *chan;
};

struct nouveau_dmem {
        struct nouveau_drm *drm;
        struct nouveau_dmem_migrate migrate;
        struct list_head chunks;
        struct mutex mutex;
        struct page *free_pages;
        spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
        return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

        return chunk->drm;
}
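
/*
 * Translate a device-private struct page into the VRAM address the copy
 * engine should use: the page's offset within the chunk's reserved range,
 * added to the chunk's pinned buffer object offset.
 */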
unsigned long nouveau_dmem_page_addr(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
                                chunk->pagemap.res.start;

        return chunk->bo->bo.offset + off;
}

static void nouveau_dmem_page_free(struct page *page)
{
        struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
        struct nouveau_dmem *dmem = chunk->drm->dmem;

        spin_lock(&dmem->lock);
        page->zone_device_data = dmem->free_pages;
        dmem->free_pages = page;

        WARN_ON(!chunk->callocated);
        chunk->callocated--;
        /*
         * FIXME when chunk->callocated reaches 0 we should add the chunk to
         * a reclaim list so that it can be freed in case of memory pressure.
         */
        spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
        if (fence) {
                nouveau_fence_wait(*fence, true, false);
                nouveau_fence_unref(fence);
        } else {
                /* FIXME wait for the channel to be idle before finalizing. */
        }
}
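
/*
 * Copy one faulting page from VRAM back into a newly allocated system memory
 * page. On success the destination entry is stored in args->dst[0] so that
 * migrate_vma_pages() can commit the migration.
 */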
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
                struct vm_fault *vmf, struct migrate_vma *args,
                dma_addr_t *dma_addr)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;

        spage = migrate_pfn_to_page(args->src[0]);
        if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
                return 0;

        dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
        if (!dpage)
                return VM_FAULT_SIGBUS;
        lock_page(dpage);

        *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr))
                goto error_free_page;

        if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
                        NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
                goto error_dma_unmap;

        args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        return 0;

error_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
        __free_page(dpage);
        return VM_FAULT_SIGBUS;
}
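
/*
 * dev_pagemap migrate_to_ram() callback, invoked when the CPU faults on a
 * device-private page. Only the single faulting page is migrated back to
 * system memory (see the FIXME below about batching).
 */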
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct nouveau_drm *drm = page_to_drm(vmf->page);
        struct nouveau_dmem *dmem = drm->dmem;
        struct nouveau_fence *fence;
        unsigned long src = 0, dst = 0;
        dma_addr_t dma_addr = 0;
        vm_fault_t ret;
        struct migrate_vma args = {
                .vma = vmf->vma,
                .start = vmf->address,
                .end = vmf->address + PAGE_SIZE,
                .src = &src,
                .dst = &dst,
                .src_owner = drm->dev,
        };

        /*
         * FIXME what we really want is to find some heuristic to migrate more
         * than just one page on CPU fault. When such a fault happens it is
         * very likely that more surrounding pages will CPU fault too.
         */
        if (migrate_vma_setup(&args) < 0)
                return VM_FAULT_SIGBUS;
        if (!args.cpages)
                return 0;

        ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
        if (ret || dst == 0)
                goto done;

        nouveau_fence_new(dmem->migrate.chan, false, &fence);
        migrate_vma_pages(&args);
        nouveau_dmem_fence_done(&fence);
        dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
        migrate_vma_finalize(&args);
        return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
        .page_free              = nouveau_dmem_page_free,
        .migrate_to_ram         = nouveau_dmem_migrate_to_ram,
};
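
/*
 * Allocate and register one new chunk of device memory: reserve a free range
 * of physical address space, back it with a pinned VRAM buffer object, hook
 * it up as MEMORY_DEVICE_PRIVATE pages and seed the free-page list. The last
 * page of the chunk is returned to the caller as an already-accounted
 * allocation.
 */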
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
        struct nouveau_dmem_chunk *chunk;
        struct resource *res;
        struct page *page;
        void *ptr;
        unsigned long i, pfn_first;
        int ret;

        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (chunk == NULL)
                return -ENOMEM;

        /* Allocate unused physical address space for device private pages. */
        res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
                                      "nouveau_dmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out_free;
        }

        chunk->drm = drm;
        chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
        chunk->pagemap.res = *res;
        chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
        chunk->pagemap.owner = drm->dev;

        ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
                             TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
                             &chunk->bo);
        if (ret)
                goto out_release;

        ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
        if (ret)
                goto out_bo_free;

        ptr = memremap_pages(&chunk->pagemap, numa_node_id());
        if (IS_ERR(ptr)) {
                ret = PTR_ERR(ptr);
                goto out_bo_unpin;
        }

        mutex_lock(&drm->dmem->mutex);
        list_add(&chunk->list, &drm->dmem->chunks);
        mutex_unlock(&drm->dmem->mutex);

        pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
        page = pfn_to_page(pfn_first);
        spin_lock(&drm->dmem->lock);
        for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
                page->zone_device_data = drm->dmem->free_pages;
                drm->dmem->free_pages = page;
        }
        *ppage = page;
        chunk->callocated++;
        spin_unlock(&drm->dmem->lock);

        NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
                DMEM_CHUNK_SIZE >> 20);
        return 0;

out_bo_unpin:
        nouveau_bo_unpin(chunk->bo);
out_bo_free:
        nouveau_bo_ref(NULL, &chunk->bo);
out_release:
        release_mem_region(chunk->pagemap.res.start,
                           resource_size(&chunk->pagemap.res));
out_free:
        kfree(chunk);
        return ret;
}
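
/*
 * Hand out a single device-private page, either from the free list or by
 * allocating a fresh chunk. The page is returned referenced and locked so it
 * can be used directly as a migration destination.
 */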
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        struct page *page = NULL;
        int ret;

        spin_lock(&drm->dmem->lock);
        if (drm->dmem->free_pages) {
                page = drm->dmem->free_pages;
                drm->dmem->free_pages = page->zone_device_data;
                chunk = nouveau_page_to_chunk(page);
                chunk->callocated++;
                spin_unlock(&drm->dmem->lock);
        } else {
                spin_unlock(&drm->dmem->lock);
                ret = nouveau_dmem_chunk_alloc(drm, &page);
                if (ret)
                        return NULL;
        }

        get_page(page);
        lock_page(page);
        return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
        unlock_page(page);
        put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;
        int ret;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list) {
                ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
                /* FIXME handle pin failure */
                WARN_ON(ret);
        }
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);
        list_for_each_entry(chunk, &drm->dmem->chunks, list)
                nouveau_bo_unpin(chunk->bo);
        mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
        struct nouveau_dmem_chunk *chunk, *tmp;

        if (drm->dmem == NULL)
                return;

        mutex_lock(&drm->dmem->mutex);

        list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
                nouveau_bo_unpin(chunk->bo);
                nouveau_bo_ref(NULL, &chunk->bo);
                list_del(&chunk->list);
                memunmap_pages(&chunk->pagemap);
                release_mem_region(chunk->pagemap.res.start,
                                   resource_size(&chunk->pagemap.res));
                kfree(chunk);
        }

        mutex_unlock(&drm->dmem->mutex);
}
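
/*
 * Emit a multi-line copy of npages PAGE_SIZE-sized lines on the NVC0B5-class
 * copy engine, selecting the physical VRAM or host aperture for source and
 * destination as requested. Completion is left to the caller, which is
 * expected to emit and wait on a fence.
 */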
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
                    enum nouveau_aper dst_aper, u64 dst_addr,
                    enum nouveau_aper src_aper, u64 src_addr)
{
        struct nouveau_channel *chan = drm->dmem->migrate.chan;
        u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        int ret;

        ret = RING_SPACE(chan, 13);
        if (ret)
                return ret;

        if (src_aper != NOUVEAU_APER_VIRT) {
                switch (src_aper) {
                case NOUVEAU_APER_VRAM:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
        }

        if (dst_aper != NOUVEAU_APER_VIRT) {
                switch (dst_aper) {
                case NOUVEAU_APER_VRAM:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
                        break;
                case NOUVEAU_APER_HOST:
                        BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
                        break;
                default:
                        return -EINVAL;
                }
                launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
        }

        BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
        OUT_RING  (chan, upper_32_bits(src_addr));
        OUT_RING  (chan, lower_32_bits(src_addr));
        OUT_RING  (chan, upper_32_bits(dst_addr));
        OUT_RING  (chan, lower_32_bits(dst_addr));
        OUT_RING  (chan, PAGE_SIZE);
        OUT_RING  (chan, PAGE_SIZE);
        OUT_RING  (chan, PAGE_SIZE);
        OUT_RING  (chan, npages);
        BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
        OUT_RING  (chan, launch_dma);
        return 0;
}
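
/*
 * Clear a range of memory with the copy engine's remap feature: both
 * destination components come from the constant registers, which are
 * programmed to zero below, so every 8-byte element written is zero (length
 * is assumed to be a multiple of 8).
 */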
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
                     enum nouveau_aper dst_aper, u64 dst_addr)
{
        struct nouveau_channel *chan = drm->dmem->migrate.chan;
        u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
                         (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
                         (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
                         (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
        u32 remap = (4 << 0) /* DST_X_CONST_A */ |
                    (5 << 4) /* DST_Y_CONST_B */ |
                    (3 << 16) /* COMPONENT_SIZE_FOUR */ |
                    (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
        int ret;

        ret = RING_SPACE(chan, 12);
        if (ret)
                return ret;

        switch (dst_aper) {
        case NOUVEAU_APER_VRAM:
                BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
                break;
        case NOUVEAU_APER_HOST:
                BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
                break;
        default:
                return -EINVAL;
        }
        launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */

        BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
        OUT_RING(chan, 0); /* CONST_A: clear value. */
        OUT_RING(chan, 0); /* CONST_B: clear value. */
        OUT_RING(chan, remap);
        BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
        OUT_RING(chan, upper_32_bits(dst_addr));
        OUT_RING(chan, lower_32_bits(dst_addr));
        BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
        OUT_RING(chan, length >> 3);
        BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
        OUT_RING(chan, launch_dma);
        return 0;
}
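
/*
 * Hook up the copy/clear helpers according to the class of TTM's copy engine
 * channel; only Pascal and newer copy engines are supported here.
 */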
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
        switch (drm->ttm.copy.oclass) {
        case PASCAL_DMA_COPY_A:
        case PASCAL_DMA_COPY_B:
        case VOLTA_DMA_COPY_A:
        case TURING_DMA_COPY_A:
                drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
                drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
                drm->dmem->migrate.chan = drm->ttm.chan;
                return 0;
        default:
                break;
        }
        return -ENODEV;
}
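
/*
 * Set up per-device dmem state. Chunks of VRAM are allocated lazily on first
 * use, so this only allocates the bookkeeping structure and wires up the
 * migration copy channel.
 */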
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
        int ret;

        /* This only makes sense on Pascal or newer. */
        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
                return;

        drm->dmem->drm = drm;
        mutex_init(&drm->dmem->mutex);
        INIT_LIST_HEAD(&drm->dmem->chunks);
        spin_lock_init(&drm->dmem->lock);

        /* Initialize migration dma helpers before registering memory */
        ret = nouveau_dmem_migrate_init(drm);
        if (ret) {
                kfree(drm->dmem);
                drm->dmem = NULL;
        }
}
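
/*
 * Copy one page of system memory into a freshly allocated device-private
 * page, or clear the VRAM page when there is no source page, and build the
 * NVIF pfn value used later to update the GPU page tables. Returns the
 * migrate_pfn() entry for the destination page, or 0 if nothing was migrated.
 */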
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
        struct device *dev = drm->dev->dev;
        struct page *dpage, *spage;
        unsigned long paddr;

        spage = migrate_pfn_to_page(src);
        if (!(src & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = nouveau_dmem_page_alloc_locked(drm);
        if (!dpage)
                goto out;

        paddr = nouveau_dmem_page_addr(dpage);
        if (spage) {
                *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
                /* copy_func() takes a page count, not a byte length. */
                if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
                        paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
                *dma_addr = DMA_MAPPING_ERROR;
                if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
                        NOUVEAU_APER_VRAM, paddr))
                        goto out_free_page;
        }

        *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
                ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
        if (src & MIGRATE_PFN_WRITE)
                *pfn |= NVIF_VMM_PFNMAP_V0_W;
        return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
        dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
        nouveau_dmem_page_free_locked(drm, dpage);
out:
        *pfn = NVIF_VMM_PFNMAP_V0_NONE;
        return 0;
}
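
/*
 * Migrate the batch of pages described by *args into VRAM, fence the copies,
 * map the new VRAM pages into the GPU page tables for the range, then unmap
 * the DMA-mapped source pages and finalize the migration.
 */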
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
                struct nouveau_svmm *svmm, struct migrate_vma *args,
                dma_addr_t *dma_addrs, u64 *pfns)
{
        struct nouveau_fence *fence;
        unsigned long addr = args->start, nr_dma = 0, i;

        for (i = 0; addr < args->end; i++) {
                args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
                                dma_addrs + nr_dma, pfns + i);
                if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
                        nr_dma++;
                addr += PAGE_SIZE;
        }

        nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
        migrate_vma_pages(args);
        nouveau_dmem_fence_done(&fence);
        nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

        while (nr_dma--) {
                dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
                                DMA_BIDIRECTIONAL);
        }
        migrate_vma_finalize(args);
}
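
/*
 * Migrate a range of a process address space to device memory, working in
 * batches of at most SG_MAX_SINGLE_ALLOC pages per migrate_vma_setup() pass.
 * The svmm handle is used to push the resulting VRAM mappings into the GPU
 * page tables; callers are presumably the SVM code in nouveau_svm.c.
 */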
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
                         struct nouveau_svmm *svmm,
                         struct vm_area_struct *vma,
                         unsigned long start,
                         unsigned long end)
{
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
        dma_addr_t *dma_addrs;
        struct migrate_vma args = {
                .vma = vma,
                .start = start,
        };
        unsigned long i;
        u64 *pfns;
        int ret = -ENOMEM;

        if (drm->dmem == NULL)
                return -ENODEV;

        args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
        if (!args.src)
                goto out;
        args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
        if (!args.dst)
                goto out_free_src;

        dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
        if (!dma_addrs)
                goto out_free_dst;

        pfns = nouveau_pfns_alloc(max);
        if (!pfns)
                goto out_free_dma;

        for (i = 0; i < npages; i += max) {
                args.end = min(end, args.start + (max << PAGE_SHIFT));
                ret = migrate_vma_setup(&args);
                if (ret)
                        goto out_free_pfns;

                if (args.cpages)
                        nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
                                                   pfns);
                args.start = args.end;
        }

        ret = 0;
out_free_pfns:
        nouveau_pfns_free(pfns);
out_free_dma:
        kfree(dma_addrs);
out_free_dst:
        kfree(args.dst);
out_free_src:
        kfree(args.src);
out:
        return ret;
}