/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>
/*
 * FIXME: this is ugly right now. We are using TTM to allocate vram and we
 * pin it in vram while it is in use. We likely want to overhaul memory
 * management for nouveau to be more page-like (not necessarily with system
 * page size but a bigger page size) at the lowest level, and have some shim
 * layer on top that would provide the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
	NOUVEAU_APER_VIRT,
	NOUVEAU_APER_VRAM,
	NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
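
/*
 * Device memory is managed in fixed-size chunks: each chunk is backed by a
 * single pinned VRAM buffer object, and individual pages within it are
 * handed out via the allocation bitmap below, with callocated counting how
 * many are currently in use.
 */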
struct nouveau_dmem_chunk {
	struct list_head list;
	struct nouveau_bo *bo;
	struct nouveau_drm *drm;
	unsigned long pfn_first;
	unsigned long callocated;
	unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
	spinlock_t lock;
};
struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	struct nouveau_channel *chan;
};
struct nouveau_dmem {
	struct nouveau_drm *drm;
	struct dev_pagemap pagemap;
	struct nouveau_dmem_migrate migrate;
	struct list_head chunk_free;
	struct list_head chunk_full;
	struct list_head chunk_empty;
	struct mutex mutex;
};
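
/*
 * The two helpers below map a ZONE_DEVICE page back to the nouveau_dmem
 * instance that owns it (via its dev_pagemap) and to the physical VRAM
 * address it occupies inside its chunk's buffer object.
 */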
static inline struct nouveau_dmem *page_to_dmem(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem, pagemap);
}
static unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

	return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
}
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = page->zone_device_data;
	unsigned long idx = page_to_pfn(page) - chunk->pfn_first;

	/*
	 * FIXME:
	 *
	 * This is really a bad example, we need to overhaul nouveau memory
	 * management to be more page focused and to allow a lighter locking
	 * scheme to be used in the process.
	 */
	spin_lock(&chunk->lock);
	clear_bit(idx, chunk->bitmap);
	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/*
	 * FIXME when chunk->callocated reaches 0 we should add the chunk to
	 * a reclaim list so that it can be freed in case of memory pressure.
	 */
	spin_unlock(&chunk->lock);
}
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
	if (fence) {
		nouveau_fence_wait(*fence, true, false);
		nouveau_fence_unref(fence);
	} else {
		/*
		 * FIXME wait for channel to be IDLE before finalizing.
		 */
	}
}
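
/*
 * Copy a single page back from VRAM to a freshly allocated system RAM page
 * on behalf of a CPU fault. The destination page is DMA-mapped so the copy
 * engine can write to it; on success the caller's dst entry is filled with
 * the new pfn so migrate_vma_pages() can finish the migration.
 */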
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
		struct vm_fault *vmf, struct migrate_vma *args,
		dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;

	spage = migrate_pfn_to_page(args->src[0]);
	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
		return 0;

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		return VM_FAULT_SIGBUS;
	lock_page(dpage);

	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		goto error_free_page;

	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
		goto error_dma_unmap;

	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	return 0;

error_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
	__free_page(dpage);
	return VM_FAULT_SIGBUS;
}
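
/*
 * dev_pagemap migrate_to_ram() callback: invoked when the CPU faults on a
 * device-private page. Only the faulting page is migrated back; see the
 * FIXME below about migrating a larger window around the fault address.
 */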
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
	struct nouveau_drm *drm = dmem->drm;
	struct nouveau_fence *fence;
	unsigned long src = 0, dst = 0;
	dma_addr_t dma_addr = 0;
	vm_fault_t ret;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.src_owner	= drm->dev,
	};

	/*
	 * FIXME what we really want is to find some heuristic to migrate more
	 * than just one page on CPU fault. When such a fault happens it is
	 * very likely that more surrounding pages will CPU fault too.
	 */
	if (migrate_vma_setup(&args) < 0)
		return VM_FAULT_SIGBUS;
	if (!args.cpages)
		return 0;

	ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
	if (ret || dst == 0)
		goto done;

	nouveau_fence_new(dmem->migrate.chan, false, &fence);
	migrate_vma_pages(&args);
	nouveau_dmem_fence_done(&fence);
	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
	migrate_vma_finalize(&args);
	return ret;
}
static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
	.page_free		= nouveau_dmem_page_free,
	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
};
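
/*
 * Back one chunk from the chunk_empty list with a pinned VRAM buffer object
 * so its pages can be handed out. The chunk goes back on chunk_empty either
 * way; whether it actually got a buffer object is checked when allocating
 * pages from it.
 */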
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return -EINVAL;

	mutex_lock(&drm->dmem->mutex);
	chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
					 struct nouveau_dmem_chunk,
					 list);
	if (chunk == NULL) {
		mutex_unlock(&drm->dmem->mutex);
		return -ENOMEM;
	}

	list_del(&chunk->list);
	mutex_unlock(&drm->dmem->mutex);

	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out;

	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
	if (ret) {
		nouveau_bo_ref(NULL, &chunk->bo);
		goto out;
	}

	bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
	spin_lock_init(&chunk->lock);

out:
	mutex_lock(&drm->dmem->mutex);
	if (chunk->bo)
		list_add(&chunk->list, &drm->dmem->chunk_empty);
	else
		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
	mutex_unlock(&drm->dmem->mutex);

	return ret;
}
static struct nouveau_dmem_chunk *
nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
					 struct nouveau_dmem_chunk,
					 list);
	if (chunk)
		return chunk;

	chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
					 struct nouveau_dmem_chunk,
					 list);
	if (chunk && chunk->bo)
		return chunk;

	return NULL;
}
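
/*
 * Allocate npages device pages, preferring partially used chunks, then empty
 * ones, and creating new chunks on demand. Page pfns are returned in the
 * pages array; entries are pre-filled with ~0 so unused slots are invalid.
 */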
static int
nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
			 unsigned long npages,
			 unsigned long *pages)
{
	struct nouveau_dmem_chunk *chunk;
	unsigned long c;
	int ret;

	memset(pages, 0xff, npages * sizeof(*pages));

	mutex_lock(&drm->dmem->mutex);
	for (c = 0; c < npages;) {
		unsigned long i;

		chunk = nouveau_dmem_chunk_first_free_locked(drm);
		if (chunk == NULL) {
			mutex_unlock(&drm->dmem->mutex);
			ret = nouveau_dmem_chunk_alloc(drm);
			if (ret) {
				if (c)
					return 0;
				return ret;
			}
			mutex_lock(&drm->dmem->mutex);
			continue;
		}

		spin_lock(&chunk->lock);
		i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
		while (i < DMEM_CHUNK_NPAGES && c < npages) {
			pages[c] = chunk->pfn_first + i;
			set_bit(i, chunk->bitmap);
			chunk->callocated++;
			c++;

			i = find_next_zero_bit(chunk->bitmap,
					DMEM_CHUNK_NPAGES, i);
		}
		spin_unlock(&chunk->lock);
	}
	mutex_unlock(&drm->dmem->mutex);

	return 0;
}
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	unsigned long pfns[1];
	struct page *page;
	int ret;

	/* FIXME stop all the mismatched APIs ... */
	ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
	if (ret)
		return NULL;

	page = pfn_to_page(pfns[0]);
	get_page(page);
	lock_page(page);
	return page;
}
static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
	unlock_page(page);
	put_page(page);
}
void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}
void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
		nouveau_bo_unpin(chunk->bo);
	}
	list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
		nouveau_bo_unpin(chunk->bo);
	}
	mutex_unlock(&drm->dmem->mutex);
}
void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);

	WARN_ON(!list_empty(&drm->dmem->chunk_free));
	WARN_ON(!list_empty(&drm->dmem->chunk_full));

	list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
		if (chunk->bo) {
			nouveau_bo_unpin(chunk->bo);
			nouveau_bo_ref(NULL, &chunk->bo);
		}
		list_del(&chunk->list);
		kfree(chunk);
	}

	mutex_unlock(&drm->dmem->mutex);
}
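
/*
 * Program the copy engine to move npages pages between the given apertures
 * (VRAM or host memory) using the copy classes handled by
 * nouveau_dmem_migrate_init(). Source/destination physical address mode is
 * selected per aperture, then a pitched, non-pipelined PAGE_SIZE x npages
 * copy is kicked off with the LAUNCH_DMA method.
 */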
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
		    enum nouveau_aper dst_aper, u64 dst_addr,
		    enum nouveau_aper src_aper, u64 src_addr)
{
	struct nouveau_channel *chan = drm->dmem->migrate.chan;
	u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
	int ret;

	ret = RING_SPACE(chan, 13);
	if (ret)
		return ret;

	if (src_aper != NOUVEAU_APER_VIRT) {
		switch (src_aper) {
		case NOUVEAU_APER_VRAM:
			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
			break;
		case NOUVEAU_APER_HOST:
			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
			break;
		default:
			return -EINVAL;
		}
		launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
	}

	if (dst_aper != NOUVEAU_APER_VIRT) {
		switch (dst_aper) {
		case NOUVEAU_APER_VRAM:
			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
			break;
		case NOUVEAU_APER_HOST:
			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
			break;
		default:
			return -EINVAL;
		}
		launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
	}

	BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
	OUT_RING  (chan, upper_32_bits(src_addr));
	OUT_RING  (chan, lower_32_bits(src_addr));
	OUT_RING  (chan, upper_32_bits(dst_addr));
	OUT_RING  (chan, lower_32_bits(dst_addr));
	OUT_RING  (chan, PAGE_SIZE);
	OUT_RING  (chan, PAGE_SIZE);
	OUT_RING  (chan, PAGE_SIZE);
	OUT_RING  (chan, npages);
	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
	OUT_RING  (chan, launch_dma);
	return 0;
}
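
/*
 * Migration reuses the TTM buffer-move channel; only the Pascal, Volta and
 * Turing copy classes are wired up here, which matches the Pascal-or-newer
 * check done in nouveau_dmem_init().
 */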
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {
	case PASCAL_DMA_COPY_A:
	case PASCAL_DMA_COPY_B:
	case VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
		drm->dmem->migrate.chan = drm->ttm.chan;
		return 0;
	default:
		break;
	}
	return -ENODEV;
}
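
/*
 * Device memory setup: allocate the nouveau_dmem state, pick a copy channel,
 * reserve a physical address range for the device-private struct pages with
 * devm_request_free_mem_region()/devm_memremap_pages(), and carve the range
 * into DMEM_CHUNK_SIZE chunks. Chunks start out on chunk_empty and are only
 * backed by VRAM when first needed.
 */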
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
	struct device *device = drm->dev->dev;
	struct resource *res;
	unsigned long i, size, pfn_first;
	int ret;

	/* This only makes sense on PASCAL or newer */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
		return;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunk_free);
	INIT_LIST_HEAD(&drm->dmem->chunk_full);
	INIT_LIST_HEAD(&drm->dmem->chunk_empty);

	size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);

	/* Initialize migration dma helpers before registering memory */
	ret = nouveau_dmem_migrate_init(drm);
	if (ret)
		goto out_free;

	/*
	 * FIXME we need some kind of policy to decide how much VRAM we
	 * want to register with HMM. For now just register everything
	 * and later, if we want to do things like overcommit, we could
	 * revisit this.
	 */
	res = devm_request_free_mem_region(device, &iomem_resource, size);
	if (IS_ERR(res))
		goto out_free;
	drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	drm->dmem->pagemap.res = *res;
	drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
	drm->dmem->pagemap.owner = drm->dev;
	if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
		goto out_free;

	pfn_first = res->start >> PAGE_SHIFT;
	for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
		struct nouveau_dmem_chunk *chunk;
		struct page *page;
		unsigned long j;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (chunk == NULL) {
			nouveau_dmem_fini(drm);
			return;
		}

		chunk->drm = drm;
		chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
		list_add_tail(&chunk->list, &drm->dmem->chunk_empty);

		page = pfn_to_page(chunk->pfn_first);
		for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
			page->zone_device_data = chunk;
	}

	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
	return;
out_free:
	kfree(drm->dmem);
	drm->dmem = NULL;
}
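
/*
 * Copy one page from system RAM into a freshly allocated device page: the
 * source page is DMA-mapped so the copy engine can read it, and the return
 * value is the migrate pfn for the new device page (0 if the page cannot or
 * need not be migrated).
 */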
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		unsigned long src, dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;

	spage = migrate_pfn_to_page(src);
	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
		goto out;

	dpage = nouveau_dmem_page_alloc_locked(drm);
	if (!dpage)
		return 0;

	*dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		goto out_free_page;

	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
			nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
			*dma_addr))
		goto out_dma_unmap;

	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);
out:
	return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
		struct migrate_vma *args, dma_addr_t *dma_addrs)
{
	struct nouveau_fence *fence;
	unsigned long addr = args->start, nr_dma = 0, i;

	for (i = 0; addr < args->end; i++) {
		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
				dma_addrs + nr_dma);
		if (args->dst[i])
			nr_dma++;
		addr += PAGE_SIZE;
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
	migrate_vma_pages(args);
	nouveau_dmem_fence_done(&fence);

	while (nr_dma--) {
		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
				DMA_BIDIRECTIONAL);
	}
	/*
	 * FIXME optimization: update GPU page table to point to newly
	 * migrated memory.
	 */
	migrate_vma_finalize(args);
}
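
/*
 * Migrate a range of a VMA to device memory, processing at most
 * SG_MAX_SINGLE_ALLOC pages per migrate_vma_setup()/finalize() round so the
 * src/dst pfn arrays and the DMA address array stay reasonably small.
 */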
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			 struct vm_area_struct *vma,
			 unsigned long start,
			 unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
	dma_addr_t *dma_addrs;
	struct migrate_vma args = {
		.vma		= vma,
		.start		= start,
	};
	unsigned long c, i;
	int ret = -ENOMEM;

	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
	if (!args.src)
		goto out;
	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
	if (!args.dst)
		goto out_free_src;

	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
	if (!dma_addrs)
		goto out_free_dst;

	for (i = 0; i < npages; i += c) {
		c = min(SG_MAX_SINGLE_ALLOC, npages);
		args.end = start + (c << PAGE_SHIFT);
		ret = migrate_vma_setup(&args);
		if (ret)
			goto out_free_dma;

		if (args.cpages)
			nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
		args.start = args.end;
	}

	ret = 0;
out_free_dma:
	kfree(dma_addrs);
out_free_dst:
	kfree(args.dst);
out_free_src:
	kfree(args.src);
out:
	return ret;
}
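
/*
 * Helpers for the SVM fault path: nouveau_dmem_page() checks that a
 * device-private page really belongs to this device, and
 * nouveau_dmem_convert_pfn() rewrites HMM pfn entries so they carry the
 * page's VRAM address (plus the VRAM flag) for the GPU page-table update.
 */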
static inline bool
nouveau_dmem_page(struct nouveau_drm *drm, struct page *page)
{
	return is_device_private_page(page) && drm->dmem == page_to_dmem(page);
}
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
			 struct hmm_range *range)
{
	unsigned long i, npages;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct page *page;
		uint64_t addr;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (!is_device_private_page(page))
			continue;

		if (!nouveau_dmem_page(drm, page)) {
			WARN(1, "Some unknown device memory!\n");
			range->pfns[i] = 0;
			continue;
		}

		addr = nouveau_dmem_page_addr(page);
		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
		range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
	}
}