/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed by creating a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
 * be conservative we do this every time we acquire or release an instobj, but
 * ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
 * goes beyond a certain threshold. At the moment this limit is 1MB.
 */
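/*
 * For reference, instobjs are not used through the functions below directly
 * but via the generic nvkm_memory accessors. A typical access sequence looks
 * roughly like the following sketch (illustrative only, not part of this
 * file):
 *
 *	nvkm_kmap(memory);			- gk20a_instobj_acquire()
 *	nvkm_wo32(memory, 0x00, 0x00000001);	- gk20a_instobj_wr32()
 *	data = nvkm_ro32(memory, 0x00);		- gk20a_instobj_rd32()
 *	nvkm_done(memory);			- gk20a_instobj_release()
 */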
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mem mem;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
	struct list_head vaddr_node;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	u32 *cpuaddr;
	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)
/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* will point to the higher half of pages */
	dma_addr_t *dma_addrs;
	/* array of base.mem->size pages (+ dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)
struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	spinlock_t lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by DMA API */
	struct dma_attrs attrs;

	void __iomem * (*cpu_map)(struct nvkm_memory *);
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_HOST;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mem.size << 12;
}
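/*
 * Build a write-combined CPU mapping of a DMA-allocated instobj by vmap()ing
 * its pages. This relies on the DMA allocation being physically contiguous,
 * which is the case for the DMA backend used here.
 */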
static void __iomem *
gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct device *dev = node->base.imem->base.subdev.device->dev;
	int npages = nvkm_memory_size(memory) >> 12;
	struct page *pages[npages];
	int i;

	/* phys_to_page does not exist on all platforms... */
	pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
	for (i = 1; i < npages; i++)
		pages[i] = pages[0] + i;

	return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
static void __iomem *
gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	int npages = nvkm_memory_size(memory) >> 12;

	return vmap(node->pages, npages, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}
/*
 * Recycle CPU mappings from the LRU list until the requested size fits within
 * the vaddr budget. Must be called while holding gk20a_instmem::lock.
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		struct gk20a_instobj *obj;

		/* no candidate that can be unmapped, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
				       vaddr_node);
		list_del(&obj->vaddr_node);
		vunmap(obj->vaddr);
		obj->vaddr = NULL;
		imem->vaddr_use -= nvkm_memory_size(&obj->memory);
		nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
			   imem->vaddr_use, imem->vaddr_max);
	}
}
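/*
 * Acquire a CPU mapping for an instobj: flush the GPU L2 cache, then reuse
 * the existing vmap if the object is still mapped (removing it from the LRU),
 * or create a new write-combined mapping, recycling old mappings first if the
 * vaddr budget would be exceeded.
 */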
static void __iomem *
gk20a_instobj_acquire(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);
	unsigned long flags;

	nvkm_ltc_flush(ltc);

	spin_lock_irqsave(&imem->lock, flags);

	if (node->vaddr) {
		/* remove us from the LRU list since we cannot be unmapped */
		list_del(&node->vaddr_node);

		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	node->vaddr = imem->cpu_map(memory);

	if (!node->vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	spin_unlock_irqrestore(&imem->lock, flags);

	return node->vaddr;
}
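/*
 * Release the CPU mapping of an instobj. The mapping is kept and placed on
 * the LRU list so it can be recycled later; the GPU L2 cache is invalidated
 * so the GPU observes the CPU writes.
 */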
static void
gk20a_instobj_release(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	/* add ourselves to the LRU list so our CPU mapping can be freed */
	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

	spin_unlock_irqrestore(&imem->lock, flags);

	wmb();
	nvkm_ltc_invalidate(ltc);
}
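/*
 * rd32/wr32 access the object through the CPU mapping established by
 * acquire(); offsets are in bytes.
 */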
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}
static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	nvkm_vm_map_at(vma, offset, &node->mem);
}
/*
 * Clear the CPU mapping of an instobj if it exists
 */
static void
gk20a_instobj_dtor(struct gk20a_instobj *node)
{
	struct gk20a_instmem *imem = node->imem;
	struct gk20a_instobj *obj;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	if (!node->vaddr)
		goto out;

	list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
		if (obj == node) {
			list_del(&obj->vaddr_node);
			break;
		}
	}
	vunmap(node->vaddr);
	node->vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&node->memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	spin_unlock_irqrestore(&imem->lock, flags);
}
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	gk20a_instobj_dtor(&node->base);

	if (unlikely(!node->cpuaddr))
		goto out;

	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
		       node->handle, &imem->attrs);

out:
	return node;
}
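/*
 * Destroy an IOMMU-backed instobj: drop its CPU mapping, unmap every page
 * from the GPU address space, DMA-unmap and free the backing pages, then
 * return the reserved area to the GPU address space allocator.
 */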
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r;
	int i;

	gk20a_instobj_dtor(&node->base);

	if (unlikely(list_empty(&node->base.mem.regions)))
		goto out;

	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
			     rl_entry);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}
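/*
 * The two nvkm_memory implementations below differ only in their destructor;
 * every other operation is shared between the DMA and IOMMU backends.
 */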
static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire,
	.release = gk20a_instobj_release,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};
static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire,
	.release = gk20a_instobj_release,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};
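/*
 * Allocate an instobj backed by physically contiguous memory obtained from
 * the DMA API. The allocation is presented as a single nvkm_mm_node covering
 * the whole range so it can be mapped with small (4K) pages.
 */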
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);

	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					&node->handle, GFP_KERNEL,
					&imem->attrs);
	if (!node->cpuaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mem.offset = node->handle;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

	return 0;
}
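/*
 * Allocate an instobj backed by individual pages that are made contiguous in
 * the GPU address space using the IOMMU API: allocate and DMA-map the pages,
 * reserve a range of GPU addresses, then map each page into that range.
 */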
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&r->rl_entry, &node->base.mem.regions);

	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];
		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}
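/*
 * Common allocation entry point: round size and alignment up to page bounds
 * and dispatch to the IOMMU or DMA constructor depending on which backend was
 * selected at probe time.
 */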
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* present memory for being mapped using small pages */
	node->mem.size = size >> 12;
	node->mem.memtype = 0;
	node->mem.page_shift = 12;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, node->mem.offset);

	return 0;
}
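/*
 * Subdev destructor: by now all instobjs should be gone, so warn if any CPU
 * mappings are still accounted for.
 */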
static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}
static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.memory_new = gk20a_instobj_new,
	.persistent = true,
	.zero = false,
};
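/*
 * Probe-time constructor: use the Tegra IOMMU backend if an IOMMU has been
 * probed, otherwise fall back to the DMA API with attributes that skip the
 * kernel mapping since we create our own write-combined mappings.
 */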
int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	spin_lock_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->cpu_map = gk20a_instobj_cpu_map_iommu;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		init_dma_attrs(&imem->attrs);
		/* We will access the memory through our own mapping */
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
		imem->cpu_map = gk20a_instobj_cpu_map_dma;

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}