/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * providing coherent read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU mapping has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU mapping is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed using PRAMIN (i.e. using
 * the GPU path) to ensure these operations are coherent for the GPU. This
 * allows us to use more "relaxed" allocation parameters when using the DMA
 * API, since we never need a kernel mapping.
 */
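/*
 * Clients do not call into this file directly: instance memory is normally
 * requested through the generic nvkm_memory_new() entry point with the
 * NVKM_MEM_TARGET_INST target, which ends up in gk20a_instobj_new() below.
 * (Sketch of the call path; the exact entry point used by callers is an
 * assumption.)
 */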
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
struct gk20a_instobj {
        struct nvkm_memory memory;
        struct gk20a_instmem *imem;
        struct nvkm_mem mem;
};
/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
        struct gk20a_instobj base;

        void *cpuaddr;
        dma_addr_t handle;
        struct nvkm_mm_node r;
};
/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
        struct gk20a_instobj base;

        /* array of base.mem.size pages */
        struct page *pages[];
};
struct gk20a_instmem {
        struct nvkm_instmem base;
        unsigned long lock_flags;
        spinlock_t lock;
        u64 addr;

        /* Only used if an IOMMU is present */
        struct mutex *mm_mutex;
        struct nvkm_mm *mm;
        struct iommu_domain *domain;
        unsigned long iommu_pgshift;

        /* Only used by DMA API */
        struct dma_attrs attrs;
};
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_HOST;
}
static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
        return gk20a_instobj(memory)->mem.offset;
}
static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
        return (u64)gk20a_instobj(memory)->mem.size << 12;
}
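/*
 * acquire() does not hand back a CPU mapping: it only takes the PRAMIN lock
 * (saving the IRQ flags in the instmem) and returns NULL, so all accesses go
 * through the rd32/wr32 hooks below. release() drops the same lock.
 */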
static void __iomem *
gk20a_instobj_acquire(struct nvkm_memory *memory)
{
        struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;
        unsigned long flags;

        spin_lock_irqsave(&imem->lock, flags);
        imem->lock_flags = flags;
        return NULL;
}
static void
gk20a_instobj_release(struct nvkm_memory *memory)
{
        struct gk20a_instmem *imem = gk20a_instobj(memory)->imem;

        spin_unlock_irqrestore(&imem->lock, imem->lock_flags);
}
/*
 * Use PRAMIN to read/write data and avoid coherency issues.
 * PRAMIN uses the GPU path and ensures data will always be coherent.
 *
 * A dynamic mapping based solution would be desirable in the future, but
 * the issue remains of how to maintain coherency efficiently. On ARM it is
 * not easy (if possible at all?) to create uncached temporary mappings.
 */
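/*
 * The accessors below slide a 1 MiB PRAMIN window over the target address:
 * the 1 MiB-aligned base of the instance address is written to register
 * 0x001700 (shifted right by 16) to select the window, and the low 20 bits
 * index into the BAR0 aperture at 0x700000. The last window base is cached
 * in imem->addr so consecutive accesses within the same 1 MiB region skip
 * the window reprogramming.
 */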
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
        u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;
        u32 data;

        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        data = nvkm_rd32(device, 0x700000 + addr);

        return data;
}
static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_device *device = imem->base.subdev.device;
        u64 base = (node->mem.offset + offset) & 0xffffff00000ULL;
        u64 addr = (node->mem.offset + offset) & 0x000000fffffULL;

        if (unlikely(imem->addr != base)) {
                nvkm_wr32(device, 0x001700, base >> 16);
                imem->addr = base;
        }
        nvkm_wr32(device, 0x700000 + addr, data);
}
static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);

        nvkm_vm_map_at(vma, offset, &node->mem);
}
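/*
 * Destructor for the DMA path: releases the physically contiguous buffer
 * allocated by gk20a_instobj_ctor_dma(), unless the allocation failed and
 * cpuaddr was never set.
 */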
static void
gk20a_instobj_dtor_dma(struct gk20a_instobj *_node)
{
        struct gk20a_instobj_dma *node = (void *)_node;
        struct gk20a_instmem *imem = _node->imem;
        struct device *dev = imem->base.subdev.device->dev;

        if (unlikely(!node->cpuaddr))
                return;

        dma_free_attrs(dev, _node->mem.size << PAGE_SHIFT, node->cpuaddr,
                       node->handle, &imem->attrs);
}
static void
gk20a_instobj_dtor_iommu(struct gk20a_instobj *_node)
{
        struct gk20a_instobj_iommu *node = (void *)_node;
        struct gk20a_instmem *imem = _node->imem;
        struct nvkm_mm_node *r;
        int i;

        if (unlikely(list_empty(&_node->mem.regions)))
                return;

        r = list_first_entry(&_node->mem.regions, struct nvkm_mm_node,
                             rl_entry);

        /* clear bit 34 to unmap pages */
        r->offset &= ~BIT(34 - imem->iommu_pgshift);

        /* Unmap pages from GPU address space and free them */
        for (i = 0; i < _node->mem.size; i++) {
                iommu_unmap(imem->domain,
                            (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
                __free_page(node->pages[i]);
        }

        /* Release area from GPU address space */
        mutex_lock(imem->mm_mutex);
        nvkm_mm_free(imem->mm, &r);
        mutex_unlock(imem->mm_mutex);
}
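/*
 * Common destructor: dispatch to the IOMMU or DMA variant depending on how
 * the instmem was set up.
 */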
static void *
gk20a_instobj_dtor(struct nvkm_memory *memory)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;

        if (imem->domain)
                gk20a_instobj_dtor_iommu(node);
        else
                gk20a_instobj_dtor_dma(node);

        return node;
}
static const struct nvkm_memory_func
gk20a_instobj_func = {
        .dtor = gk20a_instobj_dtor,
        .target = gk20a_instobj_target,
        .addr = gk20a_instobj_addr,
        .size = gk20a_instobj_size,
        .acquire = gk20a_instobj_acquire,
        .release = gk20a_instobj_release,
        .rd32 = gk20a_instobj_rd32,
        .wr32 = gk20a_instobj_wr32,
        .map = gk20a_instobj_map,
};
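/*
 * DMA constructor: a single physically contiguous allocation, exposed to the
 * rest of nvkm as one nvkm_mm_node region spanning the whole object.
 */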
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
                       struct gk20a_instobj **_node)
{
        struct gk20a_instobj_dma *node;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct device *dev = subdev->device->dev;

        if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
                return -ENOMEM;
        *_node = &node->base;

        node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                        &node->handle, GFP_KERNEL,
                                        &imem->attrs);
        if (!node->cpuaddr) {
                nvkm_error(subdev, "cannot allocate DMA memory\n");
                return -ENOMEM;
        }

        /* alignment check */
        if (unlikely(node->handle & (align - 1)))
                nvkm_warn(subdev,
                          "memory not aligned as requested: %pad (0x%x)\n",
                          &node->handle, align);

        /* present memory for being mapped using small pages */
        node->r.type = 12;
        node->r.offset = node->handle >> 12;
        node->r.length = (npages << PAGE_SHIFT) >> 12;

        node->base.mem.offset = node->handle;

        INIT_LIST_HEAD(&node->base.mem.regions);
        list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

        return 0;
}
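/*
 * IOMMU constructor: allocate individual pages, reserve a contiguous range
 * of GPU addresses from the IOMMU allocator, and map each page into it.
 */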
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
                         struct gk20a_instobj **_node)
{
        struct gk20a_instobj_iommu *node;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct nvkm_mm_node *r;
        int ret, i;

        if (!(node = kzalloc(sizeof(*node) +
                             sizeof(node->pages[0]) * npages, GFP_KERNEL)))
                return -ENOMEM;
        *_node = &node->base;

        /* Allocate backing memory */
        for (i = 0; i < npages; i++) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (p == NULL) {
                        ret = -ENOMEM;
                        goto free_pages;
                }
                node->pages[i] = p;
        }

        mutex_lock(imem->mm_mutex);
        /* Reserve area from GPU address space */
        ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
                           align >> imem->iommu_pgshift, &r);
        mutex_unlock(imem->mm_mutex);
        if (ret) {
                nvkm_error(subdev, "virtual space is full!\n");
                goto free_pages;
        }

        /* Map into GPU address space */
        for (i = 0; i < npages; i++) {
                struct page *p = node->pages[i];
                u32 offset = (r->offset + i) << imem->iommu_pgshift;

                ret = iommu_map(imem->domain, offset, page_to_phys(p),
                                PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
                if (ret < 0) {
                        nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
                        while (i-- > 0) {
                                offset -= PAGE_SIZE;
                                iommu_unmap(imem->domain, offset, PAGE_SIZE);
                        }
                        goto release_area;
                }
        }

        /* Bit 34 tells that an address is to be resolved through the IOMMU */
        r->offset |= BIT(34 - imem->iommu_pgshift);
        node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

        INIT_LIST_HEAD(&node->base.mem.regions);
        list_add_tail(&r->rl_entry, &node->base.mem.regions);

        return 0;

release_area:
        mutex_lock(imem->mm_mutex);
        nvkm_mm_free(imem->mm, &r);
        mutex_unlock(imem->mm_mutex);
free_pages:
        for (i = 0; i < npages && node->pages[i] != NULL; i++)
                __free_page(node->pages[i]);
        return ret;
}
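/*
 * memory_new() hook: round the request to page granularity and hand it to
 * whichever constructor matches the probed setup (IOMMU or DMA).
 */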
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                  struct nvkm_memory **pmemory)
{
        struct gk20a_instmem *imem = gk20a_instmem(base);
        struct gk20a_instobj *node = NULL;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        int ret;

        nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
                   imem->domain ? "IOMMU" : "DMA", size, align);

        /* Round size and align to page bounds */
        size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
        align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

        if (imem->domain)
                ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
                                               align, &node);
        else
                ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
                                             align, &node);
        *pmemory = node ? &node->memory : NULL;
        if (ret)
                return ret;

        nvkm_memory_ctor(&gk20a_instobj_func, &node->memory);
        node->imem = imem;

        /* present memory for being mapped using small pages */
        node->mem.size = size >> 12;
        node->mem.memtype = 0;
        node->mem.page_shift = 12;

        nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
                   size, align, node->mem.offset);

        return 0;
}
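/*
 * fini() invalidates the cached PRAMIN window base so the next rd32/wr32
 * reprograms the window register rather than trusting stale state.
 */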
static void
gk20a_instmem_fini(struct nvkm_instmem *base)
{
        gk20a_instmem(base)->addr = ~0ULL;
}
static const struct nvkm_instmem_func
gk20a_instmem = {
        .fini = gk20a_instmem_fini,
        .memory_new = gk20a_instobj_new,
};
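/*
 * Constructor for the instmem subdev: picks the IOMMU path when the Tegra
 * platform device exposes an IOMMU domain, and otherwise falls back to the
 * DMA API with relaxed attributes (no kernel mapping is ever needed, since
 * all CPU accesses go through PRAMIN).
 */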
int
gk20a_instmem_new(struct nvkm_device *device, int index,
                  struct nvkm_instmem **pimem)
{
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        struct gk20a_instmem *imem;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
        spin_lock_init(&imem->lock);
        *pimem = &imem->base;

        if (tdev->iommu.domain) {
                imem->domain = tdev->iommu.domain;
                imem->mm = &tdev->iommu.mm;
                imem->iommu_pgshift = tdev->iommu.pgshift;
                imem->mm_mutex = &tdev->iommu.mutex;

                nvkm_info(&imem->base.subdev, "using IOMMU\n");
        } else {
                init_dma_attrs(&imem->attrs);
                /*
                 * We will access instmem through PRAMIN and thus do not need a
                 * consistent CPU pointer or kernel mapping
                 */
                dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);

                nvkm_info(&imem->base.subdev, "using DMA API\n");
        }

        return 0;
}