// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/limits.h>
#include <linux/swiotlb.h>

#include "nouveau_drv.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"

#include <drm/drm_legacy.h>

#include <core/tegra.h>

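/*
 * Common ->free() hook shared by the three resource managers below; the
 * resource's backing nouveau_mem is all that needs releasing.
 */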
static void
nouveau_manager_del(struct ttm_resource_manager *man, struct ttm_resource *reg)
{
        nouveau_mem_del(reg);
}

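/*
 * ->alloc() hook for the VRAM manager: create a nouveau_mem for the
 * buffer object and back it with VRAM, honouring the BO's contiguity
 * and page-size requirements.  Devices without any VRAM fail early.
 */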
static int
nouveau_vram_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        if (drm->client.device.info.ram_size == 0)
                return -ENOMEM;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
        if (ret) {
                nouveau_mem_del(reg);
                return ret;
        }

        return 0;
}

const struct ttm_resource_manager_func nouveau_vram_manager = {
        .alloc = nouveau_vram_manager_new,
        .free = nouveau_manager_del,
};

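/*
 * ->alloc() hook for the GART manager on NV50 (Tesla) and newer: only
 * the nouveau_mem is created here; the GPU virtual address is assigned
 * when the memory is bound, so the resource starts at offset 0.
 */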
static int
nouveau_gart_manager_new(struct ttm_resource_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_resource *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        reg->start = 0;
        return 0;
}

const struct ttm_resource_manager_func nouveau_gart_manager = {
        .alloc = nouveau_gart_manager_new,
        .free = nouveau_manager_del,
};

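/*
 * ->alloc() hook for the legacy (pre-NV50) GART manager: besides
 * creating the nouveau_mem, reserve a virtual-address range up front
 * with nvif_vmm_get() and derive the resource's start offset from it.
 */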
static int
nv04_gart_manager_new(struct ttm_resource_manager *man,
                      struct ttm_buffer_object *bo,
                      const struct ttm_place *place,
                      struct ttm_resource *reg)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;

        ret = nouveau_mem_new(&drm->master, nvbo->kind, nvbo->comp, reg);
        if (ret)
                return ret;

        mem = nouveau_mem(reg);
        ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
                           (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
        if (ret) {
                nouveau_mem_del(reg);
                return ret;
        }

        reg->start = mem->vma[0].addr >> PAGE_SHIFT;
        return 0;
}

const struct ttm_resource_manager_func nv04_gart_manager = {
        .alloc = nv04_gart_manager_new,
        .free = nouveau_manager_del,
};

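/*
 * Page-fault handler for TTM-backed mappings: wraps the generic TTM
 * fault path with nouveau's reserve notification, and keeps the BO off
 * the IO-reserve LRU while the fault is serviced.  On VM_FAULT_RETRY
 * without FAULT_FLAG_RETRY_NOWAIT, TTM has already dropped the
 * reservation, so the unlock must be skipped.
 */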
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        pgprot_t prot;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        ret = nouveau_ttm_fault_reserve_notify(bo);
        if (ret)
                goto error_unlock;

        nouveau_bo_del_io_reserve_lru(bo);
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
        nouveau_bo_add_io_reserve_lru(bo);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

error_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
        .fault = nouveau_ttm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access,
};

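/*
 * mmap() entry point for the DRM file: hand the VMA to TTM, then
 * install nouveau's own vm_ops so faults go through nouveau_ttm_fault().
 */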
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
        int ret;

        ret = ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
        if (ret)
                return ret;

        vma->vm_ops = &nouveau_ttm_vm_ops;
        return 0;
}

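/*
 * Look up the MMU type indices used for host (system) memory, one
 * coherent and one non-coherent, for either plain (kind == 0) or
 * kind-tagged allocations, and cache them in drm->ttm.
 */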
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
        struct nvif_mmu *mmu = &drm->client.mmu;
        int typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
                                   kind | NVIF_MEM_COHERENT);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_host[!!kind] = typei;

        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
        if (typei < 0)
                return -ENOSYS;

        drm->ttm.type_ncoh[!!kind] = typei;
        return 0;
}

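/*
 * Create the TTM_PL_VRAM resource manager.  NV50 (Tesla) and newer use
 * nouveau's own manager, so allocations can carry kind/compression
 * metadata; older chips use TTM's generic range manager.
 */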
static int
nouveau_ttm_init_vram(struct nouveau_drm *drm)
{
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                struct ttm_resource_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

                if (!man)
                        return -ENOMEM;

                man->func = &nouveau_vram_manager;

                ttm_resource_manager_init(man,
                                          drm->gem.vram_available >> PAGE_SHIFT);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man);
                ttm_resource_manager_set_used(man, true);
                return 0;
        } else {
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false,
                                          drm->gem.vram_available >> PAGE_SHIFT);
        }
}

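/*
 * Tear down the TTM_PL_VRAM manager created above, evicting any
 * remaining buffers first.
 */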
static void
nouveau_ttm_fini_vram(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
                kfree(man);
        } else {
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM);
        }
}

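/*
 * Create the TTM_PL_TT (GART) resource manager: nouveau's own manager
 * on NV50 (Tesla) and newer, the legacy nv04 manager when no AGP bridge
 * is present, or TTM's generic range manager for AGP.
 */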
static int
nouveau_ttm_init_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man;
        unsigned long size_pages = drm->gem.gart_available >> PAGE_SHIFT;
        const struct ttm_resource_manager_func *func = NULL;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                func = &nouveau_gart_manager;
        else if (!drm->agp.bridge)
                func = &nv04_gart_manager;
        else
                return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true,
                                          size_pages);

        man = kzalloc(sizeof(*man), GFP_KERNEL);
        if (!man)
                return -ENOMEM;

        man->func = func;
        man->use_tt = true;
        ttm_resource_manager_init(man, size_pages);
        ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, man);
        ttm_resource_manager_set_used(man, true);
        return 0;
}

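/*
 * Tear down the TTM_PL_TT manager, mirroring the three cases in
 * nouveau_ttm_init_gtt().
 */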
static void
nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
{
        struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_TT);

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
            drm->agp.bridge) {
                ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
        } else {
                ttm_resource_manager_set_used(man, false);
                ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
                ttm_resource_manager_cleanup(man);
                ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
                kfree(man);
        }
}

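/*
 * One-time TTM setup for the device: probe host and VRAM memory types,
 * take over AGP parameters when an AGP bridge is present, initialise
 * the TTM BO device (with SWIOTLB bounce buffering on x86 when active),
 * set up write-combined access to the VRAM BAR, and create the VRAM and
 * GART resource managers.
 */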
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);
        struct nvkm_pci *pci = device->pci;
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct drm_device *dev = drm->dev;
        bool need_swiotlb = false;
        int typei, ret;

        ret = nouveau_ttm_init_host(drm, 0);
        if (ret)
                return ret;

        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
            drm->client.device.info.chipset != 0x50) {
                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
                if (ret)
                        return ret;
        }

        if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
            drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                typei = nvif_mmu_type(mmu, NVIF_MEM_VRAM | NVIF_MEM_MAPPABLE |
                                           NVIF_MEM_KIND |
                                           NVIF_MEM_COMP |
                                           NVIF_MEM_DISP);
                if (typei < 0)
                        return -ENOSYS;

                drm->ttm.type_vram = typei;
        } else {
                drm->ttm.type_vram = -1;
        }

        if (pci && pci->agp.bridge) {
                drm->agp.bridge = pci->agp.bridge;
                drm->agp.base = pci->agp.base;
                drm->agp.size = pci->agp.size;
                drm->agp.cma = pci->agp.cma;
        }

#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
        need_swiotlb = !!swiotlb_nr_tbl();
#endif

        ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
                                 drm->dev->dev, dev->anon_inode->i_mapping,
                                 dev->vma_offset_manager, need_swiotlb,
                                 drm->client.mmu.dmabits <= 32);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available = drm->client.device.info.ram_user;

        arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
                                   device->func->resource_size(device, 1));

        ret = nouveau_ttm_init_vram(drm);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
                                         device->func->resource_size(device, 1));

        /* GART init */
        if (!drm->agp.bridge) {
                drm->gem.gart_available = drm->client.vmm.vmm.limit;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = nouveau_ttm_init_gtt(drm);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        mutex_init(&drm->ttm.io_reserve_mutex);
        INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}

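/*
 * Undo nouveau_ttm_init(): destroy both resource managers, release the
 * TTM BO device, and drop the write-combine mappings on the VRAM BAR.
 */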
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        struct nvkm_device *device = nvxx_device(&drm->client.device);

        nouveau_ttm_fini_vram(drm);
        nouveau_ttm_fini_gtt(drm);

        ttm_bo_device_release(&drm->ttm.bdev);

        arch_phys_wc_del(drm->ttm.mtrr);
        drm->ttm.mtrr = 0;
        arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
                                device->func->resource_size(device, 1));
}