drm/nouveau/mmu/gp100-: add privileged methods for fault replay/cancel
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
/*
 * Copyright 2017 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "uvmm.h"
#include "umem.h"
#include "ummu.h"

#include <core/client.h>
#include <core/memory.h>

#include <nvif/if000c.h>
#include <nvif/unpack.h>

static const struct nvkm_object_func nvkm_uvmm;
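
/* Resolve a client object handle to the VMM wrapped by the matching
 * UVMM object; returns an ERR_PTR-encoded error if the lookup fails.
 */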
struct nvkm_vmm *
nvkm_uvmm_search(struct nvkm_client *client, u64 handle)
{
        struct nvkm_object *object;

        object = nvkm_object_search(client, handle, &nvkm_uvmm);
        if (IS_ERR(object))
                return (void *)object;

        return nvkm_uvmm(object)->vmm;
}

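/* NVIF_VMM_V0_PFNCLR: privileged method to remove PFN mappings (as used
 * by the replayable-fault path) from a range of the address space.
 */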
static int
nvkm_uvmm_mthd_pfnclr(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnclr_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (!client->super)
                return -ENOENT;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_unmap(vmm, addr, size);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}

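/* NVIF_VMM_V0_PFNMAP: privileged method to map an array of PFNs at the
 * given page shift; argc must supply exactly one phys[] entry per page
 * in the range (size >> page).
 */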
static int
nvkm_uvmm_mthd_pfnmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_pfnmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        int ret = -ENOSYS;
        u64 addr, size, *phys;
        u8  page;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                page = args->v0.page;
                addr = args->v0.addr;
                size = args->v0.size;
                phys = args->v0.phys;
                if (argc != (size >> page) * sizeof(args->v0.phys[0]))
                        return -EINVAL;
        } else
                return ret;

        if (!client->super)
                return -ENOENT;

        if (size) {
                mutex_lock(&vmm->mutex);
                ret = nvkm_vmm_pfn_map(vmm, page, addr, size, phys);
                mutex_unlock(&vmm->mutex);
        }

        return ret;
}

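/* NVIF_VMM_V0_UNMAP: unmap the memory bound to an allocated region;
 * the region itself stays allocated until a PUT.
 */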
static int
nvkm_uvmm_mthd_unmap(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_unmap_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, addr);
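        /* Comma-operator idiom used here and below: pre-load the error
         * code, then test for the failure condition.
         */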
        if (ret = -ENOENT, !vma || vma->addr != addr) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx",
                          addr, vma ? vma->addr : ~0ULL);
                goto done;
        }

        if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
                          vma->user, !client->super, vma->busy);
                goto done;
        }

        if (ret = -EINVAL, !vma->memory) {
                VMM_DEBUG(vmm, "unmapped");
                goto done;
        }

        nvkm_vmm_unmap_locked(vmm, vma, false);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}

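/* NVIF_VMM_V0_MAP: look up an nvkm_memory object by handle and map it
 * into an allocated region, splitting the region first when only a
 * sub-range of it is being mapped.
 */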
static int
nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_map_v0 v0;
        } *args = argv;
        u64 addr, size, handle, offset;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        struct nvkm_memory *memory;
        int ret = -ENOSYS;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, true))) {
                addr = args->v0.addr;
                size = args->v0.size;
                handle = args->v0.memory;
                offset = args->v0.offset;
        } else
                return ret;

        memory = nvkm_umem_search(client, handle);
        if (IS_ERR(memory)) {
                VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
                return PTR_ERR(memory);
        }

        mutex_lock(&vmm->mutex);
        if (ret = -ENOENT, !(vma = nvkm_vmm_node_search(vmm, addr))) {
                VMM_DEBUG(vmm, "lookup %016llx", addr);
                goto fail;
        }

        if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
                          vma->user, !client->super, vma->busy);
                goto fail;
        }

        if (ret = -EINVAL, vma->mapped && !vma->memory) {
                VMM_DEBUG(vmm, "pfnmap %016llx", addr);
                goto fail;
        }

        if (ret = -EINVAL, vma->addr != addr || vma->size != size) {
                if (addr + size > vma->addr + vma->size || vma->memory ||
                    (vma->refd == NVKM_VMA_PAGE_NONE && !vma->mapref)) {
                        VMM_DEBUG(vmm, "split %d %d %d "
                                       "%016llx %016llx %016llx %016llx",
                                  !!vma->memory, vma->refd, vma->mapref,
                                  addr, size, vma->addr, (u64)vma->size);
                        goto fail;
                }

                vma = nvkm_vmm_node_split(vmm, vma, addr, size);
                if (!vma) {
                        ret = -ENOMEM;
                        goto fail;
                }
        }
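        /* Flag the node busy and drop the mutex, so that nothing else can
         * touch it while the (potentially slow) backend map runs unlocked.
         */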
        vma->busy = true;
        mutex_unlock(&vmm->mutex);

        ret = nvkm_memory_map(memory, offset, vmm, vma, argv, argc);
        if (ret == 0) {
                /* Successful map will clear vma->busy. */
                nvkm_memory_unref(&memory);
                return 0;
        }

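        /* Backend map failed: clear the busy flag and tear down the region
         * before releasing the memory reference.
         */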
        mutex_lock(&vmm->mutex);
        vma->busy = false;
        nvkm_vmm_unmap_region(vmm, vma);
fail:
        mutex_unlock(&vmm->mutex);
        nvkm_memory_unref(&memory);
        return ret;
}

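/* NVIF_VMM_V0_PUT: release an allocation made with GET; inner nodes
 * created by splitting (vma->part) can't be freed directly.
 */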
static int
nvkm_uvmm_mthd_put(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_put_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        u64 addr;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                addr = args->v0.addr;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        vma = nvkm_vmm_node_search(vmm, addr);
        if (ret = -ENOENT, !vma || vma->addr != addr || vma->part) {
                VMM_DEBUG(vmm, "lookup %016llx: %016llx %d", addr,
                          vma ? vma->addr : ~0ULL, vma ? vma->part : 0);
                goto done;
        }

        if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
                VMM_DEBUG(vmm, "denied %016llx: %d %d %d", addr,
                          vma->user, !client->super, vma->busy);
                goto done;
        }

        nvkm_vmm_put_locked(vmm, vma);
        ret = 0;
done:
        mutex_unlock(&vmm->mutex);
        return ret;
}

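/* NVIF_VMM_V0_GET: allocate a region of address space; the type selects
 * whether page tables are pre-referenced (PTES) or only the address
 * range is reserved (ADDR).  The chosen address is returned in args.
 */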
static int
nvkm_uvmm_mthd_get(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        struct nvkm_client *client = uvmm->object.client;
        union {
                struct nvif_vmm_get_v0 v0;
        } *args = argv;
        struct nvkm_vmm *vmm = uvmm->vmm;
        struct nvkm_vma *vma;
        int ret = -ENOSYS;
        bool getref, mapref, sparse;
        u8 page, align;
        u64 size;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                getref = args->v0.type == NVIF_VMM_GET_V0_PTES;
                mapref = args->v0.type == NVIF_VMM_GET_V0_ADDR;
                sparse = args->v0.sparse;
                page = args->v0.page;
                align = args->v0.align;
                size = args->v0.size;
        } else
                return ret;

        mutex_lock(&vmm->mutex);
        ret = nvkm_vmm_get_locked(vmm, getref, mapref, sparse,
                                  page, align, size, &vma);
        mutex_unlock(&vmm->mutex);
        if (ret)
                return ret;

        args->v0.addr = vma->addr;
        vma->user = !client->super;
        return ret;
}

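/* NVIF_VMM_V0_PAGE: query one of the VMM's supported page sizes and its
 * capabilities (sparse, VRAM, host, compression).
 */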
static int
nvkm_uvmm_mthd_page(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
{
        union {
                struct nvif_vmm_page_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        int ret = -ENOSYS;
        u8 type, index, nr;

        page = uvmm->vmm->func->page;
        for (nr = 0; page[nr].shift; nr++);

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) {
                if ((index = args->v0.index) >= nr)
                        return -EINVAL;
                type = page[index].type;
                args->v0.shift = page[index].shift;
                args->v0.sparse = !!(type & NVKM_VMM_PAGE_SPARSE);
                args->v0.vram = !!(type & NVKM_VMM_PAGE_VRAM);
                args->v0.host = !!(type & NVKM_VMM_PAGE_HOST);
                args->v0.comp = !!(type & NVKM_VMM_PAGE_COMP);
        } else
                return -ENOSYS;

        return 0;
}

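/* Top-level method dispatch; the 0x00..0x7f method range is forwarded
 * to backend-specific handlers (e.g. gp100's privileged fault
 * replay/cancel methods).
 */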
static int
nvkm_uvmm_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        switch (mthd) {
        case NVIF_VMM_V0_PAGE  : return nvkm_uvmm_mthd_page  (uvmm, argv, argc);
        case NVIF_VMM_V0_GET   : return nvkm_uvmm_mthd_get   (uvmm, argv, argc);
        case NVIF_VMM_V0_PUT   : return nvkm_uvmm_mthd_put   (uvmm, argv, argc);
        case NVIF_VMM_V0_MAP   : return nvkm_uvmm_mthd_map   (uvmm, argv, argc);
        case NVIF_VMM_V0_UNMAP : return nvkm_uvmm_mthd_unmap (uvmm, argv, argc);
        case NVIF_VMM_V0_PFNMAP: return nvkm_uvmm_mthd_pfnmap(uvmm, argv, argc);
        case NVIF_VMM_V0_PFNCLR: return nvkm_uvmm_mthd_pfnclr(uvmm, argv, argc);
        case NVIF_VMM_V0_MTHD(0x00) ... NVIF_VMM_V0_MTHD(0x7f):
                if (uvmm->vmm->func->mthd) {
                        return uvmm->vmm->func->mthd(uvmm->vmm,
                                                     uvmm->object.client,
                                                     mthd, argv, argc);
                }
                break;
        default:
                break;
        }
        return -EINVAL;
}

static void *
nvkm_uvmm_dtor(struct nvkm_object *object)
{
        struct nvkm_uvmm *uvmm = nvkm_uvmm(object);
        nvkm_vmm_unref(&uvmm->vmm);
        return uvmm;
}

static const struct nvkm_object_func
nvkm_uvmm = {
        .dtor = nvkm_uvmm_dtor,
        .mthd = nvkm_uvmm_mthd,
};

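/* Constructor for the userspace VMM object: create a per-client VMM via
 * the backend constructor, or, where the MMU exposes only a single
 * shared VMM, take a reference on it (in which case no size may be
 * specified).
 */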
int
nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
              struct nvkm_object **pobject)
{
        struct nvkm_mmu *mmu = nvkm_ummu(oclass->parent)->mmu;
        const bool more = oclass->base.maxver >= 0;
        union {
                struct nvif_vmm_v0 v0;
        } *args = argv;
        const struct nvkm_vmm_page *page;
        struct nvkm_uvmm *uvmm;
        int ret = -ENOSYS;
        u64 addr, size;
        bool managed;

        if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, more))) {
                managed = args->v0.managed != 0;
                addr = args->v0.addr;
                size = args->v0.size;
        } else
                return ret;

        if (!(uvmm = kzalloc(sizeof(*uvmm), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_object_ctor(&nvkm_uvmm, oclass, &uvmm->object);
        *pobject = &uvmm->object;

        if (!mmu->vmm) {
                ret = mmu->func->vmm.ctor(mmu, managed, addr, size, argv, argc,
                                          NULL, "user", &uvmm->vmm);
                if (ret)
                        return ret;

                uvmm->vmm->debug = max(uvmm->vmm->debug, oclass->client->debug);
        } else {
                if (size)
                        return -EINVAL;

                uvmm->vmm = nvkm_vmm_ref(mmu->vmm);
        }

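        /* Report the number of supported page sizes and the managed
         * address range back to the client.
         */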
        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
        while (page && (page++)->shift)
                args->v0.page_nr++;
        args->v0.addr = uvmm->vmm->start;
        args->v0.size = uvmm->vmm->limit;
        return 0;
}