drm/nouveau/core: remove nouveau_mm.mutex, no more users
drivers/gpu/drm/nouveau/core/subdev/vm/base.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/vm.h>

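/*
 * Map every region of @node into @vma, starting @delta bytes in.  All
 * offsets are converted to 4KiB units (the ">> 12" shifts), runs are
 * clipped at page-table boundaries ("max" PTEs per table), and @delta is
 * then reused as a running byte offset handed through to vmm->map().
 * The TLB is flushed once at the end.
 */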
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vmm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vmm->flush(vm);
}

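/* Convenience wrapper: map @node at offset 0 of @vma. */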
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

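/*
 * Map a scatter-gather table one PAGE_SIZE entry at a time.  Unlike
 * nouveau_vm_map_at(), this cannot assume physically contiguous regions,
 * so each DMA address is handed to vmm->map_sg() individually.
 */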
void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
			struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			/* the remainder of this sg entry crossed into the
			 * next page table above, so refetch it rather than
			 * writing PTEs through the stale pointer */
			pgt = vm->pgt[pde].obj[big];
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}
	}
finish:
	vmm->flush(vm);
}

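/*
 * Map @length bytes of a page-address array (mem->pages) starting @delta
 * bytes into @vma.  Contiguous runs within a single page table are handed
 * to vmm->map_sg() in one call.
 */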
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vmm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vmm->flush(vm);
}

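/*
 * Invalidate the PTEs covering @length bytes starting @delta bytes into
 * @vma.  This only clears mappings; the page tables themselves remain
 * referenced until nouveau_vm_put() releases them.
 */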
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vmm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vmm->flush(vm);
}

void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

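/*
 * Drop one reference on each page table between @fpde and @lpde and, for
 * tables whose refcount hits zero, clear the corresponding PDE in every
 * PGD attached to this VM.  Called with the vmmgr subdev mutex held; the
 * mutex is dropped around the final nouveau_gpuobj_ref() while the page
 * table object itself is released.
 */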
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&nv_subdev(vmm)->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&nv_subdev(vmm)->mutex);
	}
}

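/*
 * Allocate the page table backing PDE @pde and point every attached PGD
 * at it.  The subdev mutex is dropped across nouveau_gpuobj_new(), so by
 * the time it is retaken another thread may already have populated the
 * PDE; in that case the freshly allocated table is released again.
 */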
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vmm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&nv_subdev(vmm)->mutex);
	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&nv_subdev(vmm)->mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&nv_subdev(vmm)->mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&nv_subdev(vmm)->mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

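/*
 * Allocate a @size-byte range of GPU virtual address space from @vm,
 * using (1 << @page_shift)-byte pages, and take a reference on each page
 * table covering the range (allocating them on first use).  On success
 * @vma is ready to be handed to the nouveau_vm_map*() functions.
 *
 * A minimal lifecycle sketch (access flags are the NV_MEM_ACCESS_*
 * values from subdev/vm.h; error handling trimmed):
 *
 *	struct nouveau_vma vma = {};
 *
 *	ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	if (ret == 0) {
 *		nouveau_vm_map(&vma, node);
 *		...
 *		nouveau_vm_unmap(&vma);
 *		nouveau_vm_put(&vma);
 *	}
 */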
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&nv_subdev(vmm)->mutex);
	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
			      &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&nv_subdev(vmm)->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vmm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vmm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&nv_subdev(vmm)->mutex);
			return ret;
		}
	}
	mutex_unlock(&nv_subdev(vmm)->mutex);

	vma->vm = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

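/*
 * Release the address range held by @vma: drop the page-table references
 * taken by nouveau_vm_get() and return the range to the allocator.
 */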
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vmm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

	mutex_lock(&nv_subdev(vmm)->mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
	nouveau_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&nv_subdev(vmm)->mutex);
}

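/*
 * Create a VM spanning [@offset, @offset + @length).  Allocations made
 * through nouveau_vm_get() are served from the sub-range beginning at
 * @mm_offset, in @block-byte granularity.  The PDE array (vm->pgt)
 * covers the whole span; the page tables themselves are allocated
 * lazily.
 */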
int
nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
{
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->vmm = vmm;
	vm->refcount = 1;
	vm->fpde = offset >> (vmm->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);

	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;

	return 0;
}

int
nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
	       u64 mm_offset, struct nouveau_vm **pvm)
{
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	return vmm->create(vmm, offset, length, mm_offset, pvm);
}

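/*
 * Attach page directory @pgd to @vm and write every currently allocated
 * page table into it, so the new PGD sees the same layout as the others
 * on the pgd_list.
 */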
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&nv_subdev(vmm)->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&nv_subdev(vmm)->mutex);
	return 0;
}

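/*
 * Detach @mpgd from @vm.  The matching vpgd entry is unhooked under the
 * mutex; the gpuobj reference is dropped only after it is released.
 */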
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&nv_subdev(vmm)->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&nv_subdev(vmm)->mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}

static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}

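/*
 * Update *@ptr to reference @ref, linking/unlinking @pgd as appropriate
 * and adjusting VM refcounts.  Passing ref == NULL drops the existing
 * reference; the VM is destroyed when its last reference goes away.
 */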
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}