/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

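/**
 * radeon_gem_object_free - free the radeon_bo backing a GEM object
 *
 * @gobj: GEM object being destroyed
 *
 * Drops any PRIME import attachment and releases the last buffer object
 * reference when the GEM object goes away.
 */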
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

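/**
 * radeon_gem_object_create - allocate a GEM object backed by a radeon_bo
 *
 * @rdev: radeon device
 * @size: requested size in bytes
 * @alignment: requested alignment, rounded up to at least PAGE_SIZE
 * @initial_domain: preferred placement domain (VRAM/GTT/CPU)
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal allocations
 * @obj: returned GEM object
 *
 * Rejects allocations larger than the unpinned GTT size, retries VRAM
 * allocations with VRAM|GTT placement on failure and adds the new BO to
 * the device's GEM object list.  Returns 0 or a negative error code.
 */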
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

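/**
 * radeon_gem_set_domain - validate a BO for the requested domains
 *
 * @gobj: GEM object to validate
 * @rdomain: requested read domains
 * @wdomain: requested write domain
 *
 * Only the CPU domain is handled for now: the call waits up to 30 seconds
 * for the object to go idle.  Other domains are accepted without action.
 */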
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to become idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

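/**
 * radeon_gem_handle_lockup - turn a GPU lockup error into a retry
 *
 * @rdev: radeon device
 * @r: error code returned by the failed operation
 *
 * On -EDEADLK the GPU is reset and -EAGAIN is returned so that userspace
 * retries the ioctl; any other error code is passed through unchanged.
 */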
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

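/**
 * radeon_gem_create_ioctl - create a GEM object for userspace
 *
 * @dev: drm device
 * @data: struct drm_radeon_gem_create from userspace
 * @filp: drm file the handle is created for
 *
 * Rounds the requested size up to a whole number of pages, allocates the
 * backing BO and returns a handle to it; the allocation reference is
 * dropped once the handle owns the object.
 */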
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

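/**
 * radeon_gem_userptr_ioctl - wrap a userspace memory range in a GEM object
 *
 * @dev: drm device
 * @data: struct drm_radeon_gem_userptr from userspace
 * @filp: drm file the handle is created for
 *
 * Checks the flag combination (READONLY needs an R600 or newer GPU,
 * writable mappings must be anonymous-only and registered with an MMU
 * notifier), creates a CPU-domain BO, attaches the user pages and, when
 * VALIDATE is set, binds the pages into GTT immediately.
 */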
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

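/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset of a BO
 *
 * @filp: drm file owning the handle
 * @dev: drm device
 * @handle: GEM handle
 * @offset_p: returned mmap offset
 *
 * Userptr objects cannot be mapped this way and are rejected with -EPERM.
 */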
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

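/**
 * radeon_gem_busy_ioctl - report whether a BO is still busy
 *
 * @dev: drm device
 * @data: struct drm_radeon_gem_busy from userspace
 * @filp: drm file owning the handle
 *
 * Polls the BO without blocking and reports its current placement domain
 * back to userspace.
 */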
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

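/**
 * radeon_gem_wait_idle_ioctl - block until a BO is idle
 *
 * @dev: drm device
 * @data: struct drm_radeon_gem_wait_idle from userspace
 * @filp: drm file owning the handle
 *
 * Waits up to 30 seconds for all fences on the BO's reservation object
 * and flushes the HDP cache via MMIO when the placement check indicates
 * VRAM.
 */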
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

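/**
 * radeon_gem_va_ioctl - map or unmap a BO in the caller's virtual address space
 *
 * @dev: drm device
 * @data: struct drm_radeon_gem_va from userspace
 * @filp: drm file whose per-file VM is used
 *
 * Validates the requested offset, flags and operation, looks up the bo_va
 * for the caller's VM and maps or unmaps the BO at the given GPU virtual
 * address, updating the page tables right away when possible.
 */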
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can start using those fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove: we need to enforce that userspace sets the snooped
	 * flag, otherwise we will end up with broken userspace and we won't
	 * be able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

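/**
 * radeon_gem_op_ioctl - get or set the initial placement domain of a BO
 *
 * @dev: drm device
 * @data: struct drm_radeon_gem_op from userspace
 * @filp: drm file owning the handle
 *
 * Userptr objects are rejected with -EPERM; otherwise the BO's
 * initial_domain is either returned or masked down to the VRAM/GTT/CPU
 * domains supplied by userspace.
 */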
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

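/**
 * radeon_mode_dumb_create - create a dumb buffer suitable for scanout
 *
 * @file_priv: drm file the handle is created for
 * @dev: drm device
 * @args: width, height and bpp in; handle, pitch and size out
 *
 * Computes the pitch and page-aligned size from the requested geometry,
 * allocates a VRAM BO and returns a handle to it.
 */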
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}