/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/radeon_drm.h>

#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

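/*
 * Allocate a GEM object of @size bytes in @initial_domain. The size is
 * capped at the unpinned GTT size; if a VRAM allocation fails, the GTT
 * domain is added as a fallback and the allocation is retried. The new
 * BO is added to rdev->gem.objects for the debugfs listing below.
 */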
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

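/*
 * Validate a set_domain request: for the CPU domain wait (up to 30s) for
 * the BO to become idle, and reject migrating prime-shared BOs to VRAM.
 */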
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

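/*
 * Counterpart of radeon_gem_object_open(): drop the per-VM mapping
 * reference and remove the bo_va once the last reference is gone.
 */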
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

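/*
 * Turn an -EDEADLK (GPU lockup detected) into a GPU reset attempt and
 * report -EAGAIN so userspace can retry the ioctl.
 */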
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

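/*
 * RADEON_GEM_CREATE: allocate a BO of the requested (page aligned) size
 * and return a GEM handle for it to userspace.
 */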
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

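/*
 * RADEON_GEM_USERPTR: wrap anonymous user memory in a BO. Write access
 * requires both the ANONONLY and REGISTER (MMU notifier) flags;
 * VALIDATE binds the pages into GTT right away.
 */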
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install a MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

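/*
 * Look up a handle and return the fake mmap offset of its BO; userptr
 * BOs may not be mapped through the GEM mmap path.
 */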
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

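/*
 * RADEON_GEM_BUSY: report -EBUSY if any fence on the BO's reservation
 * object is still pending, and return the BO's current memory domain.
 */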
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

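/*
 * RADEON_GEM_WAIT_IDLE: block (up to 30s) until the BO is idle, then
 * flush the HDP cache through MMIO if the BO currently lives in VRAM.
 */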
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

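/*
 * RADEON_GEM_VA: map or unmap a BO in the per-file virtual memory
 * context. Requires the VM manager to be enabled; offsets and flags are
 * validated and the page tables are updated right away via
 * radeon_gem_va_update_vm().
 */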
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, to be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value; thus moving
	 * forward we can use those fields without breaking existing userspace
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

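/*
 * RADEON_GEM_OP: get or set the initial domain of a BO. Not allowed on
 * userptr BOs.
 */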
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}