/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/io.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <drm/drm_cache.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_trace.h"
#include "radeon_ttm.h"

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

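/**
 * radeon_ttm_bo_destroy - TTM callback that frees a radeon BO
 *
 * @tbo: TTM buffer object whose last reference was dropped
 *
 * Unlinks the BO from the device's GEM list, releases its surface
 * register and the GEM base object, and frees the radeon_bo wrapper.
 */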
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	if (bo->tbo.base.import_attach)
		drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

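/**
 * radeon_ttm_bo_is_radeon_bo - check if a TTM BO wraps a radeon BO
 *
 * @bo: TTM buffer object to check
 *
 * Returns true when the destroy callback matches ours, i.e. it is safe
 * to container_of() back to a struct radeon_bo.
 */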
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

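/**
 * radeon_ttm_placement_from_domain - build TTM placements from a domain mask
 *
 * @rbo: radeon BO whose placement array is filled in
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM, _GTT and _CPU
 *
 * Translates the radeon domain mask into rbo->placements for TTM
 * validation.  BOs flagged RADEON_GEM_NO_CPU_ACCESS get an extra VRAM
 * placement above the CPU-visible aperture, and an empty mask falls
 * back to system memory.
 */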
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c].mem_type = TTM_PL_VRAM;
			rbo->placements[c++].flags = 0;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_VRAM;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_TT;
		rbo->placements[c++].flags = 0;
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c].mem_type = TTM_PL_SYSTEM;
		rbo->placements[c++].flags = 0;
	}

	rbo->placement.num_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

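/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device the BO belongs to
 * @size: size in bytes, rounded up to a multiple of the page size
 * @byte_align: alignment in bytes, converted to a page alignment
 * @kernel: true for kernel BOs (uninterruptible ttm_bo_type_kernel)
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* flags; GTT write-combining is masked out on
 *	   configurations where it is known to be broken
 * @sg: scatter/gather table for BOs imported via PRIME, NULL otherwise
 * @resv: reservation object to share, NULL to get a private one
 * @bo_ptr: where the new BO is stored on success
 *
 * Returns 0 on success, a negative error code on failure.
 */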
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct dma_resv *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	drm_gem_private_object_init(rdev_to_drm(rdev), &bo->tbo.base, size);
	bo->tbo.base.funcs = &radeon_gem_object_funcs;
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */
#ifndef CONFIG_COMPILE_TEST
#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
#endif

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init_validate(&rdev->mman.bdev, &bo->tbo, type,
				 &bo->placement, page_align, !kernel, sg, resv,
				 &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

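/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 *
 * @bo: BO to map
 * @ptr: optional location that receives the kernel virtual address
 *
 * Waits for kernel fences on the BO, then creates (or reuses) a CPU
 * mapping covering the whole object.  Returns 0 on success, a negative
 * error code on failure.
 */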
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	long r;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	if ((*bo) == NULL)
		return;
	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

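/**
 * radeon_bo_pin_restricted - pin a BO within a restricted offset range
 *
 * @bo: reserved BO to pin
 * @domain: RADEON_GEM_DOMAIN_* to pin the BO into
 * @max_offset: highest acceptable GPU offset, 0 for no restriction
 * @gpu_addr: optional location that receives the pinned GPU offset
 *
 * Validates the BO into @domain and raises its pin count; pinning an
 * already pinned BO only increments the count.  Userptr BOs cannot be
 * pinned (-EPERM), and BOs shared as dma-bufs cannot be pinned into
 * VRAM (-EINVAL).  Returns 0 on success, a negative error code on
 * failure.
 */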
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
		return -EPERM;

	if (bo->tbo.pin_count) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].mem_type == TTM_PL_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0)) {
		ttm_bo_pin(&bo->tbo);
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

void radeon_bo_unpin(struct radeon_bo *bo)
{
	ttm_bo_unpin(&bo->tbo);
	if (!bo->tbo.pin_count) {
		if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	}
}

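/**
 * radeon_bo_evict_vram - evict all buffers from VRAM
 *
 * @rdev: radeon device
 *
 * Empties the VRAM resource manager, e.g. around suspend/hibernation.
 * Eviction is skipped for IGP chips without sideport memory, where
 * "VRAM" is carved out of system RAM anyway.  Returns 0 on success, a
 * negative error code on failure.
 */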
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	struct ttm_device *bdev = &rdev->mman.bdev;
	struct ttm_resource_manager *man;

	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
#endif
	man = ttm_manager_type(bdev, TTM_PL_VRAM);
	if (!man)
		return 0;
	return ttm_resource_manager_evict_all(bdev, man);
}

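/**
 * radeon_bo_force_delete - force-free all remaining GEM objects
 *
 * @rdev: radeon device
 *
 * Last-resort cleanup for objects userspace leaked: complains about
 * each BO still on the GEM list and drops its final reference.
 */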
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	struct ttm_resource_manager *man =
		ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
	u64 vram_usage = ttm_resource_manager_usage(man);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *            __________________
	 * 1/4 of   -|\                 |
	 * VRAM      | \                |
	 *           |  \               |
	 *           |   \              |
	 *           |    \             |
	 *           |     \            |
	 *           |      \           |
	 *           |       \__________|1 MB
	 *           |------------------|
	 *      VRAM 0 %              100 %
	 *           used             used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

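/**
 * radeon_bo_list_validate - lock and place all BOs of a command submission
 *
 * @rdev: radeon device
 * @exec: drm_exec context used to lock every BO on the list
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring the command submission is targeted at
 *
 * Locks all BOs with drm_exec, then validates each one into its
 * preferred domains, falling back to the allowed domains on failure.
 * Once the per-IB bytes-moved threshold is crossed, BOs that already
 * sit in an allowed domain are left where they are.  Returns 0 on
 * success, a negative error code on failure.
 */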
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct drm_exec *exec,
			    struct list_head *head, int ring)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_bo_list *lobj;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
	int r;

	drm_exec_until_all_locked(exec) {
		list_for_each_entry(lobj, head, list) {
			r = drm_exec_prepare_obj(exec, &lobj->robj->tbo.base,
						 1);
			drm_exec_retry_on_contention(exec);
			if (unlikely(r && r != -EALREADY))
				return r;
		}
	}

	list_for_each_entry(lobj, head, list) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->tbo.pin_count) {
			u32 domain = lobj->preferred_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.resource->mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	return 0;
}

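/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 *
 * @bo: reserved BO with tiling flags set
 *
 * Picks a free surface register, stealing one from an unpinned BO if
 * none is free, and programs it with the BO's tiling parameters.
 * Returns 0 on success or -ENOMEM when every register is in use by a
 * pinned BO.
 */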
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	dma_resv_assert_held(bo->tbo.base.resv);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->tbo.pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.resource->start << PAGE_SHIFT,
			       bo->tbo.base.size);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	dma_resv_assert_held(bo->tbo.base.resv);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

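/**
 * radeon_bo_check_tiling - keep the surface register in sync with the BO
 *
 * @bo: BO to check
 * @has_moved: true if the BO just changed placement
 * @force_drop: true to unconditionally release the surface register
 *
 * Surface registers are only held while a RADEON_TILING_SURFACE BO
 * actually lives in VRAM; this acquires or releases the register as
 * the BO moves around.
 */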
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		dma_resv_assert_held(bo->tbo.base.resv);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

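/**
 * radeon_bo_move_notify - TTM move notification callback
 *
 * @bo: TTM BO that was just moved
 *
 * Drops a surface register that no longer applies and invalidates the
 * VM mappings that point at the old location.
 */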
void radeon_bo_move_notify(struct ttm_buffer_object *bo)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

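/**
 * radeon_bo_fault_reserve_notify - make a faulting BO CPU-accessible
 *
 * @bo: reserved TTM BO that took a CPU page fault
 *
 * If the BO lies in the CPU-invisible part of VRAM, revalidate it into
 * visible VRAM, falling back to GTT on -ENOMEM.  Returns 0 on success
 * or a VM_FAULT_* code to be returned from the fault handler.
 */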
vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->resource->mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->resource->size;
	offset = bo->resource->start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->tbo.pin_count > 0)
		return VM_FAULT_SIGBUS;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].mem_type == TTM_PL_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(bo, &rbo->placement, &ctx);
	} else if (likely(!r)) {
		offset = bo->resource->start << PAGE_SHIFT;
		/* this should never happen */
		if ((offset + size) > rdev->mc.visible_vram_size)
			return VM_FAULT_SIGBUS;
	}

	if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(r))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct dma_resv *resv = bo->tbo.base.resv;
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r) {
		/* As a last resort on OOM we block for the fence */
		dma_fence_wait(&fence->base, false);
		return;
	}

	dma_resv_add_fence(resv, &fence->base, shared ?
			   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
}
587cdda8 794}