/* drivers/gpu/drm/radeon/radeon_ttm.c */
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

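/*
 * Recover the owning radeon_device from a ttm_bo_device pointer: the
 * bo_device is embedded in struct radeon_mman, which in turn is embedded
 * in struct radeon_device, so two container_of() steps walk back up.
 */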
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

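/*
 * Take references on the two TTM global items this driver needs: the
 * memory-accounting object (TTM_GLOBAL_TTM_MEM) and the buffer-object
 * global state (TTM_GLOBAL_TTM_BO). The BO global depends on the memory
 * global, so it is initialized second and torn down first.
 */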
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

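/*
 * Pick the TTM backend used to bind pages into the GPU's address space:
 * the generic AGP backend when the device sits on an AGP bus, otherwise
 * the driver's own GART backend (see radeon_ttm_backend_create() below).
 */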
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

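/*
 * Describe the three placements TTM can use on this hardware: cacheable
 * system pages (TTM_PL_SYSTEM), GART/GTT pages (TTM_PL_TT, backed by the
 * AGP aperture on AGP boards), and on-board VRAM (TTM_PL_VRAM), which is
 * a fixed, ioremap-able aperture at mc.aper_base.
 */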
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = rdev->mc.gtt_location;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = rdev->mc.vram_location;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

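/*
 * Choose where a buffer should go when it is evicted. Buffers that do not
 * belong to this driver are pushed to cached system memory; VRAM buffers
 * are evicted to GTT while the CP is running (so the move can be blitted)
 * and straight to the CPU domain otherwise.
 */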
static void radeon_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!rbo->rdev->cp.ready)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

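/*
 * "Move" a buffer without touching any pages: nothing is bound yet, so it
 * is enough to adopt the new memory registration in place.
 */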
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

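/*
 * Copy a buffer with the GPU. The drm_mm offsets of the two placements are
 * turned into GPU addresses by adding the VRAM/GTT base, the copy is queued
 * on the CP with a fence, and ttm_bo_move_accel_cleanup() waits on (or
 * pipelines behind) that fence before the old backing store is released.
 */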
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	/* Create the fence only once the move is known to be possible,
	 * so the error paths above cannot leak it. */
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

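/*
 * VRAM -> system move. VRAM pages cannot be handed straight to the CPU
 * path, so bounce through GTT: grab a temporary TT placement, bind the
 * buffer's pages there, blit VRAM into it, then let TTM finish the move
 * to system memory.
 */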
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

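/*
 * System -> VRAM move, the mirror of the above: move the buffer into a
 * temporary GTT placement first, then blit from GTT into VRAM.
 */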
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

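/*
 * Top-level move hook. Cheap cases (nothing bound, or SYSTEM <-> TT where
 * binding alone suffices) are handled as null moves; otherwise the move is
 * blitted when the CP and the ASIC copy hook are available, and falls back
 * to ttm_bo_move_memcpy() when they are not or when the blit path fails.
 */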
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return r;
}

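/*
 * TTM sync-object hooks. TTM treats sync objects as opaque void pointers;
 * here they are simply struct radeon_fence pointers in disguise.
 */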
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

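/*
 * Bring up TTM for this device: take the global references, initialize
 * the bo_device with its mappable offset space starting at
 * DRM_FILE_PAGE_OFFSET, create the VRAM and GTT managers, and pin a small
 * 256KB VRAM buffer that stands in for the "stolen" VGA memory.
 */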
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other users of the address space, so set it to 0. */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

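/*
 * mmap support. TTM installs its own vm_operations_struct on the VMA; a
 * copy of it is patched so that page faults are routed through
 * radeon_ttm_fault() before falling through to TTM's handler.
 */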
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

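/*
 * The file-offset space is split in two: offsets below DRM_FILE_PAGE_OFFSET
 * belong to the legacy DRM maps and go through drm_mmap(), everything above
 * is a TTM buffer object and goes through ttm_bo_mmap(). Userspace mmap()s
 * a BO at the offset the kernel handed back for it, roughly (illustrative
 * sketch only; bo_mmap_offset stands in for the driver-reported offset):
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, bo_mmap_offset);
 */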
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

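/*
 * Enter the populated pages into the GART at the offset TTM picked for
 * this buffer (mm_node->start, converted from pages to bytes).
 */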
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}

#define RADEON_DEBUGFS_MEM_TYPES 2

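/*
 * Debugfs: expose two files, radeon_vram_mm and radeon_gtt_mm, that dump
 * the drm_mm allocator state of the VRAM and GTT managers under the
 * bo_global lru_lock.
 */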
#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;
	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list,
					RADEON_DEBUGFS_MEM_TYPES);
#endif
	return 0;
}