drm/nouveau/vm: reduce number of entry-points to vm_map()
[linux-2.6-block.git] / drivers / gpu / drm / nouveau / nouveau_sgdma.c
CommitLineData
6ee73861 1#include <linux/pagemap.h>
5a0e3ad6 2#include <linux/slab.h>
6ee73861 3
ebb945a9
BS
4#include <subdev/fb.h>
5
6#include "nouveau_drm.h"
7#include "nouveau_ttm.h"
6ee73861
BS
8
/* Per-buffer TTM backend state for nouveau scatter/gather DMA. */
struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;		/* embedded TTM DMA object; must stay first */
	struct drm_device *dev;		/* owning DRM device */
	struct nouveau_mem *node;	/* memory node recorded at bind time (nv04 path) */
};
17
efa58db3 18static void
649bf3ca 19nouveau_sgdma_destroy(struct ttm_tt *ttm)
efa58db3 20{
649bf3ca 21 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
efa58db3 22
649bf3ca 23 if (ttm) {
8e7e7052 24 ttm_dma_tt_fini(&nvbe->ttm);
649bf3ca 25 kfree(nvbe);
efa58db3
BS
26 }
27}
28
6ee73861 29static int
649bf3ca 30nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
6ee73861 31{
649bf3ca 32 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
3863c9bc 33 struct nouveau_mem *node = mem->mm_node;
6ee73861 34
3863c9bc 35 if (ttm->sg) {
2e2cfbe6
BS
36 node->sg = ttm->sg;
37 node->pages = NULL;
3863c9bc 38 } else {
2e2cfbe6 39 node->sg = NULL;
3863c9bc 40 node->pages = nvbe->ttm.dma_address;
6ee73861 41 }
2e2cfbe6 42 node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
6ee73861 43
2e2cfbe6 44 nouveau_vm_map(&node->vma[0], node);
3863c9bc 45 nvbe->node = node;
6ee73861
BS
46 return 0;
47}
48
/* Tear down the GART mapping created by nv04_sgdma_bind().
 * Uses the node cached on the backend at bind time. */
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}
56
/* TTM backend ops for pre-NV50 cards: bind/unbind manage the VM
 * mapping directly (no move_notify() involvement). */
static struct ttm_backend_func nv04_sgdma_backend = {
	.bind = nv04_sgdma_bind,
	.unbind = nv04_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
6ee73861 62
/* Bind backing pages for NV50+ hardware.  Only fills in the memory
 * node's page-source fields; the actual VM mapping is performed in
 * move_notify() (hence the "noop" note below). */
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		/* imported dma-buf: use its sg table */
		node->sg = ttm->sg;
		node->pages = NULL;
	} else {
		/* our own pages: use their DMA addresses */
		node->sg = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12; /* size in 4KiB units */
	return 0;
}
80
/* Nothing to do on NV50+: the VM mapping is torn down in
 * move_notify(), mirroring nv50_sgdma_bind(). */
static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}
87
/* TTM backend ops for NV50+ cards: bind/unbind are effectively no-ops
 * because mapping happens in move_notify(). */
static struct ttm_backend_func nv50_sgdma_backend = {
	.bind = nv50_sgdma_bind,
	.unbind = nv50_sgdma_unbind,
	.destroy = nouveau_sgdma_destroy
};
93
649bf3ca
JG
94struct ttm_tt *
95nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
96 unsigned long size, uint32_t page_flags,
97 struct page *dummy_read_page)
6ee73861 98{
ebb945a9 99 struct nouveau_drm *drm = nouveau_bdev(bdev);
6ee73861
BS
100 struct nouveau_sgdma_be *nvbe;
101
6ee73861
BS
102 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
103 if (!nvbe)
104 return NULL;
105
ebb945a9
BS
106 nvbe->dev = drm->dev;
107 if (nv_device(drm->device)->card_type < NV_50)
108 nvbe->ttm.ttm.func = &nv04_sgdma_backend;
109 else
110 nvbe->ttm.ttm.func = &nv50_sgdma_backend;
6ee73861 111
7a59cc34 112 if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
649bf3ca 113 return NULL;
8e7e7052 114 return &nvbe->ttm.ttm;
6ee73861 115}