drm/nouveau/mmu: implement base for new vm management
[linux-2.6-block.git] drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
#ifndef __NVKM_VMM_H__
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>

struct nvkm_vmm_pt {
	/* Some GPUs have a mapping level with dual page tables to
	 * support large and small pages in the same address-range.
	 *
	 * We track the state of both page tables in one place, which
	 * is why there are multiple PT pointers/refcounts here.
	 */
	struct nvkm_mmu_pt *pt[2];
	u32 refs[2];

	/* Page size handled by this PT.
	 *
	 * The Tesla backend needs to know this when writing PDEs;
	 * otherwise it's unnecessary.
	 */
	u8 page;

	/* Whether the entire page table is sparse.
	 *
	 * Used to propagate sparseness to child page tables.
	 */
	bool sparse:1;

	/* Tracking for page directories.
	 *
	 * The array is indexed by PDE, and each entry either points
	 * to the child page table, or indicates that the PDE is
	 * marked as sparse.
	 */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
	struct nvkm_vmm_pt **pde;

	/* Tracking for dual page tables.
	 *
	 * There's one entry for each LPTE, keeping track of whether
	 * there are valid SPTEs in the same address-range.
	 *
	 * This information is used to manage LPTE state transitions.
	 */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID 0x40
#define NVKM_VMM_PTE_SPTES 0x3f
	u8 pte[];
};
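
/* Illustrative sketch, not part of the original header: hypothetical
 * helpers showing how the PDE markers and the dual-PT state byte
 * above might be interrogated.  All example_* names are made up.
 */
static inline bool
example_pte_sparse(u8 pte)
{
	/* Whole-range sparse marker for this LPTE. */
	return (pte & NVKM_VMM_PTE_SPARSE) != 0;
}

static inline u8
example_pte_sptes(u8 pte)
{
	/* Number of valid SPTEs covering this LPTE's address-range. */
	return pte & NVKM_VMM_PTE_SPTES;
}

static inline bool
example_pde_backed(struct nvkm_vmm_pt *pgd, u32 pdei)
{
	/* A PDE is backed by a child PT only if it's neither NULL
	 * (never allocated) nor the ERR_PTR-based SPARSE marker.
	 */
	return !NVKM_VMM_PDE_INVALID(pgd->pde[pdei]);
}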

struct nvkm_vmm_desc_func {
};

struct nvkm_vmm_desc {
	enum {
		PGD,
		PGT,
		SPT,
		LPT,
	} type;
	u8 bits;	/* VMA bits covered by PT. */
	u8 size;	/* Bytes-per-PTE. */
	u32 align;	/* PT address alignment. */
	const struct nvkm_vmm_desc_func *func;
};
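
/* Illustrative sketch, not part of the original header: backends are
 * expected to describe their page-table layout as a descriptor array,
 * typically ordered from the leaf PT up to the root PGD and ended by
 * an empty terminator.  Every example_* name and value is made up.
 */
static const struct nvkm_vmm_desc_func example_vmm_pgt;
static const struct nvkm_vmm_desc_func example_vmm_pgd;

static const struct nvkm_vmm_desc
example_vmm_desc[] = {
	{ SPT, 10, 8, 0x1000, &example_vmm_pgt }, /* leaf: 2^10 PTEs, 8 bytes each */
	{ PGD, 10, 8, 0x1000, &example_vmm_pgd }, /* root: 2^10 PDEs */
	{}
};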

struct nvkm_vmm_page {
	u8 shift;
	const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE 0x01
#define NVKM_VMM_PAGE_VRAM 0x02
#define NVKM_VMM_PAGE_HOST 0x04
#define NVKM_VMM_PAGE_COMP 0x08
#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
	u8 type;
};
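
/* Illustrative sketch, not part of the original header: the S/V/H/C
 * letters in the combined macros above simply spell out which of the
 * four flag bits are set.  The example_* helper below is made up.
 */
static inline bool
example_page_comp(const struct nvkm_vmm_page *page)
{
	/* True if this page size supports compression. */
	return (page->type & NVKM_VMM_PAGE_COMP) != 0;
}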

struct nvkm_vmm_func {
	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

	u64 page_block;
	const struct nvkm_vmm_page page[];
};
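
/* Illustrative sketch, not part of the original header: a backend
 * bundles its hooks and supported page sizes into one of these.
 * Statically initialising the flexible page[] array relies on a GCC
 * extension.  Names/values are hypothetical; example_vmm_desc is the
 * descriptor sketch above (a real backend would use a distinct
 * descriptor chain per page size).
 */
static const struct nvkm_vmm_func
example_vmm = {
	.page = {
		{ 17, example_vmm_desc, NVKM_VMM_PAGE_xVxC }, /* 128KiB */
		{ 12, example_vmm_desc, NVKM_VMM_PAGE_xVHx }, /* 4KiB */
		{}
	}
};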

int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
		  const char *name, struct nvkm_vmm **);
int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
		  const char *name, struct nvkm_vmm *);
void nvkm_vmm_dtor(struct nvkm_vmm *);

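/* Illustrative sketch, not part of the original header: a backend
 * constructor would plausibly be a thin wrapper around
 * nvkm_vmm_new_(), passing its func table and the size of whatever
 * header precedes the page directory.  All example_* names are
 * hypothetical.
 */
static inline int
example_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
		struct lock_class_key *key, const char *name,
		struct nvkm_vmm **pvmm)
{
	/* pd_header == 0: nothing precedes the page directory. */
	return nvkm_vmm_new_(&example_vmm, mmu, 0, addr, size, key,
			     name, pvmm);
}
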
struct nvkm_vmm_user {
	struct nvkm_sclass base;
	int (*ctor)(struct nvkm_mmu *, u64 addr, u64 size, void *args, u32 argc,
		    struct lock_class_key *, const char *name,
		    struct nvkm_vmm **);
};
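
/* Illustrative sketch, not part of the original header: a ctor hook
 * matching the signature above.  A real implementation would parse
 * the args/argc blob first; example_vmm_new is the hypothetical
 * constructor sketched earlier.
 */
static inline int
example_vmm_uctor(struct nvkm_mmu *mmu, u64 addr, u64 size, void *args,
		  u32 argc, struct lock_class_key *key, const char *name,
		  struct nvkm_vmm **pvmm)
{
	if (argc)
		return -ENOSYS; /* this sketch understands no arguments */
	return example_vmm_new(mmu, addr, size, key, name, pvmm);
}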
#endif